| prompt (string, lengths 19 to 1.03M) | completion (string, lengths 4 to 2.12k) | api (string, lengths 8 to 90) |
|---|---|---|
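Each row below pairs a prompt (a Python snippet truncated immediately before a pandas call), a completion (the call itself), and the fully qualified api name; rows are delimited by the `| completion | api |` markers that close each snippet. As a rough, illustrative sketch only, the lines below show one way such a dump could be reloaded and reassembled; the file name, the tab-separated storage format, and the column access are assumptions for the example, not part of the dataset.
import pandas as pd
# Illustrative sketch only: the file name and storage format are assumed.
rows = pd.read_csv("pandas_api_completions.tsv", sep="\t")
rows["full_snippet"] = rows["prompt"] + rows["completion"]
print(rows.loc[0, "api"])                  # e.g. "pandas.DataFrame"
print(rows.loc[0, "full_snippet"][-120:])  # tail of the first reassembled snippet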
import pandas as pd
import geopandas as gpd
import os
from osgeo import gdal
import numpy as np
import matplotlib.pyplot as plt
import cv2
import csv
np.random.seed(100)
create_geo_files = False
create_mask = False
create_train_data = True
seq_len = 20
base_path = "../datasets/Ushant-Traffic"
texts_path = "../datasets/Ushant-Traffic/text_data"
os.makedirs(texts_path, exist_ok=True)
jsons_path = "../datasets/Ushant-Traffic/json_data"
os.makedirs(jsons_path, exist_ok=True)
trajs = os.listdir(texts_path)
trajs_paths = [os.path.join(texts_path, traj_name) for traj_name in trajs]
if create_geo_files:
for path in trajs_paths:
df = pd.read_csv(path, delimiter = ";")
gdf = gpd.GeoDataFrame(df, geometry=(gpd.points_from_xy(df.x, df.y)))
file_name = os.path.splitext(os.path.basename(path))[0] + ".json"
gdf.to_file(os.path.join(jsons_path, file_name), driver="GeoJSON")
if create_train_data:
train_folder = os.path.join(base_path, 'train')
os.makedirs(train_folder, exist_ok=True)
valid_folder = os.path.join(base_path, 'valid')
os.makedirs(valid_folder, exist_ok=True)
test_folder = os.path.join(base_path, 'test')
os.makedirs(test_folder, exist_ok=True)
all_data = []
id = 1
for path in trajs_paths:
df = pd.read_csv(path, delimiter=";")
df = df.get(['x', 'y'])
df_length = df.shape[0]//seq_len
for idx in range(0, df_length*seq_len, seq_len):
all_data.append(df.values[idx:idx+seq_len])
# Split data into train, valid and test sets
train_length = int(0.7 * len(all_data))
test_length = int(0.2 * len(all_data))
valid_length = len(all_data) - train_length - test_length
np.random.shuffle(all_data)
# Create train
train_temp = np.array(all_data[0:train_length])
train_data = np.zeros((train_temp.shape[0], 20, 4))
train_data[:, :, -2] = train_temp[:, :, 0]
train_data[:, :, -1] = train_temp[:, :, 1]
train_data = train_data.reshape((train_temp.shape[0]*seq_len, 4))
train_data[:, 0] = np.array(range(0, train_temp.shape[0]*seq_len, 1))
train_data[:, 1] = np.repeat(np.array(range(0, train_temp.shape[0], 1)), 20)
pd.DataFrame(train_data).to_csv(os.path.join(train_folder, "train.csv"), sep="\t", index=False)
# pd.read_csv(os.path.join(train_folder, "train.csv"), delimiter="\t", index_col=False)
# Create test
test_temp = np.array(all_data[train_length: train_length+test_length])
test_data = np.zeros((test_temp.shape[0], 20, 4))
test_data[:, :, -2] = test_temp[:, :, 0]
test_data[:, :, -1] = test_temp[:, :, 1]
test_data = test_data.reshape((test_temp.shape[0]*seq_len, 4))
test_data[:, 0] = np.array(range(0, test_temp.shape[0]*seq_len, 1))
test_data[:, 1] = np.repeat(np.array(range(0, test_temp.shape[0], 1)), 20)
| pd.DataFrame(test_data) | pandas.DataFrame |
from typing import List, Optional, Dict
from pathlib import Path
import pandas as pd
import wfdb
from .definitions import RespiratoryEventType, RespiratoryEvent, EnduringEvent, PhysioNetDataset, _Event, TransientEvent, \
SleepStageType, SleepStageEvent
_SLEEP_STAGE_KEYWORDS = [s.value for s in SleepStageType]
def _create_enduring_event(start: pd.Timedelta, end: pd.Timedelta, aux_note: str) -> EnduringEvent:
"""
Creates an EnduringEvent out of the given parameters. In certain cases, it chooses the sub-class RespiratoryEvent instead.
"""
if "pnea" in aux_note:
if "centralapnea" in aux_note:
event_type = RespiratoryEventType.CentralApnea
elif "mixedapnea" in aux_note:
event_type = RespiratoryEventType.MixedApnea
elif "obstructiveapnea" in aux_note:
event_type = RespiratoryEventType.ObstructiveApnea
elif "hypopnea" in aux_note:
event_type = RespiratoryEventType.Hypopnea
else:
raise RuntimeError(f"Unrecognized *pnea event aux_note: '{aux_note}'")
return RespiratoryEvent(start=start, end=end, aux_note=aux_note, event_type=event_type)
return EnduringEvent(start=start, end=end, aux_note=aux_note)
def _create_transient_event(start: pd.Timedelta, aux_note: str) -> TransientEvent:
if aux_note in _SLEEP_STAGE_KEYWORDS:
sleep_stage_type = SleepStageType(aux_note)
return SleepStageEvent(start=start, aux_note=aux_note, sleep_stage_type=sleep_stage_type)
return TransientEvent(start=start, aux_note=aux_note)
def read_physionet_dataset(dataset_folder: Path, dataset_filename_stem: str = None) -> PhysioNetDataset:
"""
Reads datasets of the [*PhysioNet Challenge 2018*](https://physionet.org/content/challenge-2018/1.0.0/). Reads
samples and annotations from a given dataset folder.
*Annotations are optional*, so they are only parsed when available in the given folder.
:param dataset_folder: The folder containing our .mat, .hea and .arousal files
:param dataset_filename_stem: Name that all the dataset files have in common. If None, we'll derive it from the
folder name.
:return: The Dataset instance.
"""
assert dataset_folder.is_dir() and dataset_folder.exists(), \
f"Given dataset folder {dataset_folder} either not exists or is no folder."
if dataset_filename_stem is None:
dataset_filename_stem = dataset_folder.name
# Read the signal files (.hea & .mat)
record = wfdb.rdrecord(record_name=str(dataset_folder / dataset_filename_stem))
sample_frequency = float(record.fs)
index = pd.timedelta_range(start=0, periods=len(record.p_signal), freq=f"{1/sample_frequency*1_000_000}us")
df_signals = | pd.DataFrame(data=record.p_signal, columns=record.sig_name, index=index, dtype="float32") | pandas.DataFrame |
import pandas as pd
from statsmodels.stats.multicomp import pairwise_tukeyhsd
from statsmodels.stats.multicomp import MultiComparison
import scipy as sp
import numpy as np
FULL_DATASET = True
# Decide whether to run the old code with the full dataset or the new code
# with selected peptides
if (FULL_DATASET):
# Load excel file of partially processed data
data_xls = pd.ExcelFile('./data/timeseries/merged_normalized.xlsx')
# Create empty data frame for result data
result_data = pd.DataFrame()
# Load all of the sheets into a list
sheet_list = {}
index = 0
for sheet_name in data_xls.sheet_names:
# Load sheet into list
sheet_list[index] = data_xls.parse(sheet_name)
index += 1
# Keep only the rows whose peptides appear in all three sheets (the duplicates)
duplicate_data = sheet_list[0][(sheet_list[0]['peptide'].isin(sheet_list[1]['peptide']))].dropna().reset_index(drop=True)
duplicate_data = duplicate_data[(duplicate_data['peptide'].isin(sheet_list[2]['peptide']))].dropna().reset_index(drop=True)
# Trim the duplicate data to just the first four columns (information about peptides)
result_data = duplicate_data.iloc[:,0:4]
# Create variables for the data in A, B, and C
data_A = sheet_list[0][(sheet_list[0]['peptide'].isin(duplicate_data['peptide']))].dropna().reset_index(drop=True)
data_B = sheet_list[1][(sheet_list[1]['peptide'].isin(duplicate_data['peptide']))].dropna().reset_index(drop=True)
data_C = sheet_list[2][(sheet_list[2]['peptide'].isin(duplicate_data['peptide']))].dropna().reset_index(drop=True)
# Add the data from sheets A, B, and C respectively
result_data = pd.concat([result_data, data_A.iloc[:,4:12]], axis=1, ignore_index=True)
result_data = | pd.concat([result_data, data_B.iloc[:,4:12]], axis=1, ignore_index=True) | pandas.concat |
# --------------
import pandas as pd
import scipy.stats as stats
import math
import numpy as np
import warnings
warnings.filterwarnings('ignore')
#Sample_Size
sample_size=2000
#Z_Critical Score
z_critical = stats.norm.ppf(q = 0.95)
# path [File location variable]
#Code starts here
data=pd.read_csv(path)
data_sample=data.sample(sample_size,random_state=0)
sample_mean=data_sample['installment'].mean()
sample_std=data_sample.installment.std()
margin_of_error=z_critical*(sample_std/np.sqrt(sample_size))
confidence_interval=((sample_mean-margin_of_error),(sample_mean+margin_of_error))
true_mean=data.installment.mean()
print('fall') if true_mean>confidence_interval[0] and true_mean<confidence_interval[1] else print('donot fall')
# --------------
import matplotlib.pyplot as plt
import numpy as np
#Different sample sizes to take
sample_size=np.array([20,50,100])
fix,axes=plt.subplots(nrows = 3 , ncols = 1,figsize=(10,7))
for i in range(len(sample_size)):
m=[]
for j in range(1000):
data_Sample=data['installment'].sample(sample_size[i])
m.append(data_Sample.mean())
mean_series= | pd.Series(m) | pandas.Series |
import pandas as pd
def compact_non_binary_features(dir_dataset, features, new_feature, new_path, neutral=None):
"""
Args:
dir_dataset (str) : path of the dataset
features (list) : list of binary features to merge
new_feature (str) : name of the new categorical feature
new_path (str) : directory + filename (e.g. 'data/cleaned_dataset.csv')
neutral (str) : str for a neutral combination (if all "non" positive have a meaning)
Returns:
Saves the compacted dataset, which gains a new categorical column (named
by new_feature, e.g. "race") whose value is derived from the binary features
"""
df = pd.read_csv(dir_dataset)
new_feature_list = []
# iterrows is very expensive
# TODO: try to use a more efficient workaround
for index, row in df.iterrows():
found = False
for feature in features:
if row[feature] == 1:
new_feature_list.append(feature)
found = True
if not found:
new_feature_list.append(neutral)
for feature in features:
df.drop(columns=[feature], inplace=True)
df[new_feature] = | pd.Series(new_feature_list) | pandas.Series |
from scipy import signal
import pandas as pd
import numpy as np
import datetime
import re
import feather
import os, sys, argparse
import plotnine as p9
from matplotlib import gridspec
import matplotlib.pyplot as plt
# custom cwt function because signal.cwt doesn't work on morlets...
def cwt_time(data, frequencies, dt, axis, samplerate):
import scipy
from scipy.signal import morlet
# wavelets can be complex so output is complex
output = np.zeros((len(frequencies),) + data.shape, dtype=complex)
# compute in time
slices = [None for _ in data.shape]
slices[axis] = slice(None)
for ind, frequency in enumerate(frequencies):
# number of points needed to capture wavelet
n_sample_wavelet=int(20*samplerate/frequency)
scale_factor = frequency*n_sample_wavelet/(2*5*samplerate)
wavelet_data = morlet(n_sample_wavelet, 5, scale_factor)
output[ind, :] = scipy.signal.fftconvolve(data,wavelet_data[tuple(slices)],mode='same')
return output
# Plots the cwt data on ax
# Returns the cwt matrix
def plot_cwt(small_data, feature, widths, a, b, samplerate, ax, use_quant):
# Filter and cwt transform the data
cwtmatr = cwt_time(signal.filtfilt(a, b, small_data[feature]-np.mean(small_data[feature])), widths, 1, 0, samplerate)
# Only need magnitude component
signal_mag = cwtmatr.real**2+cwtmatr.imag**2
# Plotting
t = np.arange(small_data[feature].shape[0])
T, S = np.meshgrid(t, widths)
vmin = np.min(signal_mag)
vmax = np.max(signal_mag)
if use_quant:
vmin, vmax = np.quantile(signal_mag, [0.001,0.99])
c_levels = np.linspace(vmin, vmax, 50)
_ = ax.contourf(T/samplerate, S, signal_mag, 100, levels=c_levels, extend='both')
_ = ax.semilogy()
_ = ax.set_ylabel('Frequency, Hz')
_ = ax.set_title(feature)
return cwtmatr, t
def plot_video_clip(args):
input_file = args.input_file
if os.path.splitext(input_file)[1] == '.feather':
data = feather.read_dataframe(input_file)
elif os.path.splitext(input_file)[1] == '.csv':
data = pd.read_csv(input_file)
else:
print('Input file format not recognized: ' + os.path.splitext(input_file)[1])
exit(1)
# Run calculations, if not already present
data['x'] = data['m10']/data['m00']
data['y'] = data['m01']/data['m00']
data['a'] = data['m20']/data['m00']-data['x']**2
data['b'] = 2*(data['m11']/data['m00'] - data['x']*data['y'])
data['c'] = data['m02']/data['m00'] - data['y']**2
data['w'] = np.sqrt(8*(data['a']+data['c']-np.sqrt(data['b']**2+(data['a']-data['c'])**2)))/2
data['l'] = np.sqrt(8*(data['a']+data['c']+np.sqrt(data['b']**2+(data['a']-data['c'])**2)))/2
data['theta'] = 1/2.*np.arctan(2*data['b']/(data['a']-data['c']))
data['aspect_w/l'] = data['w']/data['l']
data['circularity'] = data['m00']*4*np.pi/data['perimeter']**2
data['rectangular'] = data['m00']/(data['w']*data['l'])
data['eccentricity'] = np.sqrt(data['w']**2 + data['l']**2)/data['l']
data['elongation'] = (data['mu20'] + data['mu02'] + (4 * data['mu11']**2 + (data['mu20'] - data['mu02'])**2)**0.5) / (data['mu20'] + data['mu02'] - (4 * data['mu11']**2 + (data['mu20'] - data['mu02'])**2)**0.5)
data['hu0'] = data['nu20'] + data['nu02']
data['hu1'] = (data['nu20']-data['nu02'])**2 + 4*data['nu11']**2
data['hu2'] = (data['nu30']-3*data['nu12'])**2 + (3*data['nu21']-data['nu03'])**2
data['hu3'] = (data['nu30']+data['nu12'])**2 + (data['nu21']+data['nu03'])**2
data['hu4'] = (data['nu30']-3*data['nu12'])*(data['nu30']+data['nu12'])*((data['nu30']+data['nu12'])**2-3*(data['nu21']+data['nu03'])**2) + (3*data['nu21']-data['nu03'])*(data['nu21']+data['nu03'])*(3*(data['nu30']+data['nu12'])**2-(data['nu21']+data['nu03'])**2)
data['hu5'] = (data['nu20']-data['nu02'])*((data['nu03']+data['nu12'])**2) + 4*data['nu11']*(data['nu30']+data['nu12'])*(data['nu21']+data['nu03'])
data['hu6'] = (3*data['nu21']-data['nu03'])*(data['nu21']+data['nu03'])*(3*(data['nu30']+data['nu12'])**2-(data['nu21']+data['nu03'])**2) + (data['nu30']-3*data['nu12'])*(data['nu21']+data['nu03'])*(3*(data['nu30']+data['nu12'])**2-(data['nu21']+data['nu03'])**2)
#
# Assumed capturing information
samplerate = 30.
# Bandpass filtering of signal
if args.filter_breathing:
a, b = signal.butter(7, [2*2./samplerate, 2*8./samplerate], 'bandpass')
else:
# Default to 0.5-14.5hz bandpass
a, b = signal.butter(5, [1./samplerate, 29./samplerate], 'bandpass')
widths = np.arange(1,15,0.01)
#
feature = args.feature
if not (feature in data.keys()):
print('Feature not available: ' + feature)
print('Available features: ' + ', '.join(data.keys()))
exit(1)
# Start the chunking of the data...
# Use "epochs"
has_found_epochs = False
if args.use_epochs == True:
if 'unique_epoch_id' in data.keys():
has_found_epochs = True
# Roughly clip chunks by epoch lengths...
if args.num_samples > 0:
num_epoch_per_plot = np.ceil(args.num_samples/300)
else:
num_epoch_per_plot = 1
data_groups = data.groupby('unique_epoch_id').groups
# Grab indices for groupings
data_chunks = []
cur_chunk_count = 0
cur_chunks = np.array([])
for i in data_groups:
if cur_chunk_count == num_epoch_per_plot:
data_chunks.append(np.reshape(cur_chunks, [-1]))
cur_chunks = np.array([])
cur_chunk_count = 0
# Pull out only the indices to match old method...
#cur_chunks = np.concatenate(cur_chunks, data_groups[i].values)
cur_chunks = np.concatenate((cur_chunks, np.array(data_groups[i].values)))
cur_chunk_count = cur_chunk_count + 1
# Old method (Plot constant durations)
if not has_found_epochs:
if args.num_samples < samplerate:
data_chunks = [np.arange(len(data))]
else:
data_chunks = [np.arange(args.num_samples)+i*args.num_samples for i,x in enumerate(np.arange(len(data)//args.num_samples))]
if (len(data)%args.num_samples) > samplerate:
data_chunks.append(np.arange(len(data)%args.num_samples)-1+args.num_samples*len(data_chunks))
for i, chunk in enumerate(data_chunks):
small_data = data.iloc[chunk,:]
# Filter and cwt transform the data
#fig = plt.figure(figsize=(12,9))
fig = (p9.ggplot()+p9.geom_blank(data= | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
import numpy as np
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import MinMaxScaler
def scal():
dataset = pd.read_csv('../train_cuting/train_cutting2_lstm_mean.csv')
dataset['Timestamp'] = pd.to_datetime(dataset['Timestamp'])
dataset = dataset.set_index('Timestamp')
dataset.index.name = 'date'
scaler = MinMaxScaler(feature_range=(0, 1))
values = dataset['Value']
values = values.values
values = values.reshape(-1, 1)
scaler.fit_transform(values)
return scaler
def data_process_lstm(path1=None, name1=None, path2=None, name2=None, scaler=None):
if path1 == None:
dataset = pd.read_csv(name1)
else:
dataset = pd.read_csv(path1 + '/' + name1)
dataset['Timestamp'] = pd.to_datetime(dataset['Timestamp'])
dataset = dataset.set_index('Timestamp')
dataset.index.name = 'date'
len = dataset['groups'].max()
values = dataset['Value']
values = values.values
values = values.reshape(-1, 1)
values = scaler.fit_transform(values)
dataset['Value'] = values
for i in range(1, len+1):
temp_data = dataset[dataset['groups'] == i]
temp_data = temp_data['Value']
temp_values = temp_data.values
temp_values = temp_values.astype('float32')
temp_values = temp_values.reshape(-1, 1)
reframed = series_to_supervised(temp_values, 24, 1)
if i == 1:
set = reframed
else:
set = | pd.concat([set, reframed]) | pandas.concat |
from datetime import datetime
import codecs, re, csv, pandas as pd
from django.db import connection, connections
from .middleware import get_current_ngrp
#from .forms import XDBX
G_SDICT = {}
G_DUPL = []
G_QNO = []
def to_test(ba):
return str(len(ba))
def to_byte_str(ba):
return str(ba)[2:-1]
def to_bytes(bs):
return bytes(codecs.decode(bs.strip(), 'unicode_escape'), 'iso_8859-1')
def rstr_to_str_u(bs):
return bytes(codecs.decode(bs.strip(), 'unicode_escape'), 'utf8').decode()
def rstr_to_str(bs):
return bytes(codecs.decode(bs.strip(), 'unicode_escape'), 'iso_8859-1').decode()
def hex_to_int(bv):
return str(int(bv, 16))
def hex_to_byte(hv):
return bytes.fromhex(hv.strip())
def to_txt(bv):
return bv.decode()
def to_ipv4(bv):
return ".".join(str(b) for b in bv)
def to_intbit(bv, s, l):
e = int.from_bytes(bv,byteorder='big')
d = 2 ** s
c = d * ((2 ** l) - 1)
return str(int((e & c) / d))
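# Example (illustrative): to_intbit(b'\xf0', 4, 4) -> '15' (the upper nibble of 0xF0)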
def to_intbitn(bv, s, l):
e = int.from_bytes(bv,byteorder='big')
d = 2 ** s
c = d * ((2 ** l) - 1)
return int((e & c) / d)
def to_hex(bv, d=None, n=None):
if d == None:
return bv.hex()
else:
a = bv.hex()
return d.join(a[j:j+n] for j in range(0, len(a), n))
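# Example (illustrative): to_hex(b'\xab\xcd\xef', ':', 2) -> 'ab:cd:ef'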
def to_intBE(bv,ed):
return str(int.from_bytes(bv,byteorder=ed))
def to_intB(bv):
return str(int.from_bytes(bv,byteorder='big'))
def to_intBn(bv):
return int.from_bytes(bv,byteorder='big')
def to_pcap_ts(bv, ty, ed):
s = int.from_bytes(bv[:4],byteorder=ed)
ns = int.from_bytes(bv[4:],byteorder=ed)
if ty == '128':
ts = (s * 4294967296) + ns
return (ts//1000000, ts%1000000)
return (s, ns)
def to_utc_d(val):
return str(datetime.utcfromtimestamp(val))
def to_utc(bv,ed):
e = int.from_bytes(bv,byteorder=ed)
return str(datetime.utcfromtimestamp(e))
def to_sigp(pa,sm):
for p in pa:
for k,v in sm.items():
if p in k.split(','): return v
return 'unk'
def to_hdl(bv,s, l, hd):
e = int.from_bytes(bv,byteorder='big')
d = 2 ** s
c = d * ((2 ** l) - 1)
hl = 4 * int((e & c) / d)
return hl - hd
def i_var(iv):
return str(iv)
def i_vara(iv,dl):
return dl.join(str(v) for v in iv)
def to_paylen(bv,ver):
if ver == 6:
cv = to_intBn(bv[:2])-8 if bv[2] == b'44' else to_intBn(bv[:2])
elif ver == 4: cv = to_intBn(bv[2:]) - to_intbitn(bv[0:1],0,4)*4
return str(cv)
def dupp(bv,bs):
if bv in G_DUPL: return 'Y'
if bs > 0:
G_DUPL[G_QNO[0]] = bv
G_QNO[0] = 0 if G_QNO[0] == bs else G_QNO[0] + 1
return 'N'
def defrag(recs,rno,skb,bv,va,bs):
tl = to_intBn(bv[2:4])
if tl == 0: return '0'
if tl < len(bv): bv = bv[:tl]
cv = str(bv[9])
ofs = to_intbitn(bv[6:8],0,13)
mf = to_intbit(bv[6:7],5,1)
hl = to_intbitn(bv[0:1],0,4)*4
fr0 = bv[8+hl:] if cv =='50' and ofs == 0 else bv[hl:]
if cv =='50' and mf == '0':
pl = bv[-14] + 14
fr0 = fr0[:-pl]
cv = str(bv[-13])
if ofs == 0 and mf != '1':
va.append([rno,fr0])
return cv
rl = len(recs)
clen = tl - hl
pid0 = to_intB(bv[4:6])
frl = {ofs:[rno,fr0]}
tlen = ofs*8 + clen if ofs != 0 and mf == '0' else 0
for i in range(1,20):
rn = rno + i
if rn >= rl: break
if recs[rn][28+skb:30+skb:].hex() != '0800': continue
rv = recs[rn][30+skb:]
if rv in G_DUPL: continue
tl = to_intBn(rv[2:4])
bv = rv[:tl]
ofs = to_intbitn(bv[6:8],0,13)
mf = to_intbit(bv[6:7],5,1)
pid = to_intB(bv[4:6])
if pid == pid0:
hl = to_intbitn(bv[0:1],0,4)*4
ipdl = tl - hl
clen += ipdl
if ofs != 0 and mf == '0': tlen = ofs*8 + ipdl
nh = str(bv[9])
fr = bv[8+hl:] if nh =='50' and ofs == 0 else bv[hl:]
if nh =='50' and mf == '0':
pl = bv[-14] + 14
fr = fr[:-pl]
cv = str(bv[-13])
frl.update({ofs:[rn, fr]})
G_DUPL[G_QNO[0]] = rv
G_QNO[0] = 0 if G_QNO[0] == bs else G_QNO[0] + 1
if clen == tlen: break
frs = dict(sorted(frl.items()))
pid, ipd = '', b''
for k,v in frs.items():
pid += ',%s' % v[0]
ipd += v[1]
va.append([pid[1:],ipd])
return cv
def defrag_g(recs,rno,bv,va,bs):
cv = str(bv[9])
ofs = to_intbitn(bv[6:8],0,13)
mf = to_intbit(bv[6:7],5,1)
hl = to_intbitn(bv[0:1],0,4)*4
fr0 = bv[8+hl:] if cv =='50' and ofs == 0 else bv[hl:]
if cv =='50' and mf == '0':
pl = bv[-14] + 14
fr0 = fr0[:-pl]
cv = str(bv[-13])
if ofs == 0 and mf != '1':
va.append([recs.loc[rno,'pidxx'],fr0])
return cv
rl = len(recs)
clen = to_intBn(bv[2:4]) - hl
pid0 = to_intB(bv[4:6])
frl = {ofs:[recs.loc[rno,'pidxx'],fr0]}
tlen = ofs*8 + clen if ofs != 0 and mf == '0' else 0
for i in range(1,20):
rn = rno + i
if rn >= rl: break
bv = to_bytes(recs.loc[rn,'apd'])
ofs = to_intbitn(bv[6:8],0,13)
mf = to_intbit(bv[6:7],5,1)
pid = to_intB(bv[4:6])
if pid == pid0:
hl = to_intbitn(bv[0:1],0,4)*4
ipdl = to_intBn(bv[2:4]) - hl
clen += ipdl
if ofs != 0 and mf == '0': tlen = ofs*8 + ipdl
nh = str(bv[9])
fr = bv[8+hl:] if nh =='50' and ofs == 0 else bv[hl:]
if nh =='50' and mf == '0':
pl = bv[-14] + 14
fr = fr[:-pl]
cv = str(bv[-13])
frl.update({ofs:[recs.loc[rn,'pidxx'], fr]})
G_DUPL[G_QNO[0]] = bv
G_QNO[0] = 0 if G_QNO[0] == bs else G_QNO[0] + 1
if clen == tlen: break
frs = dict(sorted(frl.items()))
pid, ipd = '', b''
for k,v in frs.items():
pid += '-%s' % v[0]
ipd += v[1]
va.append([pid[1:],ipd])
return cv
def defrag6(recs,rno,skb,bv,va,bs):
cv = str(bv[6])
if cv != '44':
if cv == '50':
pl = len(bv) - bv[-14] - 14
fr0 = bv[48:pl]
cv = str(bv[-13])
else: fr0 = bv[40:]
va.append([rno,fr0])
return cv
rl = len(recs)
clen = to_intBn(bv[4:6]) - 8
pid0 = to_intB(bv[44:48])
ofs = to_intbitn(bv[42:44],3,13)
mf = to_intbit(bv[43:44],0,1)
cv = str(bv[40])
fr0 = bv[56:] if cv =='50' and ofs == 0 else bv[48:]
if cv =='50' and mf == '0':
pl = bv[-14] + 14
fr0 = fr0[:-pl]
cv = str(bv[-13])
frl = {ofs:[rno,fr0]}
tlen = ofs*8 + clen if ofs != 0 and mf == '0' else 0
for i in range(1,20):
rn = rno + i
if rn >= rl: break
if recs[rn][28+skb:30+skb:].hex() != '86dd': continue
bv = recs[rn][30+skb:]
if bv in G_DUPL: continue
if str(bv[6]) == '44':
pid = to_intB(recs[rn][74+skb:78+skb])
if pid == pid0:
ofs = to_intbitn(bv[42:44],3,13)
mf = to_intbit(bv[43:44],0,1)
ipdl = to_intBn(bv[4:6]) - 8
clen += ipdl
if ofs != 0 and mf == '0': tlen = ofs*8 + ipdl
nh = str(bv[40])
fr = bv[56:] if nh =='50' and ofs == 0 else bv[48:]
if nh =='50' and mf == '0':
pl = bv[-14] + 14
fr = fr[:-pl]
cv = str(bv[-13])
frl.update({ofs:[rn, fr]})
G_DUPL[G_QNO[0]] = bv
G_QNO[0] = 0 if G_QNO[0] == bs else G_QNO[0] + 1
if clen == tlen: break
frs = dict(sorted(frl.items()))
pid, ipd = '', b''
for k,v in frs.items():
pid += ',%s' % v[0]
ipd += v[1]
va.append([pid[1:],ipd])
return cv
def defrag6g(recs,rno,bv,va,bs):
cv = str(bv[6])
if cv != '44':
if cv == '50':
pl = len(bv) - bv[-14] - 14
fr0 = bv[48:pl]
cv = str(bv[-13])
else: fr0 = bv[40:]
va.append([recs.loc[rno,'pidxx'],fr0])
return cv
rl = len(recs)
clen = to_intBn(bv[4:6]) - 8
pid0 = to_intB(bv[44:48])
ofs = to_intbitn(bv[42:44],3,13)
mf = to_intbit(bv[43:44],0,1)
cv = str(bv[40])
fr0 = bv[56:] if cv =='50' and ofs == 0 else bv[48:]
if cv =='50' and mf == '0':
pl = bv[-14] + 14
fr0 = fr0[:-pl]
cv = str(bv[-13])
frl = {ofs:[recs.loc[rno,'pidxx'],fr0]}
tlen = ofs*8 + clen if ofs != 0 and mf == '0' else 0
for i in range(1,20):
rn = rno + i
if rn >= rl: break
bv = to_bytes(recs.loc[rn,'apd'])
if str(bv[6]) == '44':
pid = to_intB(bv[44:48])
if pid == pid0:
ofs = to_intbitn(bv[42:44],3,13)
mf = to_intbit(bv[43:44],0,1)
ipdl = to_intBn(bv[4:6]) - 8
clen += ipdl
if ofs != 0 and mf == '0': tlen = ofs*8 + ipdl
nh = str(bv[40])
fr = bv[56:] if nh =='50' and ofs == 0 else bv[48:]
if nh =='50' and mf == '0':
pl = bv[-14] + 14
fr = fr[:-pl]
cv = str(bv[-13])
frl.update({ofs:[recs.loc[rn,'pidxx'], fr]})
G_DUPL[G_QNO[0]] = bv
G_QNO[0] = 0 if G_QNO[0] == bs else G_QNO[0] + 1
if clen == tlen: break
frs = dict(sorted(frl.items()))
pid, ipd = '', b''
for k,v in frs.items():
pid += '-%s' % v[0]
ipd += v[1]
va.append([pid[1:],ipd])
return cv
def apdata(bv,tt,sgnp,va):
dl = len(bv)
if tt == '132':
sp = to_intBn(bv[14:16]) if bv[12] == 3 else 0
if dl > (12+sp) and bv[12+sp] == 0:
va.append(bv[sp+28:])
va.append(to_intbitn(bv[sp+13:sp+14],0,4) % 2)
va.append(to_intBn(bv[sp+16:sp+20]))
va.append(dl - sp - 28)
else: va.append('')
elif tt == '6':
flg = to_intbitn(bv[13:14],0,4)
if flg == 0 or flg == 8:
hl = to_intbitn(bv[12:13],4,4) * 4
va.append(bv[hl:])
va.append(flg)
va.append(to_intBn(bv[4:8]))
va.append(dl - hl)
else: va.append('')
else:
va.append(bv[16:]) if sgnp == 'gtp' else va.append(bv[8:])
va.append('')
va.append('')
va.append(dl - 16) if sgnp == 'gtp' else va.append(dl - 8)
return '' if va[0] == '' or va[3] == 0 else str(va[3])
def ap_msg(apd,sgnp='sip',dc=None):
if sgnp == 'sip':
m = re.search(r'(^(?=[A-Z]+\s(sip|tel):)|(?<=^SIP/2\.0\s))\S{3}', apd.decode('utf-8'))
cv = 'NaN' if m == None else m[0]
elif sgnp == 'dia':
if apd[0] == 1 and to_intbitn(apd[4:5],0,4) == 0 and to_intbitn(apd[5:7],4,12) == 0:
cc = to_intB(apd[5:8])
cv = '%s%s' % (dc[cc] if cc in dc else cc, 'R' if to_intbitn(apd[4:5],4,4) > 7 else 'A')
else: cv = 'NaN'
elif sgnp == 'gtp':
cv = 'ipv%s' % to_intbit(apd[0:1],4,4)
return cv
def sipmsg(sipreq):
m = re.search(r'(^(?=[A-Z]+\s(sip|tel):)|(?<=^SIP/2\.0\s))\S{3}', sipreq)
return 'NaN' if m == None else m[0]
def deseg(recs,rno,va,bs):
fst,lst,apl = recs.loc[rno,'apmsg'], recs.loc[rno,'last'], recs.loc[rno,'appl']
sid0 = '%s%s%s%s' % (recs.loc[rno,'ip_s'], recs.loc[rno,'port_s'], recs.loc[rno,'ip_d'], recs.loc[rno,'port_d'])
if fst != 'NaN' and lst == '8':
va.append([recs.loc[rno,'pidxx'],recs.loc[rno,'apd']])
return apl
mss, flen, rl = '', 0, len(recs)
seg0 = recs.loc[rno,'segid']
if fst == 'NaN':
for i in range(1,20):
rn = rno + i
if rn >= rl: break
if recs.loc[rn,'ts_dt'][-5:] != recs.loc[rno,'ts_dt'][-5:]: break
sid = '%s%s%s%s' % (recs.loc[rn,'ip_s'], recs.loc[rn,'port_s'], recs.loc[rn,'ip_d'], recs.loc[rn,'port_d'])
seg = recs.loc[rn,'segid']
if sid+seg[:4] == sid0+seg0[:4]:
if recs.loc[rn,'apmsg'] != 'NaN':
mss, flen = recs.loc[rn,'appl'], int(seg)
break
else: mss, flen = apl, int(seg0)
if mss == '': return ''
if lst == '8' and fst == 'NaN':
if apl == mss: lst = '0'
tlen,clen = 0,0
frl = {seg0:[recs.loc[rno,'pidxx'],recs.loc[rno,'apd']]}
appl = int(apl)
if lst == '0': clen = appl
else: tlen = int(seg0)
for i in range(1,20):
rn = rno + i
if rn >= rl: break
if recs.loc[rn,'ts_dt'][-5:] != recs.loc[rno,'ts_dt'][-5:]: break
sid = '%s%s%s%s' % (recs.loc[rn,'ip_s'], recs.loc[rn,'port_s'], recs.loc[rn,'ip_d'], recs.loc[rn,'port_d'])
seg = recs.loc[rn,'segid']
if sid+seg[:4] == sid0+seg0[:4]:
fst,lst,apl = recs.loc[rn,'apmsg'], recs.loc[rn,'last'], recs.loc[rn,'appl']
if fst != 'NaN' and int(seg) > flen: break
appl += int(apl)
if fst == 'NaN' and lst == '8':
if apl == mss: lst = '0'
if lst == '0': clen += appl
else: tlen = int(seg)
frl.update({seg:[recs.loc[rn,'pidxx'], recs.loc[rn,'apd']]})
G_DUPL[G_QNO[0]] = seg
G_QNO[0] = 0 if G_QNO[0] == bs else G_QNO[0] + 1
if flen + clen == tlen: break
frs = dict(sorted(frl.items()))
pid, ipd = '', ''
for k,v in frs.items():
pid += '&%s' % v[0]
ipd += v[1]
va.append([pid[1:],ipd])
return str(appl)
def deseg_s(recs,rno,va,bs):
fst,lst,apl = recs.loc[rno,'apmsg'], recs.loc[rno,'last'], recs.loc[rno,'appl']
sid0 = '%s%s%s%s' % (recs.loc[rno,'ip_s'], recs.loc[rno,'port_s'], recs.loc[rno,'ip_d'], recs.loc[rno,'port_d'])
if fst != 'NaN' and lst == '1':
va.append([recs.loc[rno,'pidxx'],recs.loc[rno,'apd']])
return apl
rl = len(recs)
seg0 = recs.loc[rno,'segid']
frl = {seg0:[recs.loc[rno,'pidxx'],recs.loc[rno,'apd']]}
appl = int(apl)
flen = 0 if fst == 'NaN' else int(seg0)
tlen = 0 if lst == '0' else int(seg0)
for i in range(1,20):
rn = rno + i
if rn >= rl: break
tid = int(recs.loc[rn,'segid'])
if abs(tid - int(seg0)) < 10:
if recs.loc[rn,'last'] == '1' and tlen == 0: tlen = tid
elif recs.loc[rn,'apmsg'] != 'NaN' and flen == 0: flen = tid
if flen > 0 and tlen > 0: break
if tlen == 0 or flen == 0: return ''
j = 0
for i in range(1,20):
rn = rno + i
if rn >= rl: break
if recs.loc[rn,'ts_dt'][-5:] != recs.loc[rno,'ts_dt'][-5:]: break
sid = '%s%s%s%s' % (recs.loc[rn,'ip_s'], recs.loc[rn,'port_s'], recs.loc[rn,'ip_d'], recs.loc[rn,'port_d'])
seg = recs.loc[rn,'segid']
if flen <= int(seg) <= tlen and sid == sid0:
appl += int(recs.loc[rn,'appl'])
j += 1
frl.update({seg:[recs.loc[rn,'pidxx'], recs.loc[rn,'apd']]})
G_DUPL[G_QNO[0]] = seg
G_QNO[0] = 0 if G_QNO[0] == bs else G_QNO[0] + 1
if flen + j == tlen: break
frs = dict(sorted(frl.items()))
pid, ipd = '', ''
for k,v in frs.items():
pid += '&%s' % v[0]
ipd += v[1]
va.append([pid[1:],ipd])
return str(appl)
def pcap_udp(recs,rno,va):
va.append([recs.loc[rno,'pidxx'],recs.loc[rno,'apd']])
return recs.loc[rno,'appl']
def siphdr(sipmsg,sepr,eepr):
hdr = ''
m = re.search(sepr, sipmsg)
if m != None:
fs = m.end()
m = re.search(eepr, sipmsg[fs:])
fe = fs + 10 if m == None else fs + m.start()
hdr = sipmsg[fs:fe]
return hdr
def dumm(ep):
return ep
def conc(varr,d):
return d.join(va for va in varr if va != '')
def repstr(val,ostr,nstr):
return re.sub(ostr,nstr,val)
def x_col(val,dp,sp,ep,dl):
return dl.join(r[sp:ep].strip() for r in val.split(dp))
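# Example (illustrative): x_col('abc,def,ghi', ',', 0, 2, '-') -> 'ab-de-gh'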
def steps(n,s,d=','):
cv = d.join('%s&&%s' % (x-s,x-1) for x in range(s,n+1,s))
l = n % s
if l > 0: cv += '%s%s&&%s' % ('' if cv == '' else ',', n-l, n-1)
return cv
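# Example (illustrative): steps(10, 3) -> '0&&2,3&&5,6&&8,9&&9'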
def x_grp(va,k,fc=['G'],dp=',',d=','):
data = {}
data.update({k:va[k].split(dp)})
if fc[0] == 'G':
df = pd.DataFrame(data).sort_values(k).groupby(k).count()
return d.join(idx for idx, row in df.iterrows())
data.update({fc[1]:va[fc[1]].split(dp) if fc[0] != 'S' else list(map(int, va[fc[1]].split(dp)))})
if fc[0] == 'C':
df = pd.DataFrame(data).sort_values(k).groupby(k).count()
elif fc[0] == 'S':
df = pd.DataFrame(data).sort_values(k).groupby(k).sum()
elif fc[0] == 'F':
df = | pd.DataFrame(data) | pandas.DataFrame |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import pandas as pd
import plotly.tools as tls
import plotly.graph_objs as go
import statsmodels.api as sm
import matplotlib.pyplot as plt
from scipy import stats
from ..graph import ScatterGraph, SubplotsGraph, BarGraph, HeatmapGraph
def _group_return(pred_label: pd.DataFrame = None, reverse: bool = False, N: int = 5, **kwargs) -> tuple:
"""
:param pred_label:
:param reverse:
:param N:
:return:
"""
if reverse:
pred_label["score"] *= -1
pred_label = pred_label.sort_values("score", ascending=False)
# Group1 ~ Group5 only consider the dropna values
pred_label_drop = pred_label.dropna(subset=["score"])
# Group
t_df = pd.DataFrame(
{
"Group%d"
% (i + 1): pred_label_drop.groupby(level="datetime")["label"].apply(
lambda x: x[len(x) // N * i : len(x) // N * (i + 1)].mean()
)
for i in range(N)
}
)
t_df.index = | pd.to_datetime(t_df.index) | pandas.to_datetime |
import pandas as pd
import os
import numpy as np
from math import floor
from math import ceil
import soundfile as sf
import ML.Utils.InvalidParameterError as InvalidParameterError
def convert_to_letters(num):
"""Converts number to string of capital letters in Excel column name
fashion, i.e. 1 -> A, 26 -> Z, 27 -> AA ...."""
string_equiv = ""
while num > 0:
currNum = (num - 1) % 26
num = (num - 1) // 26
string_equiv = chr(currNum + ord("A")) + string_equiv
return string_equiv
def get_file_name(file_path, timestamp, outDir):
file_name_without_ext = os.path.split(file_path)[-1] + "_Timestamp_{}".format(timestamp)
file_name_without_ext = os.path.join(outDir, file_name_without_ext)
file_extension = os.path.splitext(file_path)[-1]
outpath = file_name_without_ext + file_extension
i = 0
while os.path.exists(outpath):
i += 1
outpath = file_name_without_ext + "({})".format(i) + file_extension
with sf.SoundFile(file_path, "r") as f:
sr = f.samplerate
channels = f.channels
start = int(max((timestamp - 2.5) * sr, 0))
end = int(min((timestamp + 2.5) * sr, len(f)))
f.seek(start)
data = f.read(frames=(end - start), always_2d=True)
with sf.SoundFile(outpath, "w", samplerate=sr, channels=channels) as f:
f.write(data)
return outpath
def create_flutter_output(outFrame, outDir):
"""
Auxiliary Function to create output for Flutter Frontend.
"""
map_from_type_to_files_with_secs = {}
for i in outFrame.index:
if outFrame["CallType"][i] not in map_from_type_to_files_with_secs.keys():
map_from_type_to_files_with_secs[outFrame["CallType"][i]] = []
map_from_type_to_files_with_secs[outFrame["CallType"][i]].append(
(outFrame["FullPath"][i], outFrame["SecondDetected"][i])
)
final_output = {
"CallType": [],
"Number": [],
"File1": [],
"File2": [],
"File3": [],
"File4": [],
"File5": [],
}
FileKeys = ["File1", "File2", "File3", "File4", "File5"]
for callType in map_from_type_to_files_with_secs.keys():
final_output["CallType"].append(callType)
final_output["Number"].append(str(len(map_from_type_to_files_with_secs[callType])))
if len(map_from_type_to_files_with_secs[callType]) < 5:
for i in range(len(map_from_type_to_files_with_secs[callType])):
file_name = get_file_name(
*(map_from_type_to_files_with_secs[callType][i]), outDir
)
final_output[FileKeys[i]].append(file_name)
for i in range(len(map_from_type_to_files_with_secs[callType]), 5):
final_output[FileKeys[i]].append(" ")
else:
idx = np.random.choice(
len(map_from_type_to_files_with_secs[callType]), 5, replace=False
)
for i in range(5):
file_name = get_file_name(
*(map_from_type_to_files_with_secs[callType][idx[i]]), outDir
)
final_output[FileKeys[i]].append(file_name)
final_output = | pd.DataFrame.from_dict(final_output) | pandas.DataFrame.from_dict |
import os
import glob
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
def load_dataset_file(dataset_file_path):
"""
This method loads dataset file.
Currently supported formats are .csv and .json.
:param dataset_file_path: path of the dataset file
:return: pandas dataframe that contains dataset information
"""
_, file_extension = os.path.splitext(dataset_file_path)
if file_extension == ".csv":
data_info = | pd.read_csv(dataset_file_path) | pandas.read_csv |
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Series, Timestamp
import pandas._testing as tm
class TestDataFrameAppend:
def test_append_empty_list(self):
# GH 28769
df = DataFrame()
result = df.append([])
expected = df
tm.assert_frame_equal(result, expected)
assert result is not df
df = DataFrame(np.random.randn(5, 4), columns=["foo", "bar", "baz", "qux"])
result = df.append([])
expected = df
tm.assert_frame_equal(result, expected)
assert result is not df # .append() should return a new object
def test_append_series_dict(self):
df = DataFrame(np.random.randn(5, 4), columns=["foo", "bar", "baz", "qux"])
series = df.loc[4]
msg = "Indexes have overlapping values"
with pytest.raises(ValueError, match=msg):
df.append(series, verify_integrity=True)
series.name = None
msg = "Can only append a Series if ignore_index=True"
with pytest.raises(TypeError, match=msg):
df.append(series, verify_integrity=True)
result = df.append(series[::-1], ignore_index=True)
expected = df.append(
DataFrame({0: series[::-1]}, index=df.columns).T, ignore_index=True
)
tm.assert_frame_equal(result, expected)
# dict
result = df.append(series.to_dict(), ignore_index=True)
tm.assert_frame_equal(result, expected)
result = df.append(series[::-1][:3], ignore_index=True)
expected = df.append(
DataFrame({0: series[::-1][:3]}).T, ignore_index=True, sort=True
)
tm.assert_frame_equal(result, expected.loc[:, result.columns])
msg = "Can only append a dict if ignore_index=True"
with pytest.raises(TypeError, match=msg):
df.append(series.to_dict())
# can append when name set
row = df.loc[4]
row.name = 5
result = df.append(row)
expected = df.append(df[-1:], ignore_index=True)
tm.assert_frame_equal(result, expected)
def test_append_list_of_series_dicts(self):
df = DataFrame(np.random.randn(5, 4), columns=["foo", "bar", "baz", "qux"])
dicts = [x.to_dict() for idx, x in df.iterrows()]
result = df.append(dicts, ignore_index=True)
expected = df.append(df, ignore_index=True)
tm.assert_frame_equal(result, expected)
# different columns
dicts = [
{"foo": 1, "bar": 2, "baz": 3, "peekaboo": 4},
{"foo": 5, "bar": 6, "baz": 7, "peekaboo": 8},
]
result = df.append(dicts, ignore_index=True, sort=True)
expected = df.append(DataFrame(dicts), ignore_index=True, sort=True)
tm.assert_frame_equal(result, expected)
def test_append_missing_cols(self):
# GH22252
# exercise the conditional branch in append method where the data
# to be appended is a list and does not contain all columns that are in
# the target DataFrame
df = DataFrame(np.random.randn(5, 4), columns=["foo", "bar", "baz", "qux"])
dicts = [{"foo": 9}, {"bar": 10}]
with tm.assert_produces_warning(None):
result = df.append(dicts, ignore_index=True, sort=True)
expected = df.append(DataFrame(dicts), ignore_index=True, sort=True)
tm.assert_frame_equal(result, expected)
def test_append_empty_dataframe(self):
# Empty df append empty df
df1 = DataFrame()
df2 = DataFrame()
result = df1.append(df2)
expected = df1.copy()
tm.assert_frame_equal(result, expected)
# Non-empty df append empty df
df1 = DataFrame(np.random.randn(5, 2))
df2 = DataFrame()
result = df1.append(df2)
expected = df1.copy()
tm.assert_frame_equal(result, expected)
# Empty df with columns append empty df
df1 = DataFrame(columns=["bar", "foo"])
df2 = DataFrame()
result = df1.append(df2)
expected = df1.copy()
tm.assert_frame_equal(result, expected)
# Non-Empty df with columns append empty df
df1 = DataFrame(np.random.randn(5, 2), columns=["bar", "foo"])
df2 = DataFrame()
result = df1.append(df2)
expected = df1.copy()
tm.assert_frame_equal(result, expected)
def test_append_dtypes(self):
# GH 5754
# row appends of different dtypes (so need to do by-item)
# can sometimes infer the correct type
df1 = DataFrame({"bar": Timestamp("20130101")}, index=range(5))
df2 = DataFrame()
result = df1.append(df2)
expected = df1.copy()
tm.assert_frame_equal(result, expected)
df1 = DataFrame({"bar": Timestamp("20130101")}, index=range(1))
df2 = DataFrame({"bar": "foo"}, index=range(1, 2))
result = df1.append(df2)
expected = DataFrame({"bar": [Timestamp("20130101"), "foo"]})
tm.assert_frame_equal(result, expected)
df1 = DataFrame({"bar": Timestamp("20130101")}, index=range(1))
df2 = DataFrame({"bar": np.nan}, index=range(1, 2))
result = df1.append(df2)
expected = DataFrame(
{"bar": Series([Timestamp("20130101"), np.nan], dtype="M8[ns]")}
)
tm.assert_frame_equal(result, expected)
df1 = DataFrame({"bar": Timestamp("20130101")}, index=range(1))
df2 = DataFrame({"bar": np.nan}, index=range(1, 2), dtype=object)
result = df1.append(df2)
expected = DataFrame(
{"bar": Series([ | Timestamp("20130101") | pandas.Timestamp |
import os, glob
import pandas as pd
path = "/Users/saslan.19/Desktop/Programming/Music Recommendation/RYMScraper/examples/Exports"
all_files = sorted(glob.glob(os.path.join(path, "*.csv")))
print(all_files)
df_from_each_file = (pd.read_csv(f, sep=None) for f in all_files)
df_merged = | pd.concat(df_from_each_file, ignore_index=True) | pandas.concat |
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 5 12:51:54 2016
@author: tkc
"""
import os, re, glob
import pandas as pd
import numpy as np
from math import factorial # used by Savgol matrix
from io import StringIO
import datetime
from scipy import optimize
def rangefromstring(x):
result = []
for part in x.split(','):
if '-' in part:
a, b = part.split('-')
a, b = int(a), int(b)
result.extend(range(a, b + 1))
else:
a = int(part)
result.append(a)
return result
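# Example (illustrative): rangefromstring('100-102,110') -> [100, 101, 102, 110]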
def unpackfitregs(df):
''' The loaded data frame has an eV range, a list of background regions and a fit type;
unpack these from the dataframe into a list for each fit region:
1) fitrange is the total eV range (e.g. 0-100), 2) backgroundregions are index #s (or energies in eV) of regions commonly without peaks,
3) fittype (mostly parabola) and 4) threshold for derivative knockout;
also add the overlap range between adjacent fits'''
Fitregions=[]
overlapregs=[] # list of lists containing adjacent overlapping index ranges
# TODO test to ensure that normal eV range corresponds to range of indices
for i in range(0,len(df)):
tempstr=df.iloc[i]['Backgroundregs']
indexrange=rangefromstring(tempstr) # converts string describing range to actual range
Fitregions.append([df.iloc[i]['Fitrange'],indexrange,df.iloc[i]['Fittype'], df.iloc[i]['Threshold']])
return Fitregions
def makesavgol(df):
'''Perform python smooth-diff used to guide selection of background regions for SEM-EDX spectra
'''
df['Savgol']=0.0 # add/initialize col for 2nd deriv Sav-gol
thisdim=len(df)
thisreg=df['Counts'] # convert to Series (keep these index)
myarr=np.asarray(thisreg) # convert to numpy array
window_size=11
deriv=2
order=2 # order of savgol fit
rate=1
order_range = range(order+1) # range object
half_window = (window_size -1) // 2 # type int
# precompute coefficients
b = np.mat([[k**i for i in order_range] for k in range(-half_window, half_window+1)])
# b is matrix 3 by window size
m = np.linalg.pinv(b).A[deriv] * rate**deriv * factorial(deriv) # series as long as array
# linalg.pinv gets pseudo-inverse of a matrix (window-sized series)
# .A of any matrix returns it as ndarray object
# Pad the signal at the extremes with values taken from the signal itself
firstvals = myarr[0] - np.abs(myarr[1:half_window+1][::-1] - myarr[0] )
lastvals = myarr[-1] + np.abs(myarr[-half_window-1:-1][::-1] - myarr[-1])
myarr= np.concatenate((firstvals, myarr, lastvals))
# Now convolve input signal and sav-gol processing 1D array .. thisreg is numpy array w/ savgol results
myarr=np.convolve( myarr, m[::-1], mode='valid')
thisreg=pd.Series(myarr) # convert array to series
thisreg.loc[0:thisdim]=myarr # copies numpy array but keeps same indices
df['Savgol']=thisreg # copy deriv column to dataframe
return df # returns savitsky-golay smooth diff over same full region
def openSEM(SEMfileName):
'''Open csv as dataframe if it exists or if not strip header from psmsa/emsa and import as dataframe '''
csvname=str(SEMfileName.split('.')[0])+'.csv'
try:
SEMfile=pd.read_csv(csvname, encoding='cp437')
except: # if csv doesn't exist, just open/strip psmsa
with open(SEMfileName, 'r') as file:
filedata = file.read()
filedata =filedata.split('#SPECTRUM :')[1]
filedata =filedata.split('#ENDOFDATA : ')[0]
thisdata=StringIO(filedata)
SEMfile=pd.read_csv(thisdata)
try:
SEMfile=SEMfile.drop(SEMfile.columns[[2]], axis=1) # drop erroneous 3rd column if present
except:
print('') # ignore error if 3rd column not present
SEMfile.columns=['Energy','Counts']
return SEMfile # should return data as pandas dataframe
def fitparabola(df, SEMfileName):
'''Pass appropriate chunk from Auger spectral dataframe, perform polynomial/parabola fit
return chunk with backfit column added '''
xcol=df['Energy']
ycol=df['Counts'] # Counts1, Counts2 or whatever
# find relative minimum
try:
A,B,C=np.polyfit(xcol, ycol, 2)
except: # deal with common problems with linregress
print('Fitting error from ', "{0:.2f}".format(df.Energy.min()),'to ',"{0:.2f}".format(df.Energy.max()), ' in file ', SEMfileName)
fitparams=('n/a','n/a','n/a') # return all n/a
return df, fitparams
fitparams=(A, B, C) # tuple to return coeffs of 2nd order poly fit
for index,row in df.iterrows(): # write this fit into this chunk of data (redundant?)
xval=df.loc[index]['Energy']
yval= A * xval**2+ B * xval + C
df=df.set_value(index, 'Backfit', yval)
return df, fitparams
def findfitregion(df, fitregion, threshold):
'''Passing single list of allowable index #s for background fits (no duplicates)
remove from the list of allowable indices any that show high smoothed derivatives (i.e. not good for background fitting) '''
Backfitdf=df.ix[[x for x in fitregion]] # filter out those not in allowable background ranges
# these are loaded from SEM_backfit_regions.csv
Backfitdf=Backfitdf.dropna(subset=['Counts']) # drops above (set to na by ix)
# now additionally filter out those with derivative above threshold value
Backfitdf=Backfitdf[(Backfitdf['Savgol']<threshold) & (Backfitdf['Savgol']>-threshold)]
return Backfitdf
def findelemregions(Elements, SEMquantparams):
''' Takes element string and returns standard Elemdata for each elem symbol containing params
needed for peak finding and quant
tuple for integ peak is symbol, ideal peak index #, and integ kfactor
don't apply energy shifts here... apply later when doing integrate'''
Elemdata=[]
try:
for i, elem in enumerate(Elements):
# find row in AESquantparams for this element
thiselemdata=SEMquantparams[(SEMquantparams['element']==elem)]
thiselemdata=thiselemdata.squeeze() # series with this elements params
# integ peak position value is relative to negpeak in smooth-diff (i.e. -5 is 5 eV below ideal negpeak)
idealindex=int((thiselemdata.energy+.01)*100) # ideal index value of SEM-EDX peak from energy in keV
kfact=thiselemdata.kfactor # typical sensitivity k-factor associated with element for integration
errkfact=thiselemdata.errkfact
mass=thiselemdata.mass
# full peak width in keV from SEMquantparams (usually 0.15keV or 15 channels at 0.1eV/chan)
width=int(((thiselemdata.fullwidth*100)-1)/2) # integration width in channels for direct integration for this element
# total # of channels in AESquantparams but include n-1/2 channels on either side of peak center (usually width is 8 channels)
#Elemdata is a list (of length number of elements) containing length5 tuples
elemtuple=(elem, idealindex, width, kfact, errkfact, mass) # add tuple with info for this element
Elemdata.append(elemtuple) # now contains proper limits on fitting regions
except:
print('Quant parameters are not properly loaded.')
return Elemdata
def fitgauss(df, halfwidth, elem, SEMfileName, savegauss=True):
''' Gaussian fit of direct peaks (pass SEMfile just around peaks region
no need to save Gaussian fit, just return width and other params
integwidth pass from AESquantparams value'''
# Remove any nan values from peak region (shouldn't be any though)
df=df.dropna(subset=['Subdata']) # remove nan entries from peak
# Estimate initial Gaussian parameters from data
xc=df['Subdata'].idxmax() # estimate center based on peak max index
xc=df.loc[xc]['Energy'] # associated energy value near center
peakarea=df['Subdata'].sum() # decent area estimate
y0=0 #
width=0.01*(2*halfwidth+1) # full width estimate in keV from half-width in channels
params0=[xc,width,peakarea,y0] # initial params list (first guess at gaussian params)
xcol=df['Energy']
ycol=df['Subdata']
xcol=xcol.as_matrix() # convert both to numpy matrices
ycol=ycol.as_matrix()
# define standard gaussian funct (xc, width, area and yoffset are init params)
gaussian=lambda params, x: params[3]+params[2]/(params[1]*np.sqrt(2*np.pi))*np.exp(-((x-params[0])**2/(2*params[1]**2)))
# thisgauss= gaussian(params0,xcol)
errfunc=lambda p, xcol, ycol: ycol- gaussian(p,xcol) # lambda error funct definition
# sigma2FWHM = lambda sigma: sigma * sqrt(2 * log(2)) * 2 / sqrt(2) # convert Gaussian widths to FWHM?
try:
fitparams, cov, infodict, mesg, ier =optimize.leastsq(errfunc,params0,args=(xcol,ycol),full_output=True)
ss_err=(infodict['fvec']**2).sum()
ss_tot=((ycol-ycol.mean())**2).sum()
rsquared=1-(ss_err/ss_tot)
except: # fitting problem
print('Gaussian fitting error for', elem, ' peak in file ', SEMfileName)
fitparams=('n/a','n/a','n/a','n/a') # return all n/a
rsquared='n/a'
ier='n/a'
return df, fitparams, rsquared, ier
if savegauss==True:
df['Gauss']='' # add col for gaussian fit
for index,row in df.iterrows():
xval=df.loc[index]['Energy']
yval=fitparams[3]+fitparams[2]/(fitparams[1]*np.sqrt(2*np.pi))*np.exp(-((xval-fitparams[0])**2/(2*fitparams[1]**2)))
df.set_value(index,'Gauss',yval)
return df, fitparams, rsquared, ier
def fitpeaks(SEMfile, Elemdata, logmatch, savegauss=True):
''' Gaussian fit of major peaks in single spectrum, shift is list of energy shifts of negpeak (same order as Eledata (opens source spectrum as SEMfile,
fits peak backgrounds above and below using Elemdata, also saves linear fit params to logdataframe with position/amplitude/etc;
desired elements out of data range are skipped (in prior findindices function)
# Saving of gaussian fits of peaks could be stored as separate csv if this was ever desired... probably not
'''
SEMfileName=logmatch.Filename # only used for error reporting
# Create temp df to hold and pass linear fit data
mycols=['Basename', 'Filenumber', 'Point','Filename', 'Filepath', 'Sample', 'Comments', 'Element',
'Xc', 'Width', 'Peakarea', 'Y0','Rsquared'] # for gaussian peak fits
mycols2=['Basename','Filenumber', 'Point', 'Filename', 'Filepath', 'Sample', 'Comments', 'Element', 'Energy', 'Shift', 'Rawcounts',
'Backcounts', 'Subtractedcounts', 'Adj counts', '% err', 'Significance' , 'Basis', 'Errbasis',] # for integration results
Peakfits=pd.DataFrame(columns=mycols) # blank df for this spectrum's peak fits
Integresults= | pd.DataFrame(columns=mycols2) | pandas.DataFrame |
import numpy as np
import pandas
if __name__ == "__main__":
max_range = 20
stats = {
"alpha": 1,
"beta": 2,
"gamma": 3,
}
series = {}
for key, step in stats.items():
indices = np.array(list(range(0, max_range, step)))
values = np.arange(len(indices))
series[key] = pandas.Series(values, indices)
data = | pandas.DataFrame(series) | pandas.DataFrame |
# coding=utf-8
import logging
import functools
import datetime
import pandas as pd
log = logging.getLogger(__name__)
BOOKING_SPREADSHEET_NAME = "Family Camp Bookings"
RESPONSE_SHEET_NAME = "Form responses 9"
INVOICE_SHEET_NAME = "Invoicing"
ACTIVITIES_SHEET_NAME = "Activities"
MAX_NUM_OF_CAMPERS = 10
I_REF = "Group Ref"
I_GROUP = "Group Name"
I_TEL = "Tel"
I_EMAIL = "Email"
I_ADDR = "Address"
I_NEED_HELP = "Need Help"
I_WILLING_TO_HELP = "Willing to Help"
I_ADULTS = "Num. Adults"
I_CHILD = "Num. Children"
I_INFANTS = "Num. Infants"
I_TENTS = "Num. Tents"
I_CARAVANS = "Num. Caravans"
I_FIELDS = [I_REF, I_GROUP, I_TEL, I_EMAIL, I_ADDR,
I_NEED_HELP, I_WILLING_TO_HELP,
I_ADULTS, I_INFANTS, I_CHILD, I_TENTS, I_CARAVANS]
I_IDX_OF_FIRST_NON_EMPTY_COLUMN = 14 # Amount Due
# Exact text used in the "willing to help" question.
WILLING_TO_HELP = "We are happy to provide help and advice "\
"to another family if they need us."
NEED_HELP = "We would like to have another family on camp to "\
"support us if we need it."
I_INVOICE_SENT = "Invoice Sent"
I_AMOUNT_DUE = "Invoiced Total"
REF = "Timestamp"
FAMILY = "Family Name"
EMAIL = "Email Address"
ADDR = "Family Address"
TEL = "Family Telephone Number"
ASSO = "Family Association with 7th Lichfield"
TENTS = "Family Number of Tents"
CARAVANS = "Family Number of Caravans or Motorhomes"
HELP = "Please indicate if you would like support or if "\
"you can provide support for another family."
HOME_CONTACT = "Home Contact Name"
HOME_NUMBER = "Home Contact Number"
GROUP_FIELDS = [REF, FAMILY, EMAIL, ADDR, TEL, TENTS,
CARAVANS, ASSO, HELP, HOME_CONTACT, HOME_NUMBER]
NAME = "First Name{}"
SURNAME = "Surname{}"
DIET = "Dietary Requirements{}"
OVER18 = "Over 18 at the start of the camp?{}"
AGE = "Age at start of camp (if under 18){}"
DBS = "DBS Status (if over 18){}"
PRI = "Primary Activities{}"
OTHER = "Other Activities{}"
BASE_CAMPER_FIELDS = [
NAME, SURNAME, DIET, OVER18, AGE, DBS, PRI, OTHER]
CAMPER_FIELDS = [field.format('') for field in BASE_CAMPER_FIELDS]
A_IDX_OF_FIRST_NON_EMPTY_COLUMN = 20 # The last activity column
# List of available actvities.
# This must be consistent with the names on the form
# and in the Activities tab on the spreadsheet.
ACTIVITIES = ['Archery',
'Blindfold Trail',
'BMX Biking',
'Canoeing',
'Caving',
'Climbing',
'Crystal Maze',
'Fire Lighting',
'Pottery Painting',
"It's a Knockout"]
A_FIELDS = [I_REF, FAMILY] + CAMPER_FIELDS + ACTIVITIES
def camper_fields(camper):
return [
f.format(camper) for f in BASE_CAMPER_FIELDS]
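# e.g. camper_fields(" (Camper 1)") -> ["First Name (Camper 1)", "Surname (Camper 1)", ...] (illustrative)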
class Bookings(object):
"""Represents the bookings sheet that is populated by the booking form."""
def __init__(self, gs):
self._gs = gs
self._wks = self._gs.worksheet(RESPONSE_SHEET_NAME)
self._df = pd.DataFrame(data=self._wks.get_all_values()[1:],
columns=self._wks.row_values(1))
self._norm = self._normalize()
self._campers = self._get_campers()
def _normalize(self):
"Take each of the camper field and move them so that we end up with"
"a row for each camper."
# Extract each camper.
campers = [self._df[GROUP_FIELDS+camper_fields(
" (Camper {})".format(i))] for i in range(1, MAX_NUM_OF_CAMPERS+1)]
for camper in campers:
camper.columns = [GROUP_FIELDS+camper_fields('')]
norm = | pd.concat(campers) | pandas.concat |
import warnings
import numpy as np
import pytest
from pandas import (
DataFrame,
Index,
MultiIndex,
Series,
Timestamp,
concat,
date_range,
isna,
notna,
)
import pandas._testing as tm
import pandas.tseries.offsets as offsets
def f(x):
# suppress warnings about empty slices, as we are deliberately testing
# with a 0-length Series
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
message=".*(empty slice|0 for slice).*",
category=RuntimeWarning,
)
return x[np.isfinite(x)].mean()
@pytest.mark.parametrize("bad_raw", [None, 1, 0])
def test_rolling_apply_invalid_raw(bad_raw):
with pytest.raises(ValueError, match="raw parameter must be `True` or `False`"):
Series(range(3)).rolling(1).apply(len, raw=bad_raw)
def test_rolling_apply_out_of_bounds(engine_and_raw):
# gh-1850
engine, raw = engine_and_raw
vals = Series([1, 2, 3, 4])
result = vals.rolling(10).apply(np.sum, engine=engine, raw=raw)
assert result.isna().all()
result = vals.rolling(10, min_periods=1).apply(np.sum, engine=engine, raw=raw)
expected = Series([1, 3, 6, 10], dtype=float)
tm.assert_almost_equal(result, expected)
@pytest.mark.parametrize("window", [2, "2s"])
def test_rolling_apply_with_pandas_objects(window):
# 5071
df = DataFrame(
{"A": np.random.randn(5), "B": np.random.randint(0, 10, size=5)},
index= | date_range("20130101", periods=5, freq="s") | pandas.date_range |
import torch
from transformer.nn_transformer import TRANSFORMER
from downstream.model import example_classifier, RnnClassifier, FeedForwardClassifier, example_DNN
from downstream.solver import get_optimizer
from downstream.dataloader_ds import SchizophreniaMMDataset
import pandas as pd
from sklearn.model_selection import KFold, StratifiedKFold
from torch.utils.data import DataLoader
import re
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score, roc_auc_score
import numpy as np
from sklearn.linear_model import LogisticRegression
import torch.nn as nn
import sys
device = 'cuda' if torch.cuda.is_available() else 'cpu'
def dict_acc(input_dict):
cnt = 0
cor = 0
for key in input_dict.keys():
if(sum(input_dict[key])/len(input_dict[key]) >= 0.5):
cor += 1
cnt += 1
return cor/cnt
def duplicate(features, n_times, d_bool=False):
if(d_bool):
return torch.cat((features.reshape(-1,1),)*n_times, dim=1)
return features
def filter_files(root_files, list_files, drugCond=None):
valid_info = []
if(drugCond is not None):
drugcond_data = pd.read_excel("data/PORQ_drugUnblinding.xlsx").values
for row in drugcond_data:
if(row[3] == drugCond):
valid_info.append(row[:-1])
output_files = []
for file in root_files:
src_info = file.split("/")[-1].split(".npy")[0]
for pos_file in list_files:
if(src_info not in pos_file):
continue
participant_id = int(re.search(r'\d{4}', file)[0])
if(participant_id >= 8000):
output_files.append(pos_file)
continue
keep = False
for drugcondinf in valid_info:
if(str(drugcondinf[0]) in pos_file and \
"Day" + str(drugcondinf[1]) in pos_file and \
str(drugcondinf[2])[:10] in pos_file):
keep = True
if(keep):
output_files.append(pos_file)
return output_files
def reverse_pred(input_list):
output_list = []
for x in range(len(input_list)):
if(x == 0):
output_list.append(1)
else:
output_list.append(0)
return output_list
#subsets = ["watch", "describe", "feel"]
subsets = ["watch", "describe", "feel"]
#model_name_dict = {
#"result/result_transformer/flm_small/states-250000.ckpt":272,
#"result/result_transformer/flm_base/states-250000.ckpt":272,
#"result/result_transformer/flm_large_1mask/best_160_save.ckpt":544,
#"result/result_transformer/flm_large/states-250000.ckpt":544,
#}
#model_name_flm = "../GoogleDrive/flm_models/states-250000.ckpt"
#model_name_au = "../GoogleDrive/flm_models/au_base.ckpt"
#model_name_gp = "../GoogleDrive/flm_models/gp_base.ckpt"
model_name_flm = "result/result_transformer/flm_base/states-250000.ckpt"
#model_name_au = "result/result_transformer/au_base/states-250000.ckpt"
#model_name_gp = "result/result_transformer/gp_base/states-250000.ckpt"
model_name_au = "result/result_transformer/au_aalbert_3L/states-200000.ckpt"
model_name_gp = "result/result_transformer/gp_base_aalbert/states-200000.ckpt"
model_name_gpau = "result/result_transformer/gpau_aalbert_3L/states-200000.ckpt"
model_name_dict = {"flm":model_name_flm, "au":model_name_au, "gp":model_name_gp, "gpau":model_name_gpau}
seeds = list(np.random.randint(0,1000,5))
drugconds = ["PL","OT"]
pretrain_option = [True,False]
sources = ["gpau"]
output = []
for seed in seeds:
for subset in subsets:
for drugcond in drugconds:
for pretrain in pretrain_option:
if(pretrain):
#dim_dict = {"flm":272, "gp":88, "au":136}
dim_dict = {"flm":272, "gp":84, "au":120, "gpau":144}
inp_dim = sum([dim_dict[x] for x in sources])
else:
dim_dict = {"flm":136, "gp":11, "au":17}
inp_dim = sum([dim_dict[x] for x in sources])
config = {
'mode' : 'classification',
'sample_rate' : 1,
'hidden_size' : 128,
'pre_linear_dims' : [20], 'post_linear_dims': [20],'drop':0.2,
'concat': 1, 'layers': 3, 'linear': False,
}
torch.manual_seed(seed)
n_fold = 10
sets = ["data/train-clean-schz_chunk_0.csv","data/train-clean-schz_chunk_1.csv"]
tables = [pd.read_csv(s, header=None) for s in sets]
table = pd.concat(tables, ignore_index=True).values
name_sets = ["data/train-clean-360_chunk_0.csv","data/train-clean-360_chunk_1.csv"]
tables_name = [ | pd.read_csv(s, header=None) | pandas.read_csv |
from utility import *
import glob
import pandas as pd
import os
def graph_construct(outputdir):
path0 = os.path.join(os.getcwd(), outputdir)
#' import processed data
files1 = glob.glob(path0 + "/count_data/*.csv")
files1.sort()
count_list = []
for df in files1:
print(df)
count_list.append(pd.read_csv(df, index_col=0))
files2 = glob.glob(path0 + "/norm_data/*.csv")
files2.sort()
norm_list = []
for df in files2:
print(df)
norm_list.append(pd.read_csv(df, index_col=0))
files3 = glob.glob(path0 + "/scale_data/*.csv")
files3.sort()
scale_list = []
for df in files3:
print(df)
scale_list.append(pd.read_csv(df, index_col=0))
files4 = glob.glob(path0 + "/label_data/*.csv")
files4.sort()
label_list = []
for df in files4:
print(df)
label_list.append(pd.read_csv(df, index_col=0))
fpath = os.path.join(path0, 'sel_features.csv')
features = pd.read_csv(fpath, index_col=False).values.flatten()
#' graph construction
import itertools
N = len(count_list)
if (N == 1):
combine = | pd.Series([(0, 0)]) | pandas.Series |
import pandas as pd
import numpy as np
from sklearn.model_selection import KFold
from utils.preprocessing import preprocess
from utils.kfold import get_kfold
import argparse
import os
random_state = 20191205
np.random.seed(random_state)
def main(args):
if not os.path.exists(args.save_dir):
os.makedirs(args.save_dir)
rpath = os.path.join(args.dir,args.rname)
uatpath = os.path.join(args.dir,args.uatname)
tpath = os.path.join(args.dir,args.tname)
apath = os.path.join(args.dir,args.aname)
ratings = pd.read_csv(rpath, sep='\t')
tags = | pd.read_csv(uatpath, sep='\t') | pandas.read_csv |
# models_persistence.py?
"""
Functions for persistence forecasts.
Two kinds of persistence are supported:
1. Persistence of observed values in :py:func:`persistence_scalar` and
:py:func:`persistence_interval`
2. Persistence of irradiance or power accounting for solar position in
:py:func:`persistence_scalar_index` and
:py:func:`persistence_interval_index` (?).
Users of intraday persistence forecasts will typically want to use
:py:func:`persistence_scalar` or :py:func:`persistence_scalar_index`.
Users of day ahead persistence forecasts will typically want to use
:py:func:`persistence_interval`.
:py:func:`persistence_interval_index`?
The functions accept a *load_data* keyword argument that allows users to
change where the functions load the observation data from. This is most
useful for users that would like to provide their own observation data
rather than using the solarforecastarbiter database.
"""
from functools import partial
import pandas as pd
from solarforecastarbiter import datamodel, pvmodel
def persistence_scalar(observation, data_start, data_end, forecast_start,
forecast_end, interval_length, interval_label,
load_data):
r"""
Make a persistence forecast using the mean value of the
*observation* from *data_start* to *data_end*.
In the example below, we use GHI to be concrete but the concept
applies to any kind of observation data. The persistence forecast
is:
.. math::
GHI_{t_f} = \overline{GHI_{t_{start}} \ldots GHI_{t_{end}}}
where :math:`t_f` is a forecast time, and the overline represents
the average of all observations that occur between
:math:`t_{start}` = *data_start* and :math:`t_{end}` = *data_end*.
Parameters
----------
observation : datamodel.Observation
data_start : pd.Timestamp
Observation data start. Forecast is inclusive of this instant if
observation.interval_label is *beginning* or *instant*.
data_end : pd.Timestamp
Observation data end. Forecast is inclusive of this instant if
observation.interval_label is *ending* or *instant*.
forecast_start : pd.Timestamp
Forecast start. Forecast is inclusive of this instant if
interval_label is *beginning* or *instant*.
forecast_end : pd.Timestamp
Forecast end. Forecast is inclusive of this instant if
interval_label is *ending* or *instant*.
interval_length : pd.Timedelta
Forecast interval length
interval_label : str
instant, beginning, or ending
load_data : function
A function that loads the observation data. Must have the
signature load_data(observation, data_start, data_end) and
properly account for observation interval label.
Returns
-------
forecast : pd.Series
The persistence forecast.
"""
obs = load_data(observation, data_start, data_end)
persistence_quantity = obs.mean()
closed = datamodel.CLOSED_MAPPING[interval_label]
fx_index = pd.date_range(start=forecast_start, end=forecast_end,
freq=interval_length, closed=closed)
fx = | pd.Series(persistence_quantity, index=fx_index) | pandas.Series |
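# Hedged usage sketch (added note, not part of the original module): one way
# persistence_scalar might be called with a user-supplied load_data function.
# The constant-valued observation series, the 'beginning' interval label, and
# `my_observation` are illustrative assumptions; in practice `observation` is a
# datamodel.Observation.
#
# def my_load_data(observation, data_start, data_end):
#     index = pd.date_range(start=data_start, end=data_end, freq='5min')
#     return pd.Series(100.0, index=index)
#
# fx = persistence_scalar(
#     observation=my_observation,
#     data_start=pd.Timestamp('20190101 1200', tz='UTC'),
#     data_end=pd.Timestamp('20190101 1300', tz='UTC'),
#     forecast_start=pd.Timestamp('20190101 1300', tz='UTC'),
#     forecast_end=pd.Timestamp('20190101 1400', tz='UTC'),
#     interval_length=pd.Timedelta('5min'),
#     interval_label='beginning',
#     load_data=my_load_data)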
from __future__ import division #brings in Python 3.0 mixed type calculation rules
import datetime
import inspect
import numpy as np
import numpy.testing as npt
import os.path
import pandas as pd
import sys
from tabulate import tabulate
import unittest
##find parent directory and import model
#parentddir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
#sys.path.append(parentddir)
from ..agdrift_exe import Agdrift
test = {}
class TestAgdrift(unittest.TestCase):
"""
IEC unit tests.
"""
def setUp(self):
"""
setup the test as needed
e.g. pandas to open agdrift qaqc csv
Read qaqc csv and create pandas DataFrames for inputs and expected outputs
:return:
"""
pass
def tearDown(self):
"""
teardown called after each test
e.g. maybe write test results to some text file
:return:
"""
pass
def create_agdrift_object(self):
# create empty pandas dataframes to create empty object for testing
df_empty = pd.DataFrame()
# create an empty agdrift object
agdrift_empty = Agdrift(df_empty, df_empty)
return agdrift_empty
def test_validate_sim_scenarios(self):
"""
:description determines if user defined scenarios are valid for processing
:param application_method: type of Tier I application method employed
:param aquatic_body_def: type of endpoint of concern (e.g., pond, wetland); implies whether
: endpoint of concern parameters (e.g., pond width) are set (i.e., by user or EPA standard)
:param drop_size_*: qualitative description of spray droplet size for aerial & ground applications
:param boom_height: qualitative height above ground of spray boom
:param airblast_type: type of orchard being sprayed
:NOTE we perform an additional validation check related to distances later in the code just before integration
:return
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
agdrift_empty.out_sim_scenario_chk = pd.Series([], dtype='object')
expected_result = pd.Series([
'Valid Tier I Aquatic Aerial Scenario',
'Valid Tier I Terrestrial Aerial Scenario',
'Valid Tier I Aquatic Aerial Scenario',
'Valid Tier I Terrestrial Aerial Scenario',
'Valid Tier I Aquatic Aerial Scenario',
'Valid Tier I Terrestrial Ground Scenario',
'Valid Tier I Aquatic Ground Scenario',
'Valid Tier I Terrestrial Ground Scenario',
'Valid Tier I Aquatic Ground Scenario',
'Valid Tier I Terrestrial Airblast Scenario',
'Valid Tier I Aquatic Airblast Scenario',
'Valid Tier I Terrestrial Airblast Scenario',
'Valid Tier I Aquatic Airblast Scenario',
'Valid Tier I Terrestrial Airblast Scenario',
'Invalid Tier I Aquatic Aerial Scenario',
'Invalid Tier I Aquatic Ground Scenario',
'Invalid Tier I Aquatic Airblast Scenario',
'Invalid Tier I Terrestrial Aerial Scenario',
'Valid Tier I Terrestrial Ground Scenario',
'Valid Tier I Terrestrial Airblast Scenario',
'Invalid scenario ecosystem_type',
'Invalid Tier I Aquatic Assessment application method',
'Invalid Tier I Terrestrial Assessment application method'],dtype='object')
try:
#set test data
agdrift_empty.num_simulations = len(expected_result)
agdrift_empty.application_method = pd.Series(
['tier_1_aerial',
'tier_1_aerial',
'tier_1_aerial',
'tier_1_aerial',
'tier_1_aerial',
'tier_1_ground',
'tier_1_ground',
'tier_1_ground',
'tier_1_ground',
'tier_1_airblast',
'tier_1_airblast',
'tier_1_airblast',
'tier_1_airblast',
'tier_1_airblast',
'tier_1_aerial',
'tier_1_ground',
'tier_1_airblast',
'tier_1_aerial',
'tier_1_ground',
'tier_1_airblast',
'tier_1_aerial',
'Tier II Aerial',
'Tier III Aerial'], dtype='object')
agdrift_empty.ecosystem_type = pd.Series(
['aquatic_assessment',
'terrestrial_assessment',
'aquatic_assessment',
'terrestrial_assessment',
'aquatic_assessment',
'terrestrial_assessment',
'aquatic_assessment',
'terrestrial_assessment',
'aquatic_assessment',
'terrestrial_assessment',
'aquatic_assessment',
'terrestrial_assessment',
'aquatic_assessment',
'terrestrial_assessment',
'aquatic_assessment',
'aquatic_assessment',
'aquatic_assessment',
'terrestrial_assessment',
'terrestrial_assessment',
'terrestrial_assessment',
'Field Assessment',
'aquatic_assessment',
'terrestrial_assessment'], dtype='object')
agdrift_empty.aquatic_body_type = pd.Series(
['epa_defined_pond',
'NaN',
'epa_defined_wetland',
'NaN',
'user_defined_pond',
'NaN',
'user_defined_wetland',
'NaN',
'epa_defined_wetland',
'NaN',
'user_defined_pond',
'NaN',
'user_defined_wetland',
'NaN',
'Defined Pond',
'user_defined_pond',
'epa_defined_pond',
'NaN',
'NaN',
'NaN',
'epa_defined_pond',
'user_defined_wetland',
'user_defined_pond'], dtype='object')
agdrift_empty.terrestrial_field_type = pd.Series(
['NaN',
'user_defined_terrestrial',
'NaN',
'epa_defined_terrestrial',
'NaN',
'user_defined_terrestrial',
'NaN',
'user_defined_terrestrial',
'NaN',
'epa_defined_terrestrial',
'NaN',
'user_defined_terrestrial',
'NaN',
'user_defined_terrestrial',
'NaN',
'NaN',
'NaN',
'user_defined_terrestrial',
'user_defined_terrestrial',
'user_defined_terrestrial',
'NaN',
'NaN',
'user_defined_terrestrial'], dtype='object')
agdrift_empty.drop_size_aerial = pd.Series(
['very_fine_to_fine',
'fine_to_medium',
'medium_to_coarse',
'coarse_to_very_coarse',
'fine_to_medium',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'medium_to_coarse',
'NaN',
'very_fine_to_medium',
'NaN',
'very_fine Indeed',
'NaN',
'very_fine_to_medium',
'medium_to_coarse',
'NaN'], dtype='object')
agdrift_empty.drop_size_ground = pd.Series(
['NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'very_fine',
'fine_to_medium-coarse',
'very_fine',
'fine_to_medium-coarse',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'very_fine',
'NaN',
'fine_to_medium-coarse',
'very_fine',
'NaN',
'very_fine_to_medium',
'NaN',
'very_fine'], dtype='object')
agdrift_empty.boom_height = pd.Series(
['NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'high',
'low',
'high',
'low',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'high',
'NaN',
'NaN',
'NaN',
'NaN'],dtype='object')
agdrift_empty.airblast_type = pd.Series(
['NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'normal',
'dense',
'sparse',
'orchard',
'vineyard',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'vineyard',
'NaN',
'NaN',
'NaN'], dtype='object')
agdrift_empty.validate_sim_scenarios()
result = agdrift_empty.out_sim_scenario_chk
npt.assert_array_equal(result, expected_result, err_msg="", verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_set_sim_scenario_id(self):
"""
:description provides scenario ids per simulation that match scenario names (i.e., column_names) from SQL database
:param out_sim_scenario_id: scenario name as assigned to individual simulations
:param num_simulations: number of simulations to assign scenario names
:param out_sim_scenario_chk: from previous method where scenarios were checked for validity
:param application_method: application method of scenario
:param drop_size_*: qualitative description of spray droplet size for aerial and ground applications
:param boom_height: qualitative height above ground of spray boom
:param airblast_type: type of airblast application (e.g., vineyard, orchard)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result = pd.Series(['aerial_vf2f',
'aerial_f2m',
'aerial_m2c',
'aerial_c2vc',
'ground_low_vf',
'ground_low_fmc',
'ground_high_vf',
'ground_high_fmc',
'airblast_normal',
'airblast_dense',
'airblast_sparse',
'airblast_vineyard',
'airblast_orchard',
'Invalid'], dtype='object')
try:
agdrift_empty.num_simulations = len(expected_result)
agdrift_empty.out_sim_scenario_chk = pd.Series(['Valid Tier I Aerial',
'Valid Tier I Aerial',
'Valid Tier I Aerial',
'Valid Tier I Aerial',
'Valid Tier I Ground',
'Valid Tier I Ground',
'Valid Tier I Ground',
'Valid Tier I Ground',
'Valid Tier I Airblast',
'Valid Tier I Airblast',
'Valid Tier I Airblast',
'Valid Tier I Airblast',
'Valid Tier I Airblast',
'Invalid Scenario'], dtype='object')
agdrift_empty.application_method = pd.Series(['tier_1_aerial',
'tier_1_aerial',
'tier_1_aerial',
'tier_1_aerial',
'tier_1_ground',
'tier_1_ground',
'tier_1_ground',
'tier_1_ground',
'tier_1_airblast',
'tier_1_airblast',
'tier_1_airblast',
'tier_1_airblast',
'tier_1_airblast',
'tier_1_aerial'], dtype='object')
agdrift_empty.drop_size_aerial = pd.Series(['very_fine_to_fine',
'fine_to_medium',
'medium_to_coarse',
'coarse_to_very_coarse',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN'], dtype='object')
agdrift_empty.drop_size_ground = pd.Series(['NaN',
'NaN',
'NaN',
'NaN',
'very_fine',
'fine_to_medium-coarse',
'very_fine',
'fine_to_medium-coarse',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN'], dtype='object')
agdrift_empty.boom_height = pd.Series(['NaN',
'NaN',
'NaN',
'NaN',
'low',
'low',
'high',
'high',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN'], dtype='object')
agdrift_empty.airblast_type = pd.Series(['NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'normal',
'dense',
'sparse',
'vineyard',
'orchard',
'NaN'], dtype='object')
agdrift_empty.set_sim_scenario_id()
result = agdrift_empty.out_sim_scenario_id
npt.assert_array_equal(result, expected_result, err_msg="", verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_assign_column_names(self):
"""
:description assigns column names (except distance column) from sql database to internal scenario names
:param column_name: short name for pesticide application scenario for which distance vs deposition data is provided
:param scenario_name: internal variable for holding scenario names
:param scenario_number: index for scenario_name (this method assumes the distance values could occur in any column)
:param distance_name: internal name for the column holding distance data
:NOTE to test both outputs of this method I simply appended them together
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
agdrift_empty.scenario_name = pd.Series([], dtype='object')
expected_result = pd.Series(['aerial_vf2f', 'aerial_f2m', 'aerial_m2c', 'aerial_c2vc',
'ground_low_vf', 'ground_low_fmc',
'ground_high_vf', 'ground_high_fmc',
'airblast_normal', 'airblast_dense', 'airblast_sparse',
'airblast_vineyard', 'airblast_orchard'], dtype='object')
try:
agdrift_empty.column_names = pd.Series(['aerial_vf2f', 'aerial_f2m', 'aerial_m2c', 'aerial_c2vc',
'ground_low_vf', 'ground_low_fmc',
'ground_high_vf', 'ground_high_fmc',
'airblast_normal', 'airblast_dense', 'airblast_sparse',
'airblast_vineyard', 'airblast_orchard', 'distance_ft'])
#call method to assign scenario names
agdrift_empty.assign_column_names()
result = agdrift_empty.scenario_name
npt.assert_array_equal(result, expected_result, err_msg="", verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_get_distances(self):
"""
:description retrieves distance values for deposition scenario datasets
: all scenarios use same distances
:param num_db_values: number of distance values to be retrieved
:param distance_name: name of column in sql database that contains the distance values
:NOTE any blank fields are filled with 'nan'
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
location = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
agdrift_empty.db_name = os.path.join(location, 'sqlite_agdrift_distance.db')
agdrift_empty.db_table = 'output'
expected_result = pd.Series([], dtype='float')
try:
expected_result = [0.,0.102525,0.20505,0.4101,0.8202,1.6404,3.2808,4.9212,6.5616,9.8424,13.1232,19.6848,26.2464,
32.808,39.3696,45.9312,52.4928,59.0544,65.616,72.1776,78.7392,85.3008,91.8624,98.424,104.9856,
111.5472,118.1088,124.6704,131.232,137.7936,144.3552,150.9168,157.4784,164.04,170.6016,177.1632,
183.7248,190.2864,196.848,203.4096,209.9712,216.5328,223.0944,229.656,236.2176,242.7792,249.3408,
255.9024,262.464,269.0256,275.5872,282.1488,288.7104,295.272,301.8336,308.3952,314.9568,321.5184,
328.08,334.6416,341.2032,347.7648,354.3264,360.888,367.4496,374.0112,380.5728,387.1344,393.696,
400.2576,406.8192,413.3808,419.9424,426.504,433.0656,439.6272,446.1888,452.7504,459.312,465.8736,
472.4352,478.9968,485.5584,492.12,498.6816,505.2432,511.8048,518.3664,524.928,531.4896,538.0512,
544.6128,551.1744,557.736,564.2976,570.8592,577.4208,583.9824,590.544,597.1056,603.6672,610.2288,
616.7904,623.352,629.9136,636.4752,643.0368,649.5984,656.16,662.7216,669.2832,675.8448,682.4064,
688.968,695.5296,702.0912,708.6528,715.2144,721.776,728.3376,734.8992,741.4608,748.0224,754.584,
761.1456,767.7072,774.2688,780.8304,787.392,793.9536,800.5152,807.0768,813.6384,820.2,826.7616,
833.3232,839.8848,846.4464,853.008,859.5696,866.1312,872.6928,879.2544,885.816,892.3776,898.9392,
905.5008,912.0624,918.624,925.1856,931.7472,938.3088,944.8704,951.432,957.9936,964.5552,971.1168,
977.6784,984.24,990.8016,997.3632]
agdrift_empty.distance_name = 'distance_ft'
agdrift_empty.num_db_values = len(expected_result)
result = agdrift_empty.get_distances(agdrift_empty.num_db_values)
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_get_scenario_deposition_data(self):
"""
:description retrieves deposition data for all scenarios from sql database
: and checks that for each the first, last, and total number of values
: are correct
:param scenario: name of scenario for which data is to be retrieved
:param num_values: number of values included in scenario datasets
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
#scenario_data = pd.Series([[]], dtype='float')
result = pd.Series([], dtype='float')
#changing expected values to the 161st
expected_result = [0.50013,0.041273,161.0, #aerial_vf2f
0.49997,0.011741,161.0, #aerial_f2m
0.4999,0.0053241,161.0, #aerial_m2c
0.49988,0.0031189,161.0, #aerial_c2vc
1.019339,9.66E-04,161.0, #ground_low_vf
1.007885,6.13E-04,161.0, #ground_low_fmc
1.055205,1.41E-03,161.0, #ground_high_vf
1.012828,7.72E-04,161.0, #ground_high_fmc
8.91E-03,3.87E-05,161.0, #airblast_normal
0.1155276,4.66E-04,161.0, #airblast_dense
0.4762651,5.14E-05,161.0, #airblast_sparse
3.76E-02,3.10E-05,161.0, #airblast_vineyard
0.2223051,3.58E-04,161.0] #airblast_orchard
try:
agdrift_empty.num_db_values = 161 #set number of data values in sql db
location = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
agdrift_empty.db_name = os.path.join(location, 'sqlite_agdrift_distance.db')
agdrift_empty.db_table = 'output'
#agdrift_empty.db_name = 'sqlite_agdrift_distance.db'
#this is the list of scenario names (column names) in sql db (the order here is important because
#the expected values are ordered in this manner
agdrift_empty.scenario_name = ['aerial_vf2f', 'aerial_f2m', 'aerial_m2c', 'aerial_c2vc',
'ground_low_vf', 'ground_low_fmc', 'ground_high_vf', 'ground_high_fmc',
'airblast_normal', 'airblast_dense', 'airblast_sparse', 'airblast_vineyard',
'airblast_orchard']
#cycle through reading scenarios and building result list
for i in range(len(agdrift_empty.scenario_name)):
#get scenario data
scenario_data = agdrift_empty.get_scenario_deposition_data(agdrift_empty.scenario_name[i],
agdrift_empty.num_db_values)
print(scenario_data)
#extract 1st and last values of scenario data and build result list (including how many values are
#retrieved for each scenario
if i == 0:
#fix this
result = [scenario_data[0], scenario_data[agdrift_empty.num_db_values - 1],
float(len(scenario_data))]
else:
result.extend([scenario_data[0], scenario_data[agdrift_empty.num_db_values - 1],
float(len(scenario_data))])
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_get_column_names(self):
"""
:description retrieves column names from sql database (sqlite_agdrift_distance.db)
: (each column name refers to a specific deposition scenario;
: the scenario name is used later to retrieve the deposition data)
:parameter output name of sql database table from which to retrieve requested data
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
location = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
agdrift_empty.db_name = os.path.join(location, 'sqlite_agdrift_distance.db')
agdrift_empty.db_table = 'output'
result = pd.Series([], dtype='object')
expected_result = ['distance_ft','aerial_vf2f', 'aerial_f2m', 'aerial_m2c', 'aerial_c2vc',
'ground_low_vf', 'ground_low_fmc', 'ground_high_vf', 'ground_high_fmc',
'airblast_normal', 'airblast_dense', 'airblast_sparse', 'airblast_vineyard',
'airblast_orchard']
try:
result = agdrift_empty.get_column_names()
npt.assert_array_equal(result, expected_result, err_msg="", verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_filter_arrays(self):
"""
:description eliminate blank data cells (i.e., distances for which no deposition value is provided)
(and thus reduce the number of x,y values to be used)
:parameter x_in: array of distance values associated with values for a deposition scenario (e.g., Aerial/EPA Defined Pond)
:parameter y_in: array of deposition values associated with a deposition scenario (e.g., Aerial/EPA Defined Pond)
:parameter x_out: processed array of x_in values eliminating indices of blank distance/deposition values
:parameter y_out: processed array of y_in values eliminating indices of blank distance/deposition values
:NOTE y_in array is assumed to be populated by values >= 0. except for the blanks as 'nan' entries
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result_x = pd.Series([0.,1.,4.,5.,6.,7.], dtype='float')
expected_result_y = pd.Series([10.,11.,14.,15.,16.,17.], dtype='float')
try:
x_in = pd.Series([0.,1.,2.,3.,4.,5.,6.,7.], dtype='float')
y_in = pd.Series([10.,11.,'nan','nan',14.,15.,16.,17.], dtype='float')
x_out, y_out = agdrift_empty.filter_arrays(x_in, y_in)
result_x = x_out
result_y = y_out
npt.assert_allclose(result_x, expected_result_x, rtol=1e-5, atol=0, err_msg='', verbose=True)
npt.assert_allclose(result_y, expected_result_y, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result_x, expected_result_x, result_y, expected_result_y]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_list_sims_per_scenario(self):
"""
:description scan simulations and count number and indices of simulations that apply to each scenario
:parameter num_scenarios number of deposition scenarios included in SQL database
:parameter num_simulations number of simulations included in this model execution
:parameter scenario_name name of deposition scenario as recorded in SQL database
:parameter out_sim_scenario_id identification of deposition scenario specified per model run simulation
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_num_sims = pd.Series([2,2,2,2,2,2,2,2,2,2,2,2,2], dtype='int')
expected_sim_indices = pd.Series([[0,13,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[1,14,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[2,15,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[3,16,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[4,17,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[5,18,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[6,19,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[7,20,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[8,21,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[9,22,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[10,23,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[11,24,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[12,25,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]], dtype='int')
try:
agdrift_empty.scenario_name = pd.Series(['aerial_vf2f', 'aerial_f2m', 'aerial_m2c', 'aerial_c2vc',
'ground_low_vf', 'ground_low_fmc', 'ground_high_vf', 'ground_high_fmc',
'airblast_normal', 'airblast_dense', 'airblast_sparse', 'airblast_vineyard',
'airblast_orchard'], dtype='object')
agdrift_empty.out_sim_scenario_id = pd.Series(['aerial_vf2f', 'aerial_f2m', 'aerial_m2c', 'aerial_c2vc',
'ground_low_vf', 'ground_low_fmc', 'ground_high_vf', 'ground_high_fmc',
'airblast_normal', 'airblast_dense', 'airblast_sparse', 'airblast_vineyard',
'airblast_orchard','aerial_vf2f', 'aerial_f2m', 'aerial_m2c', 'aerial_c2vc',
'ground_low_vf', 'ground_low_fmc', 'ground_high_vf', 'ground_high_fmc',
'airblast_normal', 'airblast_dense', 'airblast_sparse', 'airblast_vineyard',
'airblast_orchard'], dtype='object')
agdrift_empty.num_simulations = len(agdrift_empty.out_sim_scenario_id)
agdrift_empty.num_scenarios = len(agdrift_empty.scenario_name)
result_num_sims, result_sim_indices = agdrift_empty.list_sims_per_scenario()
npt.assert_array_equal(result_num_sims, expected_num_sims, err_msg='', verbose=True)
npt.assert_array_equal(result_sim_indices, expected_sim_indices, err_msg='', verbose=True)
finally:
tab = [result_num_sims, expected_num_sims, result_sim_indices, expected_sim_indices]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_determine_area_dimensions(self):
"""
:description determine relevant area/length/depth of waterbody or terrestrial area
:param i: simulation number
:param ecosystem_type: type of assessment to be conducted
:param aquatic_body_type: source of dimensional data for area (EPA or User defined)
:param terrestrial_field_type: source of dimensional data for area (EPA or User defined)
:param *_width: default or user specified width of waterbody or terrestrial field
:param *_length: default or user specified length of waterbody or terrestrial field
:param *_depth: default or user specified depth of waterbody or terrestrial field
:NOTE all areas, i.e., ponds, wetlands, and terrestrial fields are of 1 hectare size; the user can elect
to specify a width other than the default width but it won't change the area size; thus for
user specified areas the length is calculated and not specified by the user)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_width = pd.Series([208.7, 208.7, 100., 400., 150., 0.], dtype='float')
expected_length = pd.Series([515.8, 515.8, 1076.39, 269.098, 717.593, 0.], dtype='float')
expected_depth = pd.Series([6.56, 0.4921, 7., 23., 0., 0.], dtype='float')
try:
agdrift_empty.ecosystem_type = pd.Series(['aquatic_assessment',
'aquatic_assessment',
'aquatic_assessment',
'aquatic_assessment',
'terrestrial_assessment',
'terrestrial_assessment'], dtype='object')
agdrift_empty.aquatic_body_type = pd.Series(['epa_defined_pond',
'epa_defined_wetland',
'user_defined_pond',
'user_defined_wetland',
'NaN',
'NaN'], dtype='object')
agdrift_empty.terrestrial_field_type = pd.Series(['NaN',
'NaN',
'NaN',
'NaN',
'user_defined_terrestrial',
'epa_defined_terrestrial'], dtype='object')
num_simulations = len(agdrift_empty.ecosystem_type)
agdrift_empty.default_width = 208.7
agdrift_empty.default_length = 515.8
agdrift_empty.default_pond_depth = 6.56
agdrift_empty.default_wetland_depth = 0.4921
agdrift_empty.user_pond_width = pd.Series(['NaN', 'NaN', 100., 'NaN', 'NaN', 'NaN'], dtype='float')
agdrift_empty.user_pond_depth = pd.Series(['NaN', 'NaN', 7., 'NaN', 'NaN', 'NaN'], dtype='float')
agdrift_empty.user_wetland_width = pd.Series(['NaN', 'NaN', 'NaN', 400., 'NaN', 'NaN'], dtype='float')
agdrift_empty.user_wetland_depth = pd.Series(['NaN','NaN', 'NaN', 23., 'NaN', 'NaN'], dtype='float')
agdrift_empty.user_terrestrial_width = pd.Series(['NaN', 'NaN', 'NaN', 'NaN', 150., 'NaN'], dtype='float')
width_result = pd.Series(num_simulations * ['NaN'], dtype='float')
length_result = pd.Series(num_simulations * ['NaN'], dtype='float')
depth_result = pd.Series(num_simulations * ['NaN'], dtype='float')
agdrift_empty.out_area_width = pd.Series(num_simulations * ['nan'], dtype='float')
agdrift_empty.out_area_length = pd.Series(num_simulations * ['nan'], dtype='float')
agdrift_empty.out_area_depth = pd.Series(num_simulations * ['nan'], dtype='float')
agdrift_empty.sqft_per_hectare = 107639
for i in range(num_simulations):
width_result[i], length_result[i], depth_result[i] = agdrift_empty.determine_area_dimensions(i)
npt.assert_allclose(width_result, expected_width, rtol=1e-5, atol=0, err_msg='', verbose=True)
npt.assert_allclose(length_result, expected_length, rtol=1e-5, atol=0, err_msg='', verbose=True)
npt.assert_allclose(depth_result, expected_depth, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [width_result, expected_width, length_result, expected_length, depth_result, expected_depth]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
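# Added note (not part of the original test): every pond, wetland, and field is
# fixed at 1 hectare (sqft_per_hectare = 107639 sq ft), so a user-specified width
# implies the length, e.g. for the 100 ft user-defined pond above:
#     length = sqft_per_hectare / width = 107639. / 100. = 1076.39 ft
# which matches the corresponding expected_length entry.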
def test_calc_avg_dep_foa(self):
"""
:description calculation of average deposition over width of water body
:param integration_result result of integration of deposition curve across the distance
: beginning at the near distance and extending to the far distance of the water body
:param integration_distance effectively the width of the water body
:param avg_dep_foa average deposition rate across the width of the water body
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result = pd.Series([0.1538462, 0.5, 240.])
try:
integration_result = pd.Series([1.,125.,3e5], dtype='float')
integration_distance = pd.Series([6.5,250.,1250.], dtype='float')
result = agdrift_empty.calc_avg_dep_foa(integration_result, integration_distance)
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
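# Added note (not part of the original test): the relationship exercised here is
#     avg_dep_foa = integration_result / integration_distance
# e.g. 1. / 6.5 = 0.1538462, 125. / 250. = 0.5, and 3e5 / 1250. = 240.,
# matching expected_result.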
def test_calc_avg_dep_lbac(self):
"""
Deposition calculation.
:param avg_dep_foa: average deposition over width of water body as fraction of applied
:param application_rate: actual application rate
:param avg_dep_lbac: average deposition over width of water body in lbs per acre
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result = pd.Series([6.5, 3.125e4, 3.75e8])
try:
avg_dep_foa = pd.Series([1.,125.,3e5], dtype='float')
application_rate = pd.Series([6.5,250.,1250.], dtype='float')
result = agdrift_empty.calc_avg_dep_lbac(avg_dep_foa, application_rate)
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_calc_avg_dep_foa_from_lbac(self):
"""
Deposition calculation.
:param avg_dep_foa: average deposition over width of water body as fraction of applied
:param application_rate: actual application rate
:param avg_dep_lbac: average deposition over width of water body in lbs per acre
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result = pd.Series([1.553846e-01, 8.8e-06, 4.e-08])
try:
avg_dep_lbac = pd.Series([1.01, 0.0022, 0.00005], dtype='float')
application_rate = pd.Series([6.5,250.,1250.], dtype='float')
result = agdrift_empty.calc_avg_dep_foa_from_lbac(avg_dep_lbac, application_rate)
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_calc_avg_dep_lbac_from_gha(self):
"""
Deposition calculation.
:param avg_dep_gha: average deposition over width of water body in units of grams/hectare
:param gms_per_lb: conversion factor to convert lbs to grams
:param acres_per_hectare: conversion factor to convert hectares to acres
:param avg_dep_lbac: average deposition over width of water body in lbs per acre
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result = pd.Series([0.01516739, 0.111524, 0.267659])
try:
avg_dep_gha = pd.Series([17., 125., 3e2], dtype='float')
agdrift_empty.gms_per_lb = 453.592
agdrift_empty.acres_per_hectare = 2.471
result = agdrift_empty.calc_avg_dep_lbac_from_gha(avg_dep_gha)
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
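# Added note (not part of the original test): the g/ha to lb/ac conversion
# assumed by this test is
#     avg_dep_lbac = avg_dep_gha / gms_per_lb / acres_per_hectare
# e.g. 17. / 453.592 / 2.471 = 0.01516739 lb/ac, matching expected_result[0].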
def test_calc_avg_dep_lbac_from_waterconc_ngl(self):
"""
:description calculate the average deposition onto the pond/wetland/field
:param avg_dep_lbac: average deposition over width of water body in lbs per acre
:param area_width: average width of water body
:parem area_length: average length of water body
:param area_depth: average depth of water body
:param gms_per_lb: conversion factor to convert lbs to grams
:param ng_per_gram conversion factor
:param sqft_per_acre conversion factor
:param liters_per_ft3 conversion factor
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result = pd.Series([2.311455e-05, 2.209479e-03, 2.447423e-03])
try:
avg_waterconc_ngl = pd.Series([17., 125., 3e2], dtype='float')
area_width = pd.Series([50., 200., 500.], dtype='float')
area_length = pd.Series([6331., 538., 215.], dtype='float')
area_depth = pd.Series([0.5, 6.5, 3.], dtype='float')
agdrift_empty.liters_per_ft3 = 28.3168
agdrift_empty.sqft_per_acre = 43560.
agdrift_empty.ng_per_gram = 1.e9
agdrift_empty.gms_per_lb = 453.592
agdrift_empty.acres_per_hectare = 2.471
result = agdrift_empty.calc_avg_dep_lbac_from_waterconc_ngl(avg_waterconc_ngl, area_width,
area_length, area_depth)
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_calc_avg_dep_lbac_from_mgcm2(self):
"""
:description calculate the average deposition of pesticide over the terrestrial field in lbs/acre
:param avg_dep_lbac: average deposition over width of water body in lbs per acre
:param area_depth: average depth of water body
:param gms_per_lb: conversion factor to convert lbs to grams
:param mg_per_gram conversion factor
:param sqft_per_acre conversion factor
:param cm2_per_ft2 conversion factor
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result = pd.Series([2.676538e-02, 2.2304486, 44.608973])
try:
avg_fielddep_mgcm2 = pd.Series([3.e-4, 2.5e-2, 5.e-01])
agdrift_empty.sqft_per_acre = 43560.
agdrift_empty.gms_per_lb = 453.592
agdrift_empty.cm2_per_ft2 = 929.03
agdrift_empty.mg_per_gram = 1.e3
result = agdrift_empty.calc_avg_dep_lbac_from_mgcm2(avg_fielddep_mgcm2)
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_calc_avg_dep_gha(self):
"""
:description average deposition over width of water body in grams per hectare
:param avg_dep_lbac: average deposition over width of water body in lbs per acre
:param gms_per_lb: conversion factor to convert lbs to grams
:param acres_per_hectare: conversion factor to convert acres to hectares
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result = pd.Series([1.401061, 0.3648362, 0.03362546])
try:
avg_dep_lbac = pd.Series([1.25e-3,3.255e-4,3e-5], dtype='float')
agdrift_empty.gms_per_lb = 453.592
agdrift_empty.acres_per_hectare = 2.47105
result = agdrift_empty.calc_avg_dep_gha(avg_dep_lbac)
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_calc_avg_waterconc_ngl(self):
"""
:description calculate the average concentration of pesticide in the pond/wetland
:param avg_dep_lbac: average deposition over width of water body in lbs per acre
:param area_width: average width of water body
:parem area_length: average length of water body
:param area_depth: average depth of water body
:param gms_per_lb: conversion factor to convert lbs to grams
:param ng_per_gram conversion factor
:param sqft_per_acre conversion factor
:param liters_per_ft3 conversion factor
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result = pd.Series([70.07119, 18.24654, 22.41823])
try:
avg_dep_lbac = pd.Series([1.25e-3,3.255e-4,3e-5], dtype='float')
area_width = pd.Series([6.56, 208.7, 997.], dtype='float')
area_length = pd.Series([1.640838e4, 515.7595, 107.9629], dtype='float')
area_depth = pd.Series([6.56, 6.56, 0.4921], dtype='float')
agdrift_empty.ng_per_gram = 1.e9
agdrift_empty.liters_per_ft3 = 28.3168
agdrift_empty.gms_per_lb = 453.592
agdrift_empty.sqft_per_acre = 43560.
result = agdrift_empty.calc_avg_waterconc_ngl(avg_dep_lbac ,area_width, area_length, area_depth)
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
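# Added note (not part of the original test): the conversion assumed here is
#     mass_ng = avg_dep_lbac * (area_width * area_length / sqft_per_acre) * gms_per_lb * ng_per_gram
#     volume_liters = area_width * area_length * area_depth * liters_per_ft3
#     avg_waterconc_ngl = mass_ng / volume_liters
# e.g. for the first simulation, 1.25e-3 lb/ac deposited over ~2.47 acres is
# ~1.4 g (1.4e9 ng) of pesticide in ~2.0e7 L of water, i.e. ~70 ng/L, matching
# expected_result[0].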
def test_calc_avg_fielddep_mgcm2(self):
"""
:description calculate the average deposition of pesticide over the terrestrial field
:param avg_dep_lbac: average deposition over width of water body in lbs per acre
:param area_depth: average depth of water body
:param gms_per_lb: conversion factor to convert lbs to grams
:param mg_per_gram conversion factor
:param sqft_per_acre conversion factor
:param cm2_per_ft2 conversion factor
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result = pd.Series([1.401063e-5, 3.648369e-6, 3.362552e-7])
try:
avg_dep_lbac = pd.Series([1.25e-3,3.255e-4,3e-5], dtype='float')
agdrift_empty.gms_per_lb = 453.592
agdrift_empty.sqft_per_acre = 43560.
agdrift_empty.mg_per_gram = 1.e3
agdrift_empty.cm2_per_ft2 = 929.03
result = agdrift_empty.calc_avg_fielddep_mgcm2(avg_dep_lbac)
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
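# Added note (not part of the original test): the lb/ac to mg/cm2 conversion
# assumed by this test is
#     avg_fielddep_mgcm2 = avg_dep_lbac * gms_per_lb * mg_per_gram / (sqft_per_acre * cm2_per_ft2)
# e.g. 1.25e-3 * 453.592 * 1.e3 / (43560. * 929.03) = 1.401063e-5, matching
# expected_result[0].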
def test_generate_running_avg(self):
"""
:description retrieves values for distance and the first deposition scenario from the sql database
:param num_db_values: number of distance values to be retrieved
:param distance_name: name of column in sql database that contains the distance values
:NOTE any blank fields are filled with 'nan'
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
location = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
agdrift_empty.db_name = os.path.join(location, 'sqlite_agdrift_distance.db')
agdrift_empty.db_table = 'output'
expected_result_x = pd.Series([], dtype='float')
expected_result_y = pd.Series([], dtype='float')
expected_result_npts = pd.Series([], dtype='object')
x_array_in = pd.Series([], dtype='float')
y_array_in = pd.Series([], dtype='float')
x_array_out = pd.Series([], dtype='float')
y_array_out = pd.Series([], dtype='float')
try:
expected_result_x = [0.,0.102525,0.20505,0.4101,0.8202,1.6404,3.2808,4.9212,6.5616,9.8424,13.1232,19.6848,26.2464,
32.808,39.3696,45.9312,52.4928,59.0544,65.616,72.1776,78.7392,85.3008,91.8624,98.424,104.9856,
111.5472,118.1088,124.6704,131.232,137.7936,144.3552,150.9168,157.4784,164.04,170.6016,177.1632,
183.7248,190.2864,196.848,203.4096,209.9712,216.5328,223.0944,229.656,236.2176,242.7792,249.3408,
255.9024,262.464,269.0256,275.5872,282.1488,288.7104,295.272,301.8336,308.3952,314.9568,321.5184,
328.08,334.6416,341.2032,347.7648,354.3264,360.888,367.4496,374.0112,380.5728,387.1344,393.696,
400.2576,406.8192,413.3808,419.9424,426.504,433.0656,439.6272,446.1888,452.7504,459.312,465.8736,
472.4352,478.9968,485.5584,492.12,498.6816,505.2432,511.8048,518.3664,524.928,531.4896,538.0512,
544.6128,551.1744,557.736,564.2976,570.8592,577.4208,583.9824,590.544,597.1056,603.6672,610.2288,
616.7904,623.352,629.9136,636.4752,643.0368,649.5984,656.16,662.7216,669.2832,675.8448,682.4064,
688.968,695.5296,702.0912,708.6528,715.2144,721.776,728.3376,734.8992,741.4608,748.0224,754.584,
761.1456,767.7072,774.2688,780.8304,787.392,793.9536,800.5152,807.0768,813.6384,820.2,826.7616,
833.3232,839.8848,846.4464,853.008,859.5696,866.1312,872.6928,879.2544,885.816,892.3776,898.9392,
905.5008,912.0624,918.624,925.1856,931.7472,938.3088,944.8704,951.432,957.9936,964.5552,971.1168,
977.6784,984.24,990.8016]
expected_result_y = [0.364712246,0.351507467,0.339214283,0.316974687,0.279954504,0.225948786,0.159949625,
0.123048839,0.099781801,0.071666234,0.056352938,0.03860139,0.029600805,0.024150524,
0.020550354,0.01795028,0.015967703,0.014467663,0.013200146,0.01215011,0.011300098,
0.010550085,0.009905072,0.009345065,0.008845057,0.008400051,0.008000046,0.007635043,
0.007300039,0.007000034,0.006725033,0.00646503,0.006230027,0.006010027,0.005805023,
0.005615023,0.005435021,0.00527002,0.00511002,0.004960017,0.004820017,0.004685016,
0.004560015,0.004440015,0.004325013,0.004220012,0.004120012,0.004020012,0.003925011,
0.003835011,0.00375001,0.00367001,0.00359001,0.00351001,0.003435009,0.003365009,
0.003300007,0.003235009,0.003170007,0.003110007,0.003055006,0.003000007,0.002945006,
0.002895006,0.002845006,0.002795006,0.002745006,0.002695006,0.002650005,0.002610005,
0.002570005,0.002525006,0.002485004,0.002450005,0.002410005,0.002370005,0.002335004,
0.002300005,0.002265004,0.002235004,0.002205004,0.002175004,0.002145004,0.002115004,
0.002085004,0.002055004,0.002025004,0.002000002,0.001975004,0.001945004,0.001920002,
0.001900002,0.001875004,0.001850002,0.001830002,0.001805004,0.001780002,0.001760002,
0.001740002,0.001720002,0.001700002,0.001680002,0.001660002,0.001640002,0.001620002,
0.001605001,0.001590002,0.001570002,0.001550002,0.001535001,0.001520002,0.001500002,
0.001485001,0.001470002,0.001455001,0.001440002,0.001425001,0.001410002,0.001395001,
0.001385001,0.001370002,0.001355001,0.001340002,0.001325001,0.001315001,0.001305001,
0.001290002,0.001275001,0.001265001,0.001255001,0.001245001,0.001230002,0.001215001,
0.001205001,0.001195001,0.001185001,0.001175001,0.001165001,0.001155001,0.001145001,
0.001135001,0.001125001,0.001115001,0.001105001,0.001095001,0.001085001,0.001075001,
0.001065001,0.00106,0.001055001,0.001045001,0.001035001,0.001025001,0.001015001,
0.001005001,0.0009985,0.000993001,0.000985001,0.000977001,0.000969501]
expected_result_npts = 160
x_dist = 6.56
agdrift_empty.distance_name = 'distance_ft'
agdrift_empty.scenario_name = 'ground_low_vf'
agdrift_empty.num_db_values = 161
x_array_in = agdrift_empty.get_distances(agdrift_empty.num_db_values)
y_array_in = agdrift_empty.get_scenario_deposition_data(agdrift_empty.scenario_name, agdrift_empty.num_db_values)
x_array_out, y_array_out, npts_out = agdrift_empty.generate_running_avg(agdrift_empty.num_db_values,
x_array_in, y_array_in, x_dist)
# write output arrays to excel file -- just for debugging
agdrift_empty.write_arrays_to_csv(x_array_out, y_array_out, "output_array_generate.csv")
npt.assert_array_equal(expected_result_npts, npts_out, verbose=True)
npt.assert_allclose(x_array_out, expected_result_x, rtol=1e-5, atol=0, err_msg='', verbose=True)
npt.assert_allclose(y_array_out, expected_result_y, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
pass
tab1 = [x_array_out, expected_result_x]
tab2 = [y_array_out, expected_result_y]
print("\n")
print(inspect.currentframe().f_code.co_name)
print('expected {0} number of points and got {1} points'.format(expected_result_npts, npts_out))
print("x_array result/x_array_expected")
print(tabulate(tab1, headers='keys', tablefmt='rst'))
print("y_array result/y_array_expected")
print(tabulate(tab2, headers='keys', tablefmt='rst'))
return
def test_generate_running_avg1(self):
"""
:description creates a running average for a specified x axis width (e.g., 7-day average values of an array)
:param x_array_in: array of x-axis values
:param y_array_in: array of y-axis values
:param num_db_values: number of points in the input arrays
:param x_array_out: array of x-axis values in output array
:param y_array_out: array of y-axis values in output array
:param npts_out: number of points in the output array
:param x_dist: width in x_axis units of running weighted average
:param num_db_values: number of distance values to be retrieved
:param distance_name: name of column in sql database that contains the distance values
:NOTE This test uses a uniformly spaced x_array and monotonically increasing y_array
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result_x = pd.Series([], dtype='float')
expected_result_y = pd.Series([], dtype='float')
expected_result_npts = pd.Series([], dtype='object')
x_array_in = | pd.Series([], dtype='float') | pandas.Series |
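# Hedged sketch (added note, not the model's implementation): the operation this
# test exercises is a running average of y over a window of width x_dist along
# the x axis. For the uniformly spaced x_array used in this test it could be
# approximated as:
#
# def running_avg_uniform(x_array_in, y_array_in, x_dist):
#     x = np.asarray(x_array_in, dtype=float)
#     y = np.asarray(y_array_in, dtype=float)
#     window = int(round(x_dist / (x[1] - x[0])))   # points per averaging window
#     npts_out = len(x) - window + 1
#     y_out = np.array([y[i:i + window].mean() for i in range(npts_out)])
#     return x[:npts_out], y_out, npts_out
#
# The real generate_running_avg also handles the non-uniform distance spacing
# stored in the database, so this is only an illustration of the concept.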
import pandas as pd
import numpy as np
#RUN WITH PYTHON 3
#Efron, B. and Tibshirani, R.J., 1994. An introduction to the bootstrap. CRC press.
#Bootstrap hypothesis testing
def boostrapping_CI(metric, data ,nbr_runs=1000, verbose=False):
if verbose:
print("Computing bootstrap confidence intervals...")
nbr_scans = len(data.index)
list_results = []
# compute metric for each bootstrapped subset
for r in range(nbr_runs):
# sample random indexes
ind = np.random.randint(nbr_scans ,size=nbr_scans)
# select random subset
data_bootstrapped = data.iloc[ind]
# compute metrics
result = metric(data_bootstrapped)
list_results.append(result)
# store variable in dictionary
metric_stats = dict()
metric_stats['avg_metric'] = np.average(list_results)
metric_stats['metric_ci_lb'] = np.percentile(list_results, 5)
metric_stats['metric_ci_ub'] = np.percentile(list_results, 95)
if verbose:
print("Bootstrap confidence intervals computed.")
return metric_stats
def bootstrap(metric, data_method1, data_method2, nbr_runs=100000, compute_bounds=True, verbose=False):
# reset index
data_method1_reindexed = data_method1.reset_index(drop=True)
data_method2_reindexed = data_method2.reset_index(drop=True)
# get length of each data
n = len(data_method1_reindexed.index)
m = len(data_method2_reindexed.index)
total = n + m
# compute the metric for both methods
result_method1 = metric(data_method1_reindexed)
result_method2 = metric(data_method2_reindexed)
# compute statistic t
t = abs(result_method1 - result_method2)
# merge data from both methods
data = | pd.concat([data_method1_reindexed, data_method2_reindexed]) | pandas.concat |
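# Hedged usage sketch (added note, not part of the original module):
# boostrapping_CI expects a metric that maps a DataFrame to a scalar. The 'dice'
# column name and the values below are illustrative assumptions.
#
# scores = pd.DataFrame({'dice': [0.81, 0.78, 0.86, 0.90, 0.75]})
# stats = boostrapping_CI(lambda df: df['dice'].mean(), scores, nbr_runs=1000)
# # stats['avg_metric'], stats['metric_ci_lb'], stats['metric_ci_ub'] hold the
# # bootstrap mean and the 5th/95th percentile bounds.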
import time
import numpy as np
from collections import deque
import torch
import pandas as pd
from utils.helpers import reset_experience
def evaluator(args,
global_logs,
evaluator_logs,
env_prototype,
model_prototype,
dqn):
# env
env = env_prototype(args.env_params, 0)
env.eval()
# model
local_device = torch.device('cuda')#('cpu')
evaluate_dqn = model_prototype(args.model_params,
args.norm_val,
args.state_shape,
args.action_space,
args.action_shape).to(local_device)
evaluate_dqn.load_state_dict(dqn.state_dict())
evaluate_dqn.eval()
torch.set_grad_enabled(False)
last_eval_learner_step = 0
eval_csv_logs = []
while global_logs.learner_step.value < args.agent_params.steps:
if global_logs.learner_step.value % 10000 == 0 and global_logs.learner_step.value > last_eval_learner_step:
eval_record = [] # [step, eva 1, eva 2, ..., eva n, eva aver]
last_eval_learner_step = global_logs.learner_step.value
eval_record.append(last_eval_learner_step)
# sync global model to local
evaluate_dqn.load_state_dict(dqn.state_dict())
evaluate_dqn.eval()
# main control loop
experience = reset_experience()
# counters
step = 0
episode_steps = 0
episode_reward = 0.
total_steps = 0
total_reward = 0.
nepisodes = 0
nepisodes_solved = 0
# flags
flag_reset = True # True when: terminal1 | episode_steps > self.early_stop
while nepisodes < args.agent_params.evaluator_nepisodes:
# deal w/ reset
if flag_reset:
# reset episode stats
episode_steps = 0
episode_reward = 0.
# reset game
experience = env.reset()
assert experience.state1 is not None
# flags
flag_reset = False
with torch.no_grad():
action, qvalue, max_qvalue, qvalues = evaluate_dqn.get_action(experience.state1, 0, device=local_device)
experience = env.step(action)
if experience.terminal1:
nepisodes_solved += 1
flag_reset = True
if args.env_params.early_stop and (episode_steps + 1) >= args.env_params.early_stop:
flag_reset = True
# update counters & stats
step += 1
episode_steps += 1
episode_reward += experience.reward
if flag_reset:
nepisodes += 1
total_steps += episode_steps
total_reward += episode_reward
eval_record.append(episode_reward)
eval_record.append(total_reward / nepisodes)
eval_csv_logs.append(eval_record)
print("evaluation {}".format(eval_record))
df = pd.DataFrame(data=eval_csv_logs)
df.to_csv(args.evaluation_csv_file)
print("csv log was saved as {}".format(args.evaluation_csv_file))
# report stats
# push local stats to logger
with evaluator_logs.logger_lock.get_lock():
evaluator_logs.total_steps.value = total_steps
evaluator_logs.total_reward.value = total_reward
evaluator_logs.nepisodes.value = nepisodes
evaluator_logs.nepisodes_solved.value = nepisodes_solved
evaluator_logs.logger_lock.value = True
# save model
# print("Saving model " + args.model_name + " ...")
# torch.save(local_model.state_dict(), args.model_name)
# print("Saved model " + args.model_name + ".")
df = | pd.DataFrame(data=eval_csv_logs) | pandas.DataFrame |
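# Hedged sketch (added note, not part of the original script): naming the CSV
# columns according to the eval_record layout noted above
# ([step, eva 1, ..., eva n, eva aver]); the column labels are illustrative.
#
# n = args.agent_params.evaluator_nepisodes
# columns = ['learner_step'] + ['episode_%d_reward' % i for i in range(1, n + 1)] + ['average_reward']
# df = pd.DataFrame(data=eval_csv_logs, columns=columns)
# df.to_csv(args.evaluation_csv_file, index=False)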
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 10 13:19:04 2019
@author: yujijun
"""
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import (AutoMinorLocator, MultipleLocator)
import pyBigWig
import collections
import scipy.stats as ss
# input index and sample name information
basic_path = "/Users/yujijun/Documents/01-Work/02-HiglassProject/03-interpret/Output/index_info" #
index_name = "Blood_index.txt"
info_name = "Blood_info.txt"
blood_index = pd.read_csv("%s/%s" %(basic_path,index_name),sep='\t',header=None,index_col=None)
blood_info = pd.read_csv("%s/%s" %(basic_path,info_name),sep='\t',header=None,index_col=None)
data = {"index":blood_index.iloc[:,0],"info":blood_info.iloc[:,0]}
df = pd.DataFrame(data)
choose_sample_20 = df.iloc[20,:]
#input blood92 chromosome and choose special sample
blood92_path = "/Users/yujijun/Documents/01-Work/02-HiglassProject/03-interpret/Output/bedfile_1000_blood92"
blood92_chr1_name = "bedfile_1000_Blood92_chr1.csv"
input_blood92 = pd.read_csv("%s/%s" %(blood92_path,blood92_chr1_name),sep='\t',header=0,index_col=None)
start_index = input_blood92[input_blood92.start==62780000].index.values[0]
end_index = input_blood92[input_blood92.start==62820000].index.values[0]
choose_data = input_blood92.iloc[list(range(start_index,end_index)),27]
fig, ax = plt.subplots(figsize=(15,5))
x = np.arange(start_index*1000, (end_index)*1000,1000)
plt.plot(x,choose_data,color="#FF8C00")
plt.xticks(rotation=90)
#ax.xaxis.set_major_locator(MultipleLocator(1))
#
#handle the TF data
input_path = "/Users/yujijun/Documents/01-Work/02-HiglassProject/03-interpret/Input_file/DNase_bw_v2"
sample_ID_list = [35477,36245,42217,43825,4550,46156,53417]
correlation_list = []
for i in sample_ID_list:
bigwig_file = "%s/%s_treat.bw" %(input_path,str(i))
print(bigwig_file)
bw = pyBigWig.open(bigwig_file)
resolution = 1000
chromo = "chr1"
chromo_values = bw.values(chromo,0,bw.chroms(chromo))
chromo_values_split1000 = [chromo_values[i:i + resolution] for i in range(start_index*1000, end_index*1000, resolution)]
def listmean(x):
x_mean = np.mean(x)
return(x_mean)
chromo_values_split1000_mean = list(map(listmean,chromo_values_split1000))
whole_mean = np.nansum(chromo_values)/len(chromo_values)
new_series = pd.Series(chromo_values_split1000_mean)/whole_mean
new_list = new_series.tolist()
#print(chromo_values_split16_mean[0:10])
chromo_values_split1000_mean_norm = new_list #add speed
correlation = np.corrcoef(choose_data,chromo_values_split1000_mean_norm)
correlation_list.append(correlation[1,0])
bw.close()
data = {"sample_ID":sample_ID_list, "correlation":correlation_list}
df_correlation = pd.DataFrame(data,index=range(len(sample_ID_list)))
df_correlation.sort_values("correlation",inplace=True,ascending=False)
#input summmit file
sample_ID_list = [35477,36245,42217,43825,4550,46156,53417]
correlation_list = []
for i in sample_ID_list:
summit_file = "%s/%s_sort_summits.bed" %(input_path,str(i))
summit = pd.read_csv(summit_file,header=None, sep='\t')
summit_chr1 = summit[summit.iloc[:,0] == "chr1"]
summit_chr1 = summit_chr1[(start_index*1000) <= summit_chr1.iloc[:,1]]
summit_chr1 = summit_chr1[summit_chr1.iloc[:,1] <= (end_index*1000)]
peak_list = summit_chr1.iloc[:,1].tolist()
x = np.arange(start_index*1000, (end_index)*1000,1000)
peak_data = [0]*len(x)
for i in peak_list:
for j in range(len(x)-1):
if i>=x[j] and i<x[j+1]:
index_i = summit_chr1[summit_chr1.iloc[:,1] == i].index.values[0]
peak_data[j] = summit_chr1.loc[index_i, 4]
correlation = np.corrcoef(choose_data,peak_data)
correlation_list.append(correlation[1,0])
data = {"sample_ID":sample_ID_list, "correlation":correlation_list}
df_correlation = pd.DataFrame(data,index=range(len(sample_ID_list)))
df_correlation.sort_values("correlation",inplace=True,ascending=False)
#define some of the function:
def summit_allmean(sample_ID_list):
summit_allmean_list = []
for i in sample_ID_list:
summit_file = "%s/%s_sort_summits.bed" %(input_path,str(i))
summit = | pd.read_csv(summit_file,header=None, sep='\t') | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Extract offshore inputs from exclusion layers
"""
import logging
import numpy as np
import pandas as pd
from scipy.ndimage import center_of_mass
from scipy.spatial import cKDTree
from warnings import warn
from reV.handlers.exclusions import ExclusionLayers
from reV.utilities.exceptions import MultiFileExclusionError
from reVX.utilities.utilities import log_versions, coordinate_distance
from rex.resource import Resource
from rex.utilities.utilities import parse_table, get_lat_lon_cols
logger = logging.getLogger(__name__)
class OffshoreInputs(ExclusionLayers):
"""
Class to extract offshore inputs from offshore inputs .h5 at desired
offshore site gids. Mapping is based on the techmapping dataset (tm_dset).
Offshore input values are taken from the array pixel closest to the
center of mass of each offshore site gid.
"""
DEFAULT_INPUT_LAYERS = {
'array_efficiency': 'aeff',
'bathymetry': 'depth',
'dist_to_coast': 'dist_s_to_l',
'assembly_areas': 'dist_a_to_s',
'ports_operations': 'dist_op_to_s',
'ports_construction': 'dist_p_to_s',
'ports_construction_nolimits': 'dist_p_to_s_nolimit',
'weather_downtime_fixed_bottom': 'fixed_downtime',
'weather_downtime_floating': 'floating_downtime',
'weather_downtime_mean_wave_height_bouy': 'hs_average'
}
def __init__(self, inputs_fpath, offshore_sites, tm_dset='techmap_wtk'):
"""
Parameters
----------
inputs_fpath : str
Path to offshore inputs .h5 file
offshore_sites : str | list | tuple | ndarray | pandas.DataFrame
- Path to .csv|.json file with offshore sites meta data
- Path to a WIND Toolkit .h5 file to extact site meta from
- List, tuple, or vector of offshore gids
- Pre-extracted site meta DataFrame
tm_dset : str, optional
Dataset / layer name for wind toolkit techmap,
by default 'techmap_wtk'
"""
log_versions(logger)
super().__init__(inputs_fpath)
self._offshore_meta = self._create_offshore_meta(offshore_sites,
tm_dset)
def __repr__(self):
msg = "{} from {}".format(self.__class__.__name__, self.inputs_fpath)
return msg
@property
def inputs_fpath(self):
"""
.h5 file containing offshore input layers
Returns
-------
str
"""
return self.h5_file
@property
def meta(self):
"""
Offshore site meta data including mapping to input layer row and column
index
Returns
-------
pandas.DataFrame
"""
return self._offshore_meta
@property
def lat_lons(self):
"""
Offshore sites coordinates (lat, lons)
Returns
-------
ndarray
"""
lat_lon_cols = get_lat_lon_cols(self.meta)
return self.meta[lat_lon_cols].values
@property
def row_ids(self):
"""
Input layer array row ids that correspond to desired offshore sites
Returns
-------
ndarray
"""
return self.meta['row_idx'].values
@property
def column_ids(self):
"""
Input layer array column ids that correspond to desired offshore sites
Returns
-------
ndarray
"""
return self.meta['col_idx'].values
@staticmethod
def _parse_offshore_sites(offshore_sites):
"""
Load offshore sites from disc if needed
Parameters
----------
offshore_sites : str | list | tuple | ndarray | pandas.DataFrame
- Path to .csv|.json file with offshore sites meta data
- Path to a WIND Toolkit .h5 file to extact site meta from
- List, tuple, or vector of offshore gids
- Pre-extracted site meta DataFrame
Returns
-------
offshore_sites : pandas.DataFrame
Offshore sites meta data
"""
if isinstance(offshore_sites, str):
if offshore_sites.endswith('.h5'):
with Resource(offshore_sites) as f:
offshore_sites = f.meta
if offshore_sites.index.name == 'gid':
offshore_sites = offshore_sites.reset_index()
else:
offshore_sites = parse_table(offshore_sites)
elif isinstance(offshore_sites, (tuple, list, np.ndarray)):
offshore_sites = pd.DataFrame({'gid': offshore_sites})
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 25 16:14:12 2019
@author: <NAME>
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#import graphviz
import os
import seaborn as sns
from scipy.stats import chi2_contingency
os.chdir("E:\PYTHON NOTES\projects\cab fare prediction")
dataset_train=pd.read_csv("train_cab.csv")
dataset_test=pd.read_csv("test.csv")
dataset_train.describe()
# dimension of data
dataset_train.shape
# Number of rows
dataset_train.shape[0]
# number of columns
dataset_train.shape[1]
# name of columns
list(dataset_train)
# data details
dataset_train.info()
dataset_train.isnull().sum()
dataset_test.isnull().sum()
sns.heatmap(dataset_train.isnull(),yticklabels=False,cbar=False, cmap='coolwarm')
#convert pickup_datetime into the required datetime format
data=[dataset_train,dataset_test]
for i in data:
i["pickup_datetime"]=pd.to_datetime(i["pickup_datetime"],errors="coerce")
dataset_train.info()
dataset_test.info()
dataset_train.isnull().sum()
dataset_test.isna().sum()
dataset_train=dataset_train.dropna(subset=["pickup_datetime"],how="all")
dataset_train["fare_amount"]=dataset_train["fare_amount"].astype(float)
np.where(dataset_train["fare_amount"]=="430-")
dataset_train["fare_amount"].loc[1123]=430
dataset_train["fare_amount"]=dataset_train["fare_amount"].astype(float)
#we will convert passenger_count into a categorical variable, because passenger count is not a continuous variable
dataset_obj=["passenger_count"]
dataset_int=["fare_amount","pickup_longitude","pickup_latitude","dropoff_longitude","dropoff_latitude"]
# data visulization
import seaborn as sns
import matplotlib.pyplot as plt
#setting up seaborn for the plots
sns.set(style="darkgrid",palette="Set1")
#some histogram plot from seaborn lib
plt.figure(figsize=(20,20))
plt.subplot(321)
_=sns.distplot(dataset_train["fare_amount"],bins=50)
plt.subplot(322)
_=sns.distplot(dataset_train["pickup_longitude"],bins=50)
plt.subplot(323)
_=sns.distplot(dataset_train["pickup_latitude"],bins=50)
plt.subplot(324)
_ = sns.distplot(dataset_train['dropoff_longitude'],bins=50)
plt.subplot(325)
_ = sns.distplot(dataset_train['dropoff_latitude'],bins=50)
plt.show()
plt.savefig('hist.png')
import scipy.stats as stats
#Some Bee Swarmplots
# plt.title('Cab Fare w.r.t passenger_count')
plt.figure(figsize=(25,25))
#_=sns.swarmplot(x="passenger_count",y="fare_amount",data=dataset_train)
#Jointplots for Bivariate Analysis.
#Here Scatter plot has regression line between 2 variables along with separate Bar plots of both variables.
#Also its annotated with pearson correlation coefficient and p value.
_=sns.jointplot(x="fare_amount",y="pickup_longitude",data=dataset_train,kind="reg")
_.annotate(stats.pearsonr)
#plt.savefig("jointfplo.png")
plt.show()
_=sns.jointplot(x="fare_amount",y="pickup_latitude",data=dataset_train,kind="reg")
_.annotate(stats.pearsonr)
_=sns.jointplot(x="fare_amount",y="dropoff_longitude",data=dataset_train,kind="reg")
_.annotate(stats.pearsonr)
_=sns.jointplot(x="fare_amount",y="dropoff_latitude",data=dataset_train,kind="reg")
_.annotate(stats.pearsonr)
# some violin plots to see the spread of each variable
plt.figure(figsize=(20,20))
plt.subplot(321)
_=sns.violinplot(y="fare_amount",data=dataset_train)
plt.subplot(322)
_=sns.violinplot(y="pickup_longitude",data=dataset_train)
plt.subplot(323)
_ = sns.violinplot(y='pickup_latitude',data=dataset_train)
plt.subplot(324)
_ = sns.violinplot(y='dropoff_longitude',data=dataset_train)
plt.subplot(325)
_ = sns.violinplot(y='dropoff_latitude',data=dataset_train)
plt.savefig("violine.png")
plt.show()
#pairplot for all numeric variables
_=sns.pairplot(dataset_train.loc[:,dataset_int],kind="scatter",dropna=True)
_.fig.suptitle("pairwise plot of all numeric variables")
#plt.savefig("pairwise.png")
plt.show()
#removing outlier values which are not within the desired range, based on a basic understanding of the dataset
#1. Fare amount has negative values, which doesn't make sense. A fare cannot be negative and also cannot be 0. So we will remove these rows.
sum(dataset_train["fare_amount"]<1)
dataset_train[dataset_train["fare_amount"]<1]
dataset_train=dataset_train.drop(dataset_train[dataset_train["fare_amount"]<1].index,axis=0)
#dataset_train.loc[dataset_train["fare_amount"]<1,"fare_amount"]=np.nan
#2. passenger_count variable: passenger count cannot be more than 6
sum(dataset_train["passenger_count"]>6)
for i in range (4,11):
print("passanger_count_above"+ str(i)+ "={}".format(sum(dataset_train["passenger_count"]>i)))
# so 20 observations of passenger_count are consistently above 6, 7, 8, 9 and 10 passengers; let's check them.
dataset_train[dataset_train["passenger_count"]>6]
#Also we need to see if there are any passenger_count<1
dataset_train[dataset_train["passenger_count"]<1]
len(dataset_train[dataset_train["passenger_count"]<1])
dataset_test["passenger_count"].unique()
# We will remove 20 observation which are above 6 value because a cab cannot hold these number of passengers.
dataset_train=dataset_train.drop(dataset_train[dataset_train["passenger_count"]<1].index,axis=0)
dataset_train=dataset_train.drop(dataset_train[dataset_train["passenger_count"]>6].index,axis=0)
#dataset_train.loc[dataset_train["passenger_count"]<1,"passenger_count"]=np.nan
#dataset_train.loc[dataset_train["passenger_count"]>6,"passenger_count"]=np.nan
sum(dataset_train["passenger_count"]<1)
#3.Latitudes range from -90 to 90.Longitudes range from -180 to 180. Removing which does not satisfy these ranges
print("pickup_longitude above 180 ={}".format(sum(dataset_train["pickup_longitude"]>180)))
print("pickup_longitude above -180 = {}".format(sum(dataset_train["pickup_longitude"]<-180)))
print("pickup_latitude above 90 ={}".format(sum(dataset_train["pickup_latitude"]>90)))
print("pickup_latitude above -90 ={}".format(sum(dataset_train["pickup_latitude"]<-90)))
print('dropoff_longitude above 180={}'.format(sum(dataset_train['dropoff_longitude']>180)))
print('dropoff_longitude below -180={}'.format(sum(dataset_train['dropoff_longitude']<-180)))
print('dropoff_latitude below -90={}'.format(sum(dataset_train['dropoff_latitude']<-90)))
print('dropoff_latitude above 90={}'.format(sum(dataset_train['dropoff_latitude']>90)))
#for test data
print("pickup_longitude above 180 ={}".format(sum(dataset_test["pickup_longitude"]>180)))
print("pickup_longitude above -180 = {}".format(sum(dataset_test["pickup_longitude"]<-180)))
print("pickup_latitude above 90 ={}".format(sum(dataset_test["pickup_latitude"]>90)))
print("pickup_latitude above -90 ={}".format(sum(dataset_test["pickup_latitude"]<-90)))
print('dropoff_longitude above 180={}'.format(sum(dataset_test['dropoff_longitude']>180)))
print('dropoff_longitude below -180={}'.format(sum(dataset_test['dropoff_longitude']<-180)))
print('dropoff_latitude below -90={}'.format(sum(dataset_test['dropoff_latitude']<-90)))
print('dropoff_latitude above 90={}'.format(sum(dataset_test['dropoff_latitude']>90)))
#There's only one outlier which is in variable pickup_latitude.So we will remove it with nan.
#Also we will see if there are any values equal to 0.
for i in ["pickup_longitude","pickup_latitude","dropoff_longitude","dropoff_latitude"]:
print(i,"equal to 0={}".format(sum(dataset_train[i]==0)))
#for test data
for i in ["pickup_longitude","pickup_latitude","dropoff_longitude","dropoff_latitude"]:
print(i,"equal to 0={}".format(sum(dataset_test[i]==0)))
#there are values which are equal to 0. we will remove them.
# There's only one outlier which is in variable pickup_latitude.So we will remove it with nan
dataset_train=dataset_train.drop(dataset_train[dataset_train["pickup_latitude"]>90].index,axis=0)
#there are values which are equal to 0. we will remove them.
for i in ["pickup_longitude","pickup_latitude","dropoff_longitude","dropoff_latitude"]:
dataset_train=dataset_train.drop(dataset_train[dataset_train[i]==0].index,axis=0)
# for i in ['pickup_longitude','pickup_latitude','dropoff_longitude','dropoff_latitude']:
# train.loc[train[i]==0,i] = np.nan
# train.loc[train['pickup_latitude']>90,'pickup_latitude'] = np.nan
dataset_train.shape
#Missing Value Analysis
missing_value=dataset_train.isnull().sum()
missing_value = missing_value.reset_index()
missing_value = missing_value.rename(columns = {'index': 'Variables', 0: 'Missing_percentage'})
missing_value
#find out percentage of null value
missing_value['Missing_percentage'] = (missing_value['Missing_percentage']/len(dataset_train))*100
missing_value = missing_value.sort_values('Missing_percentage', ascending = False).reset_index(drop = True)
dataset_train.info()
dataset_train["fare_amount"]=dataset_train["fare_amount"].fillna(dataset_train["fare_amount"].median())
dataset_train["passenger_count"]=dataset_train["passenger_count"].fillna(dataset_train["passenger_count"].mode()[0])
dataset_train.isnull().sum()
dataset_train["passenger_count"]=dataset_train["passenger_count"].round().astype(object)
dataset_train["passenger_count"].unique()
#outliers analysis by box plot
plt.figure(figsize=(20,5))
plt.xlim(0,100)
sns.boxplot(x=dataset_train["fare_amount"],data=dataset_train,orient="h")
# sum(dataset_train['fare_amount']<22.5)/len(dataset_train['fare_amount'])*100
#Bivariate Boxplots: Boxplot for Numerical Variable Vs Categorical Variable.
plt.figure(figsize=(20,10))
plt.xlim(0,100)
_=sns.boxplot(x=dataset_train["fare_amount"],y=dataset_train["passenger_count"],data=dataset_train,orient="h")
def outlier_detect(df):
for i in df.describe().columns:
q1=df.describe().at["25%",i]
q3=df.describe().at["75%",i]
IQR=(q3-q1)
ltv=(q1-1.5*IQR)
utv=(q3+1.5*IQR)
x=np.array(df[i])
p=[]
for j in x:
if j<ltv:
p.append(ltv)
elif j>utv:
p.append(utv)
else:
p.append(j)
df[i]=p
return (df)
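# Worked example of the IQR capping above (illustrative numbers): if a column
# has q1 = 10 and q3 = 20 then IQR = 10 and the fences are -5 and 35, so a
# value of 40 is capped to 35 and a value of -8 is floored to -5.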
dataset_int1=outlier_detect(dataset_train.loc[:,dataset_int])
dataset_test_obj=["passenger_count"]
dataset_test_int=["pickup_longitude","pickup_latitude","dropoff_longitude","dropoff_latitude"]
dataset_test1=outlier_detect(dataset_test.loc[:,dataset_test_int])
dataset_test1=pd.concat([dataset_test1,dataset_test["passenger_count"]],axis=1)
dataset_test=pd.concat([dataset_test1,dataset_test["pickup_datetime"]],axis=1)
#determine corr
corr=dataset_int1.corr()
f,ax=plt.subplots(figsize=(7,5))
sns.heatmap(corr,mask=np.zeros_like(corr,dtype=np.bool),cmap=sns.diverging_palette(220,10,as_cmap=True),square=True,ax=ax)
# """feature engineering"""
#1.we will derive new features from pickup_datetime variable
#new features will be year,month,day_of_week,hour
dataset_train1=pd.concat([dataset_int1,dataset_train["passenger_count"]],axis=1)
dataset_train2=pd.concat([dataset_train1,dataset_train["pickup_datetime"]],axis=1)
#dataset_train2.isna().sum()
data=[dataset_train2,dataset_test]
for i in data:
i["year"]=i["pickup_datetime"].apply(lambda row:row.year)
i["month"]=i["pickup_datetime"].apply(lambda row:row.month)
i["day_of_week"] = i["pickup_datetime"].apply(lambda row: row.dayofweek)
i["hour"] = i["pickup_datetime"].apply(lambda row: row.hour)
# train2_nodummies=dataset_train2.copy()
# dataset_train2=train2_nodummies.copy()
plt.figure(figsize=(20,10))
sns.countplot(dataset_train2["year"])
# plt.savefig('year.png')
plt.figure(figsize=(20,10))
sns.countplot(dataset_train2['month'])
# plt.savefig('month.png')
plt.figure(figsize=(20,10))
sns.countplot(dataset_train2['day_of_week'])
# plt.savefig('day_of_week.png')
plt.figure(figsize=(20,10))
sns.countplot(dataset_train2['hour'])
# plt.savefig('hour.png')
plt.show
#Now we will use month,day_of_week,hour to derive new features like sessions in a day,seasons in a year,week:weekend/weekday
# for sessions in a day using hour columns
def f(x):
if(x>=5) and (x<=11):
return "morning"
elif (x>=12) and (x<=16):
return "afternoon"
elif (x>=17) and (x<=20):
return "evening"
elif (x>=21) and (x<=23):
return "night pm"
elif (x>=0) and (x<=4):
return "night am"
dataset_train2["sessions"]=dataset_train2["hour"].apply(f)
dataset_test['session'] = dataset_test['hour'].apply(f)
#for seasons in a year using month column
def g(x):
if (x>=3) and (x<=5):
return "spring"
elif (x>=6) and (x<=8):
return "summer"
elif (x>=9) and (x<=11):
return "fall"
else :
return "winter"
dataset_train2['seasons'] = dataset_train2['month'].apply(g)
dataset_test['seasons'] = dataset_test['month'].apply(g)
#for week / weekend in a day of week columns
def h(x):
if (x>=0) and (x<=4):
return "weekday"
elif (x>=5) and (x<=6):
return "weekend"
dataset_train2['week'] = dataset_train2['day_of_week'].apply(h)
dataset_test['week'] = dataset_test['day_of_week'].apply(h)
dataset_train2['passenger_count'].describe()
dataset_train2.isnull().sum()
dataset_test.isna().sum()
#creating dummy variables
temp=pd.get_dummies(dataset_train2["passenger_count"],prefix="passenger_count")
dataset_train2=dataset_train2.join(temp)
temp = pd.get_dummies(dataset_test['passenger_count'], prefix = 'passenger_count')
dataset_test = dataset_test.join(temp)
temp = pd.get_dummies(dataset_test['seasons'], prefix = 'seasons')
dataset_test = dataset_test.join(temp)
temp=pd.get_dummies(dataset_train2["seasons"],prefix = "season" )
dataset_train2=pd.concat([dataset_train2,temp],axis=1)
temp = pd.get_dummies(dataset_train2['week'], prefix = 'week')
dataset_train2=pd.concat([dataset_train2,temp],axis=1)
import os
from pathlib import Path
import pandas as pd
def load_tables():
##### LOAD THE RAW DATA #####
df = pd.DataFrame()
parent = Path(__file__).parent.parent
DATA_DIR = os.path.join(parent, 'data')
WEEKDAYS = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
for f in os.listdir(DATA_DIR):
if f.endswith('.csv') and '20210201' not in f:
in_file = os.path.join(DATA_DIR, f)
df = df.append(pd.read_csv(in_file, parse_dates=['Date Time (UTC)']))
df = df.reset_index().drop('index', axis=1)
df = df[df['DEX'] == 'Uniswap v2']
weekday_map = dict(enumerate(WEEKDAYS, start=1))
df['Weekday'] = df['Date Time (UTC)'].apply(lambda row: weekday_map[row.isoweekday()])
df['Hour'] = df['Date Time (UTC)'].dt.hour
df['Weekday + Hour'] = df['Weekday'] + '-' + df['Hour'].astype(str)
##### AGGREGATE BY WEEKDAY #####
weekday_df = pd.DataFrame()
weekday_df = weekday_df.append(df[df['Token (In)'] == ' HOPR'].groupby('Weekday').sum()[['Amount (In)']])
weekday_df = weekday_df.join(df[df['Token (Out)'] == ' HOPR'].groupby('Weekday').sum()[['Amount (Out)']])
weekday_df['Volume (HOPR)'] = weekday_df['Amount (In)'] + weekday_df['Amount (Out)']
weekday_df = weekday_df.loc[WEEKDAYS]
weekday_df['Weekday'] = weekday_df.index
transaction_count = [len(df[df['Weekday'] == day]) for day in WEEKDAYS]
weekday_df['Transaction Count'] = transaction_count
##### AGGREGATE BY HOUR #####
hour_df = pd.DataFrame()
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 10 10:49:24 2020
@author: RC
Validation test for comparing model outputs from python and cpp stationsim.
We do this as follows:
- Generate two random samples of models
- Calculate corresponding Ripley's K (RK) curves for each model
- Generate a data frame for the two populations of RK curves
- Save said frame and analyse using panel regression in R.
- Analysis determines whether the two groups are statistically indistinguishable.
"""
import numpy as np
import sys
import os
import multiprocessing
from astropy.stats import RipleysKEstimator # astropy's ripley's K
import pandas as pd
import glob
import datetime
sys.path.append("..")
from stationsim_model import Model #python version of stationsim
import matplotlib.pyplot as plt
from seaborn import kdeplot
class HiddenPrints:
"""stop unnecessary printing from stationsim
We get a lot of `iterations : X` prints for a large number of
stationsim runs. This stops the printing for tidiness.
https://stackoverflow.com/questions/8391411/suppress-calls-to-print-python
"""
def __enter__(self):
self._original_stdout = sys.stdout
sys.stdout = open(os.devnull, 'w')
def __exit__(self, exc_type, exc_val, exc_tb):
sys.stdout.close()
sys.stdout = self._original_stdout
class stationsim_RipleysK():
""" Class for calculating Ripley' K curves on stationsim collisions
and saving them as pandas dataframes.
"""
def generate_Model_Sample(self, n_runs, model_params, single_process = False):
""" function for generating stationsim model runs to test
Parameters
------
n_runs : int
`n_runs`number of stationsim runs to generate.
Must be positive intiger.
model_params : dict
`model_params` dictionary of model parameters required for
stationsim to run. See stationsim_model.py for more details.
single_process : bool (default False)
whether to run the models as a single process or using
multiple processes simultaneously.
Returns
------
models : list
a list of completed stationsim `models` given the required
width, height, pop_total, and gate_speed.
"""
#placeholder list
models = []
if n_runs > 1 and model_params["random_seed"] != None:
raise Exception("Error: the 'random_seed' parameter is not None\
which means that all models generate the same results, which\
I'm sure isn't what you want!")
elif n_runs < 1:
raise Exception("Error: need one or more 'n_runs', not {}".format(n_runs))
#supress excessive printing
with HiddenPrints():
if single_process or n_runs == 1:
for _ in range(n_runs):
#generate model and run til status goes back to 0 (finished)
model = Model(**model_params)
while model.status == 1:
model.step()
models.append(model)
else:
pool = multiprocessing.Pool()
try:
numcores = multiprocessing.cpu_count()
models = pool.map(stationsim_RipleysK.run_model, [model_params for _ in range(n_runs)])
finally:
pool.close() # Make sure whatever happens the processes are killed
return models
@staticmethod
def run_model(model_params):
"""
Create a new stationsim model using `model_params` and step it
until it has finished.
Parameters
------
model_params : dict
`model_params` dictionary of model parameters required for
stationsim to run. See stationsim_model.py for more details.
Returns
------
model : StaionSim object
the finished model
"""
model = Model(**model_params)
while model.status == 1:
model.step()
return model
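# Hedged example of the model_params dict these helpers expect; only the keys
# referenced elsewhere in this module are shown and the actual stationsim
# Model may require additional parameters:
#   model_params = {"width": 200, "height": 100, "pop_total": 30,
#                   "gates_speed": 1, "random_seed": None}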
def ripleysKE(self, collisions, width, height):
""" Generate Ripley's K (RK) curve for collisions in StationSim region.
For more info on RK see:
https://docs.astropy.org/en/stable/stats/ripley.html
https://wiki.landscapetoolbox.org/doku.php/spatial_analysis_methods:ripley_s_k_and_pair_correlation_function"
Parameters
------
collisions : list
list of model `collisions`
width, height : float
`width` and `height` of stationsim model
Returns
------
rkes, rs : list
lists of radii `rs` and corresponding Ripley's K values `rkes`
for a given set of model collisions.
"""
"define area of stationsim."
"init astropy RKE class with stationsim boundaries/area"
area = width * height
rke = RipleysKEstimator(area = area,
x_max = width, y_max = height,
y_min = 0, x_min = 0)
"""generate list of radii to assess. We generate 10 between 0
and the root of half the total area. More radii would give a higher
resolution but increases the computation time.
see https://wiki.landscapetoolbox.org/doku.php/spatial_analysis_methods:
ripley_s_k_and_pair_correlation_function
for more details on this"
"""
r = np.linspace(0, np.sqrt(area/2), 10)
"generate the full list of radii for data frame later."
"just repeats r above for how many models we have"
rs = [r]*len(collisions)
"placeholder list for RK estimates"
rkes = []
for i, collision in enumerate(collisions):
"""estimate RK curve given model collisions and list of radii
Note mode arguement here for how to deal with common edge effect problem.
Choice doesnt seem to have much effect in this case.
Either none or translation recommended.
"""
#rkes.append(rke(collisions, radii=r, mode='none'))
rkes.append(rke(collision, radii=r, mode='translation'))
#rkes.append(rke(collisions, radii=r, mode='ohser'))
#rkes.append(rke(collisions, radii=r, mode='var-width'))
#rkes.append(ke(collisions, radii=r, mode='ripley'))
"this can take a long time so here's a progess bar"
print("\r" + str((i+1)/len(collisions)*100)
+ "% complete ", end = "")
return rkes, rs
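# Minimal usage sketch (illustrative only, not executed here): with synthetic
# collision coordinates for a unit square,
#   helper = stationsim_RipleysK()
#   collisions = [np.random.uniform(0, 1, size=(50, 2))]
#   rkes, rs = helper.ripleysKE(collisions, width=1, height=1)
# rkes[0] then holds the RK value at each radius in rs[0].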
def panel_Regression_Prep(self, rkes, rs, id_number):
""" Build the list of model RKEs into a dataframe
Output dataframe has 4 columns
x: list of radii on which RK is estimated. Renamed to x for use
in R regression later
y: RKE curve corresponding to a given model's collisions and radii.
Renamed to y for R regression later
ids: identifies which specific model the row belongs to.
This has format "A_B" it is the Bth model of batch A.
For example if the id reads "0_8" it is from the eighth model of the
0th batch
split: identifies which batch the row belongs to. Either 0 (control group)
or 1 (test group)
Parameters
------
rkes, rs : list
list of lists of radii `rs` and corresponding RK estimates `rkes`.
Each pair of sublists corresponds to an individual model.
id_number : int
Assign each row of the data frame a number 0 if it is a control group
(e.g. python station results) or 1 if it is a test group (e.g. cpp results.)
Returns
------
data : array_like
assembled RK `data` from our list of models ready for fitting a
regression in R. Should have 4 columns as defined above.
"""
"preprocessing"
num_rs = len(rs[0])
rkes = np.ravel(rkes)
rs = np.ravel(rs)
data = pd.DataFrame([rs, rkes]).T
"rename rs and rkes columns for regression later."
data.columns = ["x", "y"]
"""generate individual model ID numbers. Each model has 10 entries
(10 radii) so every 10 entrires belong to the ith model. Start
with a list where every 10 values is one intiger (the model id).
"""
ids = (np.arange(len(rkes))//num_rs).tolist()
""" Then prefix every id with the batch number e.g. model 8 from
batch 0 becomes 0_8 . Allows unique IDs for every model such
that panel regression in R can recognise "individuals".
"""
for i, item in enumerate(ids):
ids[i] = str(id_number) + "_" + str(item)
data["ids"] = ids
"""add a column with just the batch number. for batch 0 this
is a column of 0s.
0 - control group. 1 - test group"""
split = [id_number] * len(rkes)
data["split"] = split
return data
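# Illustrative shape of the frame returned above (values made up): with 10
# radii per model and id_number=0 the first rows look like
#        x      y  ids  split
#   0  0.0   0.00  0_0      0
#   1  5.2  81.33  0_0      0
# i.e. each block of 10 consecutive rows shares one "ids" value.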
def generate_Control_Frame(self, collisions, model_params):
"""generate a control group of RK curves to compare against
- calculate RK curves of each model's collisions
- store values in data frame.
- save data frame as a control to load against in main
Parameters
------
collisions : list
list of (nx2) arrays of agent `collisions` coordinates, one per model.
model_params : dict
dictionary of stationsim model parameters; used here for the model
width/height and to build the control file name.
Returns
------
data: array_like
assembled RK `data` from our list of collisions ready for fitting a
regression in R. Should have 4 columns as defined above in
panel_regression_prep.
"""
width = model_params["width"]
height = model_params["height"]
pop_total = model_params["pop_total"]
gates_speed = model_params["gates_speed"]
rkes, rs = self.ripleysKE(collisions, width, height)
data = self.panel_Regression_Prep(rkes, rs, 0)
f_name = "RK_csvs/control_" + f"{width}_{height}_{pop_total}_{gates_speed}"
f_name += ".csv"
self.save_Frame(data, f_name)
return data
def generate_Test_Frame(self, collisions, width, height):
""" Generate frame of test values to compare against
Parameters
------
collisions : list
list of finished stationsim models `collisions` between agents.
Returns
------
data: array_like
assembled RK `data` from our list of collisions ready for fitting a
regression in R. Should have 4 columns as defined above in
panel_regression_prep.
"""
rkes, rs = self.ripleysKE(collisions, width, height)
data = self.panel_Regression_Prep(rkes, rs, 1)
return data
def save_Frame(self, data, f_name):
""" Save a pandas data frame
Parameters
------
data : array_like
pandas `data` frame. usually from generate_Control/Test_Frame
output
f_name : str
`f_name` file name
"""
data.to_csv(f_name, index = False)
def load_Frame(self, f_name):
""" Load a pandas data frame.
Parameters
------
f_name : str
`f_name` file name
Returns
------
data : array_like
Pandas `data` frame usually from generate_Control/Test_Frame
output.
"""
return pd.read_csv(f_name)
def collisions_kde(self, collisions, width, height):
""" Plot spread of collisions through stationsim as a KDE plot
Parameters
------
collisions : list
some list of coordinates where `collisions` occur
"""
x = collisions[:,0]
y = collisions[:,1]
plt.figure()
im = kdeplot(x, y)
plt.xlim([0, width])
plt.ylim([0, height])
plt.xlabel("StationSim Width")
plt.ylabel("StationSim Height")
plt.title("KDE of Agent Collisions over StationSim")
def spaghetti_RK_Plot(self, data):
"""plot RK trajectories for several models and control and test batches
Parameters
------
data : array_like
data frame from generate_Control/Test_Frame output
"""
colours = ["black", "orangered"]
"0 black for control models"
"1 orange for test models"
f = plt.figure()
for item in set(data["ids"]):
sub_data = data.loc[data["ids"] == item]
rs = sub_data["x"]
rkes = sub_data["y"]
split = (sub_data["split"]).values[0]
plt.plot(rs,rkes, color = colours[split])
plt.plot(-1, -1, alpha = 1, color = colours[0],
label = "Control Group RKs")
plt.plot(-1, -1, alpha = 1, color = colours[1],
label = "Test Group RKs")
plt.xlabel("radius of RK circle")
plt.ylabel("RK Score")
plt.legend()
def notebook_RK_Plot(self, data1, data2):
"""Plot for RK notebook showing two extreme examples of RK curves
The idea is to have two frames with exactly one model in that show two
extreme cases of collisions clustering. We have a tight clustering case
in orange with a rapidly increasing RK score and a sparse case with a
shallow linear RK score.
Parameters
------
data1, data2 : array_like
`data1` and `data2` are two RKE dataframes from generate_Control_Frame.
They have a structure specified in said function.
"""
colours = ["black", "orangered"]
"0 black for control models"
"1 orange for test models"
f = plt.figure()
rs1 = data1["x"]
rkes1 = data1["y"]
plt.plot(rs1,rkes1, color = colours[0], label = "Dispersed Queueing Case")
rs2 = data2["x"]
rkes2 = data2["y"]
plt.plot(rs2,rkes2, color = colours[1], label = "Clustered Queueing Case")
plt.xlabel("radius of RK circle")
plt.ylabel("RK Score")
plt.legend()
def main(self, test_collisions, model_params):
"""Main function for comparing python and cpp outputs.
- Check a control file exists given specified model parameters
- If it exists, load it. If not, generate one using 100 model runs
- Generate control group data frame
- Calculate RK curves of test models.
- Generate corresponding test group data frame.
- concat control and test frames and save for analysis in R
using RK_population_modelling.R
Parameters
------
test_collisions : list
list of (nx2) array stationsim model 'collisions'
Returns
------
data : array_like
Joined pandas `data` frame for control and test groups.
"""
"generate control file name to load from model parameters"
width = model_params["width"]
height = model_params["height"]
pop_total = model_params["pop_total"]
gates_speed = model_params["gates_speed"]
f_name = "RK_csvs/control_" + f"{width}_{height}_{pop_total}_{gates_speed}" + ".csv"
"try loading said file. if no file make and save one."
try:
data_control = self.load_Frame(f_name)
print("Control data found at: " + f_name)
except:
print("No control frame found for given parameters.")
print("Generating control frame using large number of models (100).")
print("This may take a while if you have a large population of agents")
control_models = self.generate_Model_Sample(100, model_params)
data_control = self.generate_Control_Frame(control_models, model_params)
"generate data frame from test_collisions"
data_test = self.generate_Test_Frame(test_collisions, width, height)
"join control and test ferames and save as joined frame."
data = pd.concat([data_control, data_test])
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of the
# Apode Project (https://github.com/mchalela/apode).
# Copyright (c) 2020, <NAME> and <NAME>
# License: MIT
# Full Text: https://github.com/ngrion/apode/blob/master/LICENSE.txt
from apode import datasets
from apode.basic import ApodeData
import numpy as np
import pandas as pd
import pytest
# =============================================================================
# TESTS COMMON
# =============================================================================
def test_default_call():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
call_result = ad.poverty("headcount", pline=pline)
method_result = ad.poverty.headcount(pline=pline)
assert call_result == method_result
def test_invalid():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
with pytest.raises(AttributeError):
ad.poverty("foo")
def test_get_pline_none():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
# pline is None
pline = 0.5 * np.median(ad.data.values)
assert ad.poverty.headcount(pline=None) == ad.poverty.headcount(
pline=pline
)
def test_get_pline_factor():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
# factor < 0:
with pytest.raises(ValueError):
ad.poverty.hagenaars(pline=pline, factor=-3)
with pytest.raises(ValueError):
ad.poverty.chakravarty(pline=pline, factor=-3)
with pytest.raises(ValueError):
ad.poverty.hagenaars(pline=None, factor=-3)
with pytest.raises(ValueError):
ad.poverty.chakravarty(pline=None, factor=-3)
def test_get_pline_median():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
factor = 0.3
pline = factor * np.median(ad.data.values)
assert ad.poverty.headcount(
pline="median", factor=factor
) == ad.poverty.headcount(pline=pline)
def test_get_pline_mean():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
factor = 0.3
pline = factor * np.mean(ad.data.values)
assert ad.poverty.headcount(
pline="mean", factor=factor
) == ad.poverty.headcount(pline=pline)
def test_get_pline_quantile():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
# pline = "quantile"
q = 0.3
factor = 0.3
pline = factor * np.quantile(ad.data.values, q)
assert ad.poverty.chakravarty(
pline="quantile", factor=factor, q=q
) == ad.poverty.chakravarty(pline=pline)
assert ad.poverty.hagenaars(
pline="quantile", factor=factor, q=q
) == ad.poverty.hagenaars(pline=pline)
# pline = "quantile", q out of range
with pytest.raises(ValueError):
ad.poverty.hagenaars(pline="quantile", q=1.2)
with pytest.raises(ValueError):
ad.poverty.hagenaars(pline="quantile", q=-0.2)
# =============================================================================
# TESTS HEADCOUNT
# =============================================================================
def test_headcount_method():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
assert ad.poverty.headcount(pline=pline) == 0.27
def test_headcount_call():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
assert ad.poverty("headcount", pline=pline) == 0.27
def test_headcount_call_equal_method():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
call_result = ad.poverty("headcount", pline=pline)
method_result = ad.poverty.headcount(pline=pline)
assert call_result == method_result
def test_headcount_valid_pline():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
with pytest.raises(ValueError):
ad.poverty("headcount", pline=-1)
with pytest.raises(ValueError):
ad.poverty("headcount", pline=0)
def test_headcount_extreme_values():
ad = datasets.make_uniform(seed=42, size=300, mu=100, nbin=None)
pline_min = np.min(ad.data.values) / 2
pline_max = np.max(ad.data.values) + 1
assert ad.poverty("headcount", pline=pline_min) == 0
assert ad.poverty("headcount", pline=pline_max) == 1
def test_headcount_symmetry():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = np.mean(ad.data.values)
y = ad.data["x"].tolist()
np.random.shuffle(y)
df2 = pd.DataFrame({"x": y})
ad2 = ApodeData(df2, income_column="x")
assert ad.poverty(method="headcount", pline=pline) == ad2.poverty(
method="headcount", pline=pline
)
def test_headcount_replication():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
k = 2 # factor
pline = np.mean(ad.data.values)
y = k * ad.data["x"].tolist()
df2 = pd.DataFrame({"x": y})
ad2 = ApodeData(df2, income_column="x")
assert ad.poverty("headcount", pline=pline) == ad2.poverty(
"headcount", pline=pline
)
def test_headcount_homogeneity():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
k = 2 # factor
pline = np.mean(ad.data.values)
y = ad.data["x"].tolist()
y = [yi * k for yi in y]
df2 = pd.DataFrame({"x": y})
ad2 = ApodeData(df2, income_column="x")
assert ad.poverty("headcount", pline=pline) == ad2.poverty(
"headcount", pline=pline * k
)
# =============================================================================
# TESTS GAP
# =============================================================================
def test_gap_method():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
assert ad.poverty.gap(pline=pline) == 0.13715275200855706
def test_gap_call():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
assert ad.poverty("gap", pline=pline) == 0.13715275200855706
def test_gap_call_equal_method():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
call_result = ad.poverty("gap", pline=pline)
method_result = ad.poverty.gap(pline=pline)
assert call_result == method_result
def test_gap_valid_pline():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
with pytest.raises(ValueError):
ad.poverty("gap", pline=-1)
def test_gap_extreme_values():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline_min = np.min(ad.data.values) / 2
pline_max = np.max(ad.data.values) + 1
assert ad.poverty("gap", pline=pline_min) == 0
assert ad.poverty("gap", pline=pline_max) <= 1
def test_gap_symmetry():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = np.mean(ad.data.values)
y = ad.data["x"].tolist()
np.random.shuffle(y)
df2 = pd.DataFrame({"x": y})
ad2 = ApodeData(df2, income_column="x")
assert ad.poverty(method="gap", pline=pline) == ad2.poverty(
method="gap", pline=pline
)
def test_gap_replication():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
k = 2 # factor
pline = np.mean(ad.data.values)
y = k * ad.data["x"].tolist()
df2 = pd.DataFrame({"x": y})
ad2 = ApodeData(df2, income_column="x")
np.testing.assert_allclose(
ad.poverty("gap", pline=pline), ad2.poverty("gap", pline=pline)
)
def test_gap_homogeneity():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
k = 2 # factor
pline = np.mean(ad.data.values)
y = ad.data["x"].tolist()
y = [yi * k for yi in y]
df2 = pd.DataFrame({"x": y})
ad2 = ApodeData(df2, income_column="x")
assert ad.poverty("gap", pline=pline) == ad2.poverty(
"gap", pline=pline * k
)
# =============================================================================
# TESTS SEVERITY
# =============================================================================
def test_severity_method():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
assert ad.poverty.severity(pline=pline) == 0.0925444945807559
def test_severity_call():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
assert ad.poverty("severity", pline=pline) == 0.0925444945807559
def test_severity_call_equal_method():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
call_result = ad.poverty("severity", pline=pline)
method_result = ad.poverty.severity(pline=pline)
assert call_result == method_result
def test_severity_valid_pline():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
with pytest.raises(ValueError):
ad.poverty("severity", pline=-1)
def test_severity_extreme_values():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline_min = np.min(ad.data.values) / 2
pline_max = np.max(ad.data.values) + 1
assert ad.poverty("severity", pline=pline_min) == 0
assert ad.poverty("severity", pline=pline_max) <= 1
def test_severity_symmetry():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = np.mean(ad.data.values)
y = ad.data["x"].tolist()
np.random.shuffle(y)
df2 = pd.DataFrame({"x": y})
ad2 = ApodeData(df2, income_column="x")
assert ad.poverty(method="severity", pline=pline) == ad2.poverty(
method="severity", pline=pline
)
def test_severity_replication():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
k = 2 # factor
pline = np.mean(ad.data.values)
y = k * ad.data["x"].tolist()
df2 = pd.DataFrame({"x": y})
ad2 = ApodeData(df2, income_column="x")
np.testing.assert_allclose(
ad.poverty("severity", pline=pline),
ad2.poverty("severity", pline=pline),
)
def test_severity_homogeneity():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
k = 2 # factor
pline = np.mean(ad.data.values)
y = ad.data["x"].tolist()
y = [yi * k for yi in y]
df2 = pd.DataFrame({"x": y})
ad2 = ApodeData(df2, income_column="x")
assert ad.poverty("severity", pline=pline) == ad2.poverty(
"severity", pline=pline * k
)
# =============================================================================
# TESTS FGT
# =============================================================================
def test_fgt_method():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
assert ad.poverty.fgt(pline=pline) == 0.27
def test_fgt_call():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
assert ad.poverty("fgt", pline=pline) == 0.27
def test_fgt_call_equal_method():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
call_result = ad.poverty("fgt", pline=pline)
method_result = ad.poverty.fgt(pline=pline)
assert call_result == method_result
def test_fgt_valid_pline():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
with pytest.raises(ValueError):
ad.poverty("fgt", pline=-1)
with pytest.raises(ValueError):
ad.poverty("fgt", pline=0)
def test_fgt_valid_alpha():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
with pytest.raises(ValueError):
ad.poverty.fgt(pline=1, alpha=-2)
def test_fgt_alpha_values():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = np.mean(ad.data.values)
assert ad.poverty.fgt(pline=pline, alpha=1) == 0.26003924372489007
assert ad.poverty.fgt(pline=pline, alpha=0) == 0.4766666666666667
assert ad.poverty.fgt(pline=pline, alpha=10) == 0.049479474144909996
def test_fgt_extreme_values():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline_min = np.min(ad.data.values) / 2
pline_max = np.max(ad.data.values) + 1
assert ad.poverty("fgt", pline=pline_min) == 0
assert ad.poverty("fgt", pline=pline_max) <= 1
def test_fgt_symmetry():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = np.mean(ad.data.values)
y = ad.data["x"].tolist()
np.random.shuffle(y)
df2 = pd.DataFrame({"x": y})
ad2 = ApodeData(df2, income_column="x")
assert ad.poverty(method="fgt", pline=pline) == ad2.poverty(
method="fgt", pline=pline
)
def test_fgt_replication():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
k = 2 # factor
pline = np.mean(ad.data.values)
y = k * ad.data["x"].tolist()
df2 = pd.DataFrame({"x": y})
ad2 = ApodeData(df2, income_column="x")
assert ad.poverty("fgt", pline=pline) == ad2.poverty("fgt", pline=pline)
def test_fgt_homogeneity():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
k = 2 # factor
pline = np.mean(ad.data.values)
y = ad.data["x"].tolist()
y = [yi * k for yi in y]
df2 = pd.DataFrame({"x": y})
ad2 = ApodeData(df2, income_column="x")
assert ad.poverty("fgt", pline=pline) == ad2.poverty(
"fgt", pline=pline * k
)
# =============================================================================
# TESTS SEN
# =============================================================================
def test_sen_method():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
assert ad.poverty.sen(pline=pline) == 0.1826297337125855
def test_sen_call():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
assert ad.poverty("sen", pline=pline) == 0.1826297337125855
def test_sen_call_equal_method():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
call_result = ad.poverty("sen", pline=pline)
method_result = ad.poverty.sen(pline=pline)
assert call_result == method_result
def test_sen_valid_pline():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
with pytest.raises(ValueError):
ad.poverty("sen", pline=-1)
with pytest.raises(ValueError):
ad.poverty("sen", pline=0)
def test_sen_extreme_values():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline_min = np.min(ad.data.values) / 2
pline_max = np.max(ad.data.values) + 1
assert ad.poverty("sen", pline=pline_min) == 0
assert ad.poverty("sen", pline=pline_max) <= 1
def test_sen_symmetry():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = np.mean(ad.data.values)
y = ad.data["x"].tolist()
np.random.shuffle(y)
df2 = pd.DataFrame({"x": y})
ad2 = ApodeData(df2, income_column="x")
assert ad.poverty(method="sen", pline=pline) == ad2.poverty(
method="sen", pline=pline
)
def test_sen_homogeneity():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
k = 2 # factor
pline = np.mean(ad.data.values)
y = ad.data["x"].tolist()
y = [yi * k for yi in y]
df2 = pd.DataFrame({"x": y})
ad2 = ApodeData(df2, income_column="x")
assert ad.poverty("sen", pline=pline) == ad2.poverty(
"sen", pline=pline * k
)
# =============================================================================
# TESTS SST
# =============================================================================
def test_sst_method():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
assert ad.poverty.sst(pline=pline) == 0.24950968072455512
def test_sst_call():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
assert ad.poverty("sst", pline=pline) == 0.24950968072455512
def test_sst_call_equal_method():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
call_result = ad.poverty("sst", pline=pline)
method_result = ad.poverty.sst(pline=pline)
assert call_result == method_result
def test_sst_valid_pline():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
with pytest.raises(ValueError):
ad.poverty("sst", pline=-1)
with pytest.raises(ValueError):
ad.poverty("sst", pline=0)
# @pytest.mark.xfail
def test_sst_extreme_values():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline_min = np.min(ad.data.values) / 2
pline_max = np.max(ad.data.values) + 1
assert ad.poverty("sst", pline=pline_min) == 0
assert ad.poverty("sst", pline=pline_max) <= 1 # CHECK, fails
def test_sst_symmetry():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = np.mean(ad.data.values)
y = ad.data["x"].tolist()
np.random.shuffle(y)
df2 = pd.DataFrame({"x": y})
ad2 = ApodeData(df2, income_column="x")
assert ad.poverty(method="sst", pline=pline) == ad2.poverty(
method="sst", pline=pline
)
def test_sst_homogeneity():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
k = 2 # factor
pline = np.mean(ad.data.values)
y = ad.data["x"].tolist()
y = [yi * k for yi in y]
df2 = pd.DataFrame({"x": y})
ad2 = ApodeData(df2, income_column="x")
assert ad.poverty("sst", pline=pline) == ad2.poverty(
"sst", pline=pline * k
)
# =============================================================================
# TESTS WATTS
# =============================================================================
def test_watts_method():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
assert ad.poverty.watts(pline=pline) == 0.2724322042654472
def test_watts_call():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
assert ad.poverty("watts", pline=pline) == 0.2724322042654472
def test_watts_call_equal_method():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
call_result = ad.poverty("watts", pline=pline)
method_result = ad.poverty.watts(pline=pline)
assert call_result == method_result
def test_watts_valid_pline():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
with pytest.raises(ValueError):
ad.poverty("watts", pline=-1)
with pytest.raises(ValueError):
ad.poverty("watts", pline=0)
def test_watts_extreme_values():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline_min = np.min(ad.data.values) / 2
assert ad.poverty("watts", pline=pline_min) == 0
def test_watts_symmetry():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = np.mean(ad.data.values)
y = ad.data["x"].tolist()
np.random.shuffle(y)
df2 = pd.DataFrame({"x": y})
ad2 = ApodeData(df2, income_column="x")
assert ad.poverty(method="watts", pline=pline) == ad2.poverty(
method="watts", pline=pline
)
import os, sys, platform, json, operator, multiprocessing, io, random, itertools, warnings, h5py, \
statistics, inspect, requests, validators, math, time, pprint, datetime, importlib, fsspec, scipy
# Python utils.
from textwrap import dedent
# External utils.
from tqdm import tqdm #progress bar.
from natsort import natsorted #file sorting.
import appdirs #os-agonistic folder.
# ORM.
from peewee import Model, CharField, IntegerField, BlobField, BooleanField, DateTimeField, ForeignKeyField
from playhouse.sqlite_ext import SqliteExtDatabase, JSONField
from playhouse.fields import PickleField
import dill as dill #complex serialization.
# ETL.
import pandas as pd
import numpy as np
from PIL import Image as Imaje
# Preprocessing & metrics.
import sklearn
from sklearn.model_selection import train_test_split, StratifiedKFold, KFold #mandatory separate import.
from sklearn.feature_extraction.text import CountVectorizer
# Deep learning.
import keras
import torch
# Visualization.
import plotly.graph_objects as go
import plotly.express as px
import plotly.figure_factory as ff
from .configuration import setup_database, destroy_database, get_db
name = "aiqc"
"""
https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
- 'fork' makes all variables on main process available to child process. OS attempts not to duplicate all variables.
- 'spawn' requires that variables be passed to child as args, and seems to play by pickle's rules (e.g. no func in func).
- In Python 3.8, macOS changed default from 'fork' to 'spawn' , which is how I learned all this.
- Windows does not support 'fork'. It supports 'spawn'. So basically I have to play by spawn/ pickle rules.
- Spawn/ pickle dictates (1) where execute_jobs func is placed, (2) if MetricsCutoff func works, (3) if tqdm output is visible.
- Update: now MetricsCutoff is not working in `fork` mode.
- Wrote the `poll_progress` func for 'spawn' situations.
- If everything hits the fan, `run_jobs(in_background=False)` for a normal for loop.
- Tried `concurrent.futures` but it only works with `.py` from command line.
"""
if (os.name != 'nt'):
# If `force=False`, then `importlib.reload(aiqc)` triggers `RuntimeError: context already set`.
multiprocessing.set_start_method('fork', force=True)
app_dir_no_trailing_slash = appdirs.user_data_dir("aiqc")
# Adds either a trailing slash or backslashes depending on OS.
app_dir = os.path.join(app_dir_no_trailing_slash, '')
default_config_path = app_dir + "config.json"
default_db_path = app_dir + "aiqc.sqlite3"
#==================================================
# CONFIGURATION
#==================================================
def setup():
setup_database([ File, Tabular, Image,
Dataset,
Label, Feature,
Splitset, Featureset, Foldset, Fold,
Encoderset, Labelcoder, Featurecoder,
Algorithm, Hyperparamset, Hyperparamcombo,
Queue, Jobset, Job, Predictor, Prediction,
FittedEncoderset, FittedLabelcoder,
Window
])
def destroy_db(confirm:bool=False, rebuild:bool=False):
destroy_database([ File, Tabular, Image,
Dataset,
Label, Feature,
Splitset, Featureset, Foldset, Fold,
Encoderset, Labelcoder, Featurecoder,
Algorithm, Hyperparamset, Hyperparamcombo,
Queue, Jobset, Job, Predictor, Prediction,
FittedEncoderset, FittedLabelcoder,
Window
], confirm, rebuild)
#==================================================
# ORM
#==================================================
# --------- GLOBALS ---------
categorical_encoders = [
'OneHotEncoder', 'LabelEncoder', 'OrdinalEncoder',
'Binarizer', 'LabelBinarizer', 'MultiLabelBinarizer'
]
# --------- HELPER FUNCTIONS ---------
def listify(supposed_lst:object=None):
"""
- When only providing a single element, it's easy to forget to put it inside a list!
"""
if (supposed_lst is not None):
if (not isinstance(supposed_lst, list)):
supposed_lst = [supposed_lst]
# If it was already a list, check it for emptiness and `None`.
elif (isinstance(supposed_lst, list)):
if (not supposed_lst):
raise ValueError("Yikes - The list you provided is empty.")
if (None in supposed_lst):
raise ValueError(dedent(
f"Yikes - The list you provided contained `None` as an element." \
f"{supposed_lst}"
))
# Allow `is None` to pass through because we need it to trigger null conditions.
return supposed_lst
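# e.g. (illustrative): listify("a") -> ["a"]; listify(["a", "b"]) -> ["a", "b"];
# listify(None) -> None; listify([]) or listify([None]) raises ValueError.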
def dill_serialize(objekt:object):
blob = io.BytesIO()
dill.dump(objekt, blob)
blob = blob.getvalue()
return blob
def dill_deserialize(blob:bytes):
objekt = io.BytesIO(blob)
objekt = dill.load(objekt)
return objekt
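# e.g. (illustrative round trip): blob = dill_serialize(lambda x: x + 1)
# then dill_deserialize(blob)(2) == 3; dill, unlike pickle, handles lambdas.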
def dill_reveal_code(serialized_objekt:object, print_it:bool=True):
code_str = (
dill.source.getsource(
dill_deserialize(serialized_objekt).__code__
)
)
if (print_it == True):
print(dedent(code_str))
return code_str
def torch_batcher(
features:object
, labels:object
, batch_size = 5
, enforce_sameSize:bool=False
, allow_1Sample:bool=False
):
features = torch.split(features, batch_size)
labels = torch.split(labels, batch_size)
features = torch_drop_invalid_batchSize(features, batch_size, enforce_sameSize, allow_1Sample)
labels = torch_drop_invalid_batchSize(labels, batch_size, enforce_sameSize, allow_1Sample)
return features, labels
def torch_drop_invalid_batchSize(
batched_data:object
, batch_size = 5
, enforce_sameSize:bool=False
, allow_1Sample:bool=False
):
if (batch_size == 1):
print("\nWarning - `batch_size==1` can lead to errors.\nE.g. running BatchNormalization on a single sample.\n")
# Similar to a % remainder, this will only apply to the last element in the batch.
last_batch_size = batched_data[-1].shape[0]
if (
((allow_1Sample == False) and (last_batch_size == 1))
or
((enforce_sameSize == True) and (batched_data[0].shape[0] != last_batch_size))
):
# So if there is a problem, just trim the last split.
batched_data = batched_data[:-1]
return batched_data
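# Usage sketch for the torch helpers (illustrative only):
#   features, labels = torch.randn(11, 3), torch.randn(11, 1)
#   batched_X, batched_y = torch_batcher(features, labels, batch_size=5)
# torch.split yields batches of 5, 5 and 1; the trailing 1-sample batch is
# dropped because allow_1Sample defaults to False, leaving two batches of 5.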
def tf_batcher(features:object, labels:object, batch_size = 5):
"""
- `np.array_split` allows for subarrays to be of different sizes, which is rare.
https://numpy.org/doc/stable/reference/generated/numpy.array_split.html
- If there is a remainder, it will evenly distribute samples into the other arrays.
- Have not tested this with >= 3D data yet.
"""
rows_per_batch = math.ceil(features.shape[0]/batch_size)
batched_features = np.array_split(features, rows_per_batch)
batched_features = np.array(batched_features, dtype=object)
batched_labels = np.array_split(labels, rows_per_batch)
batched_labels = np.array(batched_labels, dtype=object)
return batched_features, batched_labels
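# Usage sketch for tf_batcher (illustrative only):
#   X, y = np.random.rand(10, 4), np.random.rand(10, 1)
#   bX, by = tf_batcher(X, y, batch_size=5)
# np.array_split makes ceil(10/5) = 2 sub-arrays of 5 samples each; with a
# remainder the leftover samples are spread across the sub-arrays rather than
# forming one short final batch.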
# --------- END HELPERS ---------
class BaseModel(Model):
"""
- Runs when the package is imported. http://docs.peewee-orm.com/en/latest/peewee/models.html
- ORM: by inheritting the BaseModel class, each Model class does not have to set Meta.
"""
class Meta:
database = get_db()
class Dataset(BaseModel):
"""
The sub-classes are not 1-1 tables. They simply provide namespacing for functions
to avoid functions riddled with if statements about dataset_type and null parameters.
"""
dataset_type = CharField() #tabular, image, sequence, graph, audio.
file_count = IntegerField() # only includes file_types that match the dataset_type.
source_path = CharField(null=True)
def make_label(id:int, columns:list):
columns = listify(columns)
l = Label.from_dataset(dataset_id=id, columns=columns)
return l
def make_feature(
id:int
, include_columns:list = None
, exclude_columns:list = None
):
include_columns = listify(include_columns)
exclude_columns = listify(exclude_columns)
feature = Feature.from_dataset(
dataset_id = id
, include_columns = include_columns
, exclude_columns = exclude_columns
)
return feature
def to_pandas(id:int, columns:list=None, samples:list=None):
dataset = Dataset.get_by_id(id)
columns = listify(columns)
samples = listify(samples)
if (dataset.dataset_type == 'tabular'):
df = Dataset.Tabular.to_pandas(id=dataset.id, columns=columns, samples=samples)
elif (dataset.dataset_type == 'text'):
df = Dataset.Text.to_pandas(id=dataset.id, columns=columns, samples=samples)
elif ((dataset.dataset_type == 'image') or (dataset.dataset_type == 'sequence')):
raise ValueError("\nYikes - `dataset_type={dataset.dataset_type}` does not have a `to_pandas()` method.\n")
return df
def to_numpy(id:int, columns:list=None, samples:list=None):
dataset = Dataset.get_by_id(id)
columns = listify(columns)
samples = listify(samples)
if (dataset.dataset_type == 'tabular'):
arr = Dataset.Tabular.to_numpy(id=id, columns=columns, samples=samples)
elif (dataset.dataset_type == 'image'):
if (columns is not None):
raise ValueError("\nYikes - `Dataset.Image.to_numpy` does not accept a `columns` argument.\n")
arr = Dataset.Image.to_numpy(id=id, samples=samples)
elif (dataset.dataset_type == 'text'):
arr = Dataset.Text.to_numpy(id=id, columns=columns, samples=samples)
elif (dataset.dataset_type == 'sequence'):
arr = Dataset.Sequence.to_numpy(id=id, columns=columns, samples=samples)
return arr
def to_strings(id:int, samples:list=None):
dataset = Dataset.get_by_id(id)
samples = listify(samples)
if (dataset.dataset_type == 'tabular' or dataset.dataset_type == 'image'):
raise ValueError("\nYikes - This Dataset class does not have a `to_strings()` method.\n")
elif (dataset.dataset_type == 'text'):
return Dataset.Text.to_strings(id=dataset.id, samples=samples)
def sorted_file_list(dir_path:str):
if (not os.path.exists(dir_path)):
raise ValueError(f"\nYikes - The path you provided does not exist according to `os.path.exists(dir_path)`:\n{dir_path}\n")
path = os.path.abspath(dir_path)
if (os.path.isdir(path) == False):
raise ValueError(f"\nYikes - The path that you provided is not a directory:{path}\n")
file_paths = os.listdir(path)
# prune hidden files and directories.
file_paths = [f for f in file_paths if not f.startswith('.')]
file_paths = [f for f in file_paths if not os.path.isdir(f)]
if not file_paths:
raise ValueError(f"\nYikes - The directory that you provided has no files in it:{path}\n")
# folder path is already absolute
file_paths = [os.path.join(path, f) for f in file_paths]
file_paths = natsorted(file_paths)
return file_paths
def get_main_file(id:int):
dataset = Dataset.get_by_id(id)
if (dataset.dataset_type == 'image'):
raise ValueError("\n Dataset class does not support get_main_file() method for `image` data type,\n")
file = File.select().join(Dataset).where(
Dataset.id==id, File.file_type=='tabular', File.file_index==0
)[0]
return file
def get_main_tabular(id:int):
"""
		Works on `Dataset.Tabular`, `Dataset.Sequence`, and `Dataset.Text`.
"""
file = Dataset.get_main_file(id)
return file.tabulars[0]
def arr_validate(ndarray):
if (type(ndarray).__name__ != 'ndarray'):
raise ValueError("\nYikes - The `ndarray` you provided is not of the type 'ndarray'.\n")
if (ndarray.dtype.names is not None):
raise ValueError(dedent("""
Yikes - Sorry, we do not support NumPy Structured Arrays.
However, you can use the `dtype` dict and `column_names` to handle each column specifically.
"""))
if (ndarray.size == 0):
raise ValueError("\nYikes - The ndarray you provided is empty: `ndarray.size == 0`.\n")
class Tabular():
"""
- Does not inherit the Dataset class e.g. `class Tabular(Dataset):`
because then ORM would make a separate table for it.
- It is just a collection of methods and default variables.
"""
dataset_type = 'tabular'
file_index = 0
file_count = 1
def from_path(
file_path:str
, source_file_format:str
, name:str = None
, dtype:object = None
, column_names:list = None
, skip_header_rows:object = 'infer'
, ingest:bool = True
):
column_names = listify(column_names)
accepted_formats = ['csv', 'tsv', 'parquet']
if (source_file_format not in accepted_formats):
raise ValueError(f"\nYikes - Available file formats include csv, tsv, and parquet.\nYour file format: {source_file_format}\n")
if (not os.path.exists(file_path)):
raise ValueError(f"\nYikes - The path you provided does not exist according to `os.path.exists(file_path)`:\n{file_path}\n")
if (not os.path.isfile(file_path)):
raise ValueError(dedent(
f"Yikes - The path you provided is a directory according to `os.path.isfile(file_path)`:" \
f"{file_path}" \
f"But `dataset_type=='tabular'` only supports a single file, not an entire directory.`"
))
# Use the raw, not absolute path for the name.
if (name is None):
name = file_path
source_path = os.path.abspath(file_path)
dataset = Dataset.create(
dataset_type = Dataset.Tabular.dataset_type
, file_count = Dataset.Tabular.file_count
, source_path = source_path
, name = name
)
try:
File.Tabular.from_file(
path = file_path
, source_file_format = source_file_format
, dtype = dtype
, column_names = column_names
, skip_header_rows = skip_header_rows
, ingest = ingest
, dataset_id = dataset.id
)
except:
dataset.delete_instance() # Orphaned.
raise
return dataset
def from_pandas(
dataframe:object
, name:str = None
, dtype:object = None
, column_names:list = None
):
column_names = listify(column_names)
if (type(dataframe).__name__ != 'DataFrame'):
raise ValueError("\nYikes - The `dataframe` you provided is not `type(dataframe).__name__ == 'DataFrame'`\n")
dataset = Dataset.create(
file_count = Dataset.Tabular.file_count
, dataset_type = Dataset.Tabular.dataset_type
, name = name
, source_path = None
)
try:
File.Tabular.from_pandas(
dataframe = dataframe
, dtype = dtype
, column_names = column_names
, dataset_id = dataset.id
)
except:
dataset.delete_instance() # Orphaned.
raise
return dataset
def from_numpy(
ndarray:object
, name:str = None
, dtype:object = None
, column_names:list = None
):
column_names = listify(column_names)
Dataset.arr_validate(ndarray)
dimensions = len(ndarray.shape)
if (dimensions > 2) or (dimensions < 1):
raise ValueError(dedent(f"""
Yikes - Tabular Datasets only support 1D and 2D arrays.
					Your array had <{dimensions}> dimensions.
"""))
dataset = Dataset.create(
file_count = Dataset.Tabular.file_count
, name = name
, source_path = None
, dataset_type = Dataset.Tabular.dataset_type
)
try:
File.Tabular.from_numpy(
ndarray = ndarray
, dtype = dtype
, column_names = column_names
, dataset_id = dataset.id
)
except:
dataset.delete_instance() # Orphaned.
raise
return dataset
def to_pandas(
id:int
, columns:list = None
, samples:list = None
):
file = Dataset.get_main_file(id)#`id` belongs to dataset, not file
columns = listify(columns)
samples = listify(samples)
df = File.Tabular.to_pandas(id=file.id, samples=samples, columns=columns)
return df
def to_numpy(
id:int
, columns:list = None
, samples:list = None
):
dataset = Dataset.get_by_id(id)
columns = listify(columns)
samples = listify(samples)
# This calls the method above. It does not need `.Tabular`
df = dataset.to_pandas(columns=columns, samples=samples)
ndarray = df.to_numpy()
return ndarray
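		# A minimal usage sketch for the Tabular class above (hypothetical column names; kept as comments so nothing runs at import time):
		#   df = pd.DataFrame({'petal_width': [0.2, 1.3, 2.1], 'species': ['a', 'b', 'c']})
		#   dataset = Dataset.Tabular.from_pandas(dataframe=df, name='iris_sample')
		#   arr = Dataset.Tabular.to_numpy(id=dataset.id, columns=['petal_width'])   # 2D array with 1 column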
class Image():
dataset_type = 'image'
def from_folder(
folder_path:str
, name:str = None
, pillow_save:dict = {}
, ingest:bool = True
):
if ((pillow_save!={}) and (ingest==False)):
raise ValueError("\nYikes - `pillow_save` cannot be defined if `ingest==False`.\n")
if (name is None):
name = folder_path
source_path = os.path.abspath(folder_path)
file_paths = Dataset.sorted_file_list(source_path)
file_count = len(file_paths)
dataset = Dataset.create(
file_count = file_count
, name = name
, source_path = source_path
, dataset_type = Dataset.Image.dataset_type
)
#Make sure the shape and mode of each image are the same before writing the Dataset.
sizes = []
modes = []
for i, path in enumerate(tqdm(
file_paths
, desc = "🖼️ Validating Images 🖼️"
, ncols = 85
)):
img = Imaje.open(path)
sizes.append(img.size)
modes.append(img.mode)
if (len(set(sizes)) > 1):
dataset.delete_instance()# Orphaned.
raise ValueError(dedent(f"""
Yikes - All images in the Dataset must be of the same width and height.
`PIL.Image.size`\nHere are the unique sizes you provided:\n{set(sizes)}
"""))
elif (len(set(modes)) > 1):
dataset.delete_instance()# Orphaned.
raise ValueError(dedent(f"""
Yikes - All images in the Dataset must be of the same mode aka colorscale.
`PIL.Image.mode`\nHere are the unique modes you provided:\n{set(modes)}
"""))
try:
for i, p in enumerate(tqdm(
file_paths
, desc = "🖼️ Ingesting Images 🖼️"
, ncols = 85
)):
File.Image.from_file(
path = p
, pillow_save = pillow_save
, file_index = i
, ingest = ingest
, dataset_id = dataset.id
)
except:
dataset.delete_instance()
raise
return dataset
def from_urls(
urls:list
, pillow_save:dict = {}
, name:str = None
, source_path:str = None
, ingest:bool = True
):
if ((pillow_save!={}) and (ingest==False)):
raise ValueError("\nYikes - `pillow_save` cannot be defined if `ingest==False`.\n")
urls = listify(urls)
for u in urls:
validation = validators.url(u)
if (validation != True): #`== False` doesn't work.
raise ValueError(f"\nYikes - Invalid url detected within `urls` list:\n'{u}'\n")
file_count = len(urls)
dataset = Dataset.create(
file_count = file_count
, name = name
, dataset_type = Dataset.Image.dataset_type
, source_path = source_path
)
#Make sure the shape and mode of each image are the same before writing the Dataset.
sizes = []
modes = []
for i, url in enumerate(tqdm(
urls
, desc = "🖼️ Validating Images 🖼️"
, ncols = 85
)):
img = Imaje.open(
requests.get(url, stream=True).raw
)
sizes.append(img.size)
modes.append(img.mode)
if (len(set(sizes)) > 1):
raise ValueError(dedent(f"""
Yikes - All images in the Dataset must be of the same width and height.
`PIL.Image.size`\nHere are the unique sizes you provided:\n{set(sizes)}
"""))
elif (len(set(modes)) > 1):
raise ValueError(dedent(f"""
Yikes - All images in the Dataset must be of the same mode aka colorscale.
`PIL.Image.mode`\nHere are the unique modes you provided:\n{set(modes)}
"""))
try:
for i, url in enumerate(tqdm(
urls
, desc = "🖼️ Ingesting Images 🖼️"
, ncols = 85
)):
File.Image.from_url(
url = url
, pillow_save = pillow_save
, file_index = i
, ingest = ingest
, dataset_id = dataset.id
)
"""
for i, url in enumerate(urls):
file = File.Image.from_url(
url = url
, pillow_save = pillow_save
, file_index = i
, dataset_id = dataset.id
)
"""
except:
dataset.delete_instance() # Orphaned.
raise
return dataset
def to_pillow(id:int, samples:list=None):
"""
			- This does not have a `columns` attribute because it is only for fetching images.
- Have to fetch as image before feeding into numpy `numpy.array(Image.open())`.
- Future: could return the tabular data along with it.
- Might need this for Preprocess where rotate images and such.
"""
samples = listify(samples)
files = Dataset.Image.get_image_files(id, samples=samples)
images = [f.Image.to_pillow(f.id) for f in files]
return images
def to_numpy(id:int, samples:list=None):
"""
- Because Pillow works directly with numpy, there's no need for pandas right now.
- But downstream methods are using pandas.
"""
samples = listify(samples)
images = Dataset.Image.to_pillow(id, samples=samples)
images = [np.array(img) for img in images]
images = np.array(images)
"""
- Pixel values range from 0-255.
- `np.set_printoptions(threshold=99999)` to inspect for yourself.
- It will look like some are all 0, but that's just the black edges.
"""
images = images/255
return images
def get_image_files(id:int, samples:list=None):
samples = listify(samples)
files = File.select().join(Dataset).where(
Dataset.id==id, File.file_type=='image'
).order_by(File.file_index)# Ascending by default.
# Select from list by index.
if (samples is not None):
files = [files[i] for i in samples]
return files
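		# A minimal usage sketch for the Image class above (the folder path is hypothetical):
		#   dataset = Dataset.Image.from_folder(folder_path='photos/train', name='training_photos')
		#   arr = Dataset.Image.to_numpy(id=dataset.id)   # pixel values scaled to the 0-1 range, shape (file_count, height, width[, channels])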
class Text():
dataset_type = 'text'
file_count = 1
column_name = 'TextData'
def from_strings(
strings: list,
name: str = None
):
for expectedString in strings:
if type(expectedString) != str:
					raise ValueError(f'\nYikes - The input contains an object of non-str type: {type(expectedString)}\n')
dataframe = pd.DataFrame(strings, columns=[Dataset.Text.column_name], dtype="object")
return Dataset.Text.from_pandas(dataframe, name)
def from_pandas(
dataframe:object,
name:str = None,
dtype:object = None,
column_names:list = None
):
if Dataset.Text.column_name not in list(dataframe.columns):
raise ValueError("\nYikes - The `dataframe` you provided doesn't contain 'TextData' column. Please rename the column containing text data to 'TextData'`\n")
if dataframe[Dataset.Text.column_name].dtypes != 'O':
raise ValueError("\nYikes - The `dataframe` you provided contains 'TextData' column with incorrect dtype: column dtype != object\n")
dataset = Dataset.Tabular.from_pandas(dataframe, name, dtype, column_names)
dataset.dataset_type = Dataset.Text.dataset_type
dataset.save()
return dataset
def from_path(
file_path:str
, source_file_format:str
, name:str = None
, dtype:object = None
, column_names:list = None
, skip_header_rows:object = 'infer'
):
dataset = Dataset.Tabular.from_path(file_path, source_file_format, name, dtype, column_names, skip_header_rows)
dataset.dataset_type = Dataset.Text.dataset_type
dataset.save()
return dataset
def from_folder(
folder_path:str,
name:str = None
):
if name is None:
name = folder_path
source_path = os.path.abspath(folder_path)
input_files = Dataset.sorted_file_list(source_path)
files_data = []
for input_file in input_files:
with open(input_file, 'r') as file_pointer:
files_data.extend([file_pointer.read()])
return Dataset.Text.from_strings(files_data, name)
def to_pandas(
id:int,
columns:list = None,
samples:list = None
):
df = Dataset.Tabular.to_pandas(id, columns, samples)
			if ((columns is not None) and (Dataset.Text.column_name not in columns)):
return df
word_counts, feature_names = Dataset.Text.get_feature_matrix(df)
df = pd.DataFrame(word_counts.todense(), columns = feature_names)
return df
def to_numpy(
id:int,
columns:list = None,
samples:list = None
):
df = Dataset.Tabular.to_pandas(id, columns, samples)
			if ((columns is not None) and (Dataset.Text.column_name not in columns)):
return df.to_numpy()
word_counts, feature_names = Dataset.Text.get_feature_matrix(df)
return word_counts.todense()
def get_feature_matrix(
dataframe:object
):
count_vect = CountVectorizer(max_features = 200)
word_counts = count_vect.fit_transform(dataframe[Dataset.Text.column_name].tolist())
return word_counts, count_vect.get_feature_names()
def to_strings(
id:int,
samples:list = None
):
data_df = Dataset.Tabular.to_pandas(id, [Dataset.Text.column_name], samples)
return data_df[Dataset.Text.column_name].tolist()
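		# A minimal usage sketch for the Text class above (the strings are illustrative):
		#   dataset = Dataset.Text.from_strings(['the cat sat', 'the dog ran'])
		#   df = Dataset.Text.to_pandas(id=dataset.id, columns=['TextData'])   # bag-of-words counts via CountVectorizer
		#   raw = Dataset.Text.to_strings(id=dataset.id)                       # the original strings back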
class Sequence():
dataset_type = 'sequence'
def from_numpy(
ndarray3D_or_npyPath:object
, name:str = None
, dtype:object = None
, column_names:list = None
, ingest:bool = True
):
if ((ingest==False) and (isinstance(dtype, dict))):
raise ValueError("\nYikes - If `ingest==False` then `dtype` must be either a str or a single NumPy-based type.\n")
# Fetch array from .npy if it is not an in-memory array.
if (str(ndarray3D_or_npyPath.__class__) != "<class 'numpy.ndarray'>"):
if (not isinstance(ndarray3D_or_npyPath, str)):
raise ValueError("\nYikes - If `ndarray3D_or_npyPath` is not an array then it must be a string-based path.\n")
if (not os.path.exists(ndarray3D_or_npyPath)):
raise ValueError("\nYikes - The path you provided does not exist according to `os.path.exists(ndarray3D_or_npyPath)`\n")
if (not os.path.isfile(ndarray3D_or_npyPath)):
raise ValueError("\nYikes - The path you provided is not a file according to `os.path.isfile(ndarray3D_or_npyPath)`\n")
source_path = ndarray3D_or_npyPath
try:
# `allow_pickle=False` prevented it from reading the file.
ndarray_3D = np.load(file=ndarray3D_or_npyPath)
except:
print("\nYikes - Failed to `np.load(file=ndarray3D_or_npyPath)` with your `ndarray3D_or_npyPath`:\n")
print(f"{ndarray3D_or_npyPath}\n")
raise
elif (str(ndarray3D_or_npyPath.__class__) == "<class 'numpy.ndarray'>"):
source_path = None
ndarray_3D = ndarray3D_or_npyPath
column_names = listify(column_names)
Dataset.arr_validate(ndarray_3D)
dimensions = len(ndarray_3D.shape)
if (dimensions != 3):
raise ValueError(dedent(f"""
Yikes - Sequence Datasets can only be constructed from 3D arrays.
					Your array had <{dimensions}> dimensions.
"""))
file_count = len(ndarray_3D)
dataset = Dataset.create(
file_count = file_count
, name = name
, dataset_type = Dataset.Sequence.dataset_type
, source_path = source_path
)
			#Make sure the shape of each 2D array is the same before writing the Dataset.
shapes = []
for i, arr in enumerate(tqdm(
ndarray_3D
, desc = "⏱️ Validating Sequences 🧬"
, ncols = 85
)):
shapes.append(arr.shape)
if (len(set(shapes)) > 1):
dataset.delete_instance()# Orphaned.
raise ValueError(dedent(f"""
					Yikes - All 2D arrays in the Dataset must be of the same shape.
					`ndarray.shape`\nHere are the unique shapes you provided:\n{set(shapes)}
"""))
try:
for i, arr in enumerate(tqdm(
ndarray_3D
, desc = "⏱️ Ingesting Sequences 🧬"
, ncols = 85
)):
File.Tabular.from_numpy(
ndarray = arr
, dataset_id = dataset.id
, column_names = column_names
, dtype = dtype
, _file_index = i
, ingest = ingest
)
except:
dataset.delete_instance() # Orphaned.
raise
return dataset
def to_numpy(
id:int,
columns:list = None,
samples:list = None
):
dataset = Dataset.get_by_id(id)
columns = listify(columns)
samples = listify(samples)
if (samples is None):
files = dataset.files
elif (samples is not None):
# Here the 'sample' is the entire file. Whereas, in 2D 'sample==row'.
# So run a query to get those files: `<<` means `in`.
files = File.select().join(Dataset).where(
Dataset.id==dataset.id, File.file_index<<samples
)
files = list(files)
# Then call them with the column filter.
# So don't pass `samples=samples` to the file.
list_2D = [f.to_numpy(columns=columns) for f in files]
arr_3D = np.array(list_2D)
return arr_3D
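		# A minimal usage sketch for the Sequence class above (shapes and column names are illustrative):
		#   arr3D = np.random.rand(4, 10, 3)   # 4 sequences, 10 timesteps, 3 columns
		#   dataset = Dataset.Sequence.from_numpy(arr3D, column_names=['a', 'b', 'c'])
		#   subset = Dataset.Sequence.to_numpy(id=dataset.id, samples=[0, 2])   # shape (2, 10, 3)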
# Graph
# handle nodes and edges as separate tabular types?
# node_data is pretty much tabular sequence (varied length) data right down to the columns.
# the only unique thing is an edge_data for each Graph file.
# attach multiple file types to a file File(id=1).tabular, File(id=1).graph?
class File(BaseModel):
"""
- Due to the fact that different types of Files have different attributes
(e.g. File.Tabular columns=JSON or File.Graph nodes=Blob, edges=Blob),
I am making each file type its own subclass and 1-1 table. This approach
allows for the creation of custom File types.
- If `blob=None` then isn't persisted therefore fetch from source_path or s3_path.
- Note that `dtype` does not require every column to be included as a key in the dictionary.
"""
file_type = CharField()
file_format = CharField() # png, jpg, parquet.
file_index = IntegerField() # image, sequence, graph.
shape = JSONField()
is_ingested = BooleanField()
skip_header_rows = PickleField(null=True) #Image does not have.
source_path = CharField(null=True) # when `from_numpy` or `from_pandas`.
blob = BlobField(null=True) # when `is_ingested==False`.
dataset = ForeignKeyField(Dataset, backref='files')
"""
Classes are much cleaner than a knot of if statements in every method,
and `=None` for every parameter.
"""
def to_numpy(id:int, columns:list=None, samples:list=None):
file = File.get_by_id(id)
columns = listify(columns)
samples = listify(samples)
if (file.file_type == 'tabular'):
arr = File.Tabular.to_numpy(id=id, columns=columns, samples=samples)
elif (file.file_type == 'image'):
arr = File.Image.to_numpy(id=id, columns=columns, samples=samples)
return arr
class Tabular():
file_type = 'tabular'
def from_pandas(
dataframe:object
, dataset_id:int
			, dtype:object = None # Accepts a single str for the entire df, but ultimately it gets saved as one dtype per column.
, column_names:list = None
, source_path:str = None # passed in via from_file, but not from_numpy.
, ingest:bool = True # from_file() method overwrites this.
, file_format:str = 'parquet' # from_file() method overwrites this.
, skip_header_rows:int = 'infer'
, _file_index:int = 0 # Dataset.Sequence overwrites this.
):
column_names = listify(column_names)
File.Tabular.df_validate(dataframe, column_names)
# We need this metadata whether ingested or not.
dataframe, columns, shape, dtype = File.Tabular.df_set_metadata(
dataframe=dataframe, column_names=column_names, dtype=dtype
)
if (ingest==True):
blob = File.Tabular.df_to_compressed_parquet_bytes(dataframe)
elif (ingest==False):
blob = None
dataset = Dataset.get_by_id(dataset_id)
file = File.create(
blob = blob
, file_type = File.Tabular.file_type
, file_format = file_format
, file_index = _file_index
, shape = shape
, source_path = source_path
, skip_header_rows = skip_header_rows
, is_ingested = ingest
, dataset = dataset
)
try:
Tabular.create(
columns = columns
, dtypes = dtype
, file_id = file.id
)
except:
file.delete_instance() # Orphaned.
raise
return file
def from_numpy(
ndarray:object
, dataset_id:int
, column_names:list = None
, dtype:object = None #Or single string.
, _file_index:int = 0
, ingest:bool = True
):
column_names = listify(column_names)
"""
Only supporting homogenous arrays because structured arrays are a pain
when it comes time to convert them to dataframes. It complained about
setting an index, scalar types, and dimensionality... yikes.
Homogenous arrays keep dtype in `arr.dtype==dtype('int64')`
Structured arrays keep column names in `arr.dtype.names==('ID', 'Ring')`
Per column dtypes dtypes from structured array <https://stackoverflow.com/a/65224410/5739514>
"""
Dataset.arr_validate(ndarray)
"""
column_names and dict-based dtype will be handled by our `from_pandas()` method.
`pd.DataFrame` method only accepts a single dtype str, or infers if None.
"""
df = pd.DataFrame(data=ndarray)
file = File.Tabular.from_pandas(
dataframe = df
, dataset_id = dataset_id
, dtype = dtype
# Setting `column_names` will not overwrite the first row of homogenous array:
, column_names = column_names
, _file_index = _file_index
, ingest = ingest
)
return file
def from_file(
path:str
, source_file_format:str
, dataset_id:int
, dtype:object = None
, column_names:list = None
, skip_header_rows:object = 'infer'
, ingest:bool = True
):
column_names = listify(column_names)
df = File.Tabular.path_to_df(
path = path
, source_file_format = source_file_format
, column_names = column_names
, skip_header_rows = skip_header_rows
)
file = File.Tabular.from_pandas(
dataframe = df
, dataset_id = dataset_id
, dtype = dtype
				, column_names = None # Columns were already applied by `path_to_df()` above.
, source_path = path
, file_format = source_file_format
, skip_header_rows = skip_header_rows
, ingest = ingest
)
return file
def to_pandas(
id:int
, columns:list = None
, samples:list = None
):
"""
This function could be optimized to read columns and rows selectively
rather than dropping them after the fact.
https://stackoverflow.com/questions/64050609/pyarrow-read-parquet-via-column-index-or-order
"""
file = File.get_by_id(id)
columns = listify(columns)
samples = listify(samples)
if (file.is_ingested==False):
# future: check if `query_fetcher` defined.
df = File.Tabular.path_to_df(
path = file.source_path
, source_file_format = file.file_format
, column_names = columns
, skip_header_rows = file.skip_header_rows
)
elif (file.is_ingested==True):
df = pd.read_parquet(
io.BytesIO(file.blob)
, columns=columns
)
# Ensures columns are rearranged to be in the correct order.
if ((columns is not None) and (df.columns.to_list() != columns)):
df = df.filter(columns)
# Specific rows.
if (samples is not None):
df = df.iloc[samples]
# Accepts dict{'column_name':'dtype_str'} or a single str.
tab = file.tabulars[0]
df_dtypes = tab.dtypes
if (df_dtypes is not None):
if (isinstance(df_dtypes, dict)):
if (columns is None):
columns = tab.columns
# Prunes out the excluded columns from the dtype dict.
df_dtype_cols = list(df_dtypes.keys())
for col in df_dtype_cols:
if (col not in columns):
del df_dtypes[col]
elif (isinstance(df_dtypes, str)):
pass #dtype just gets applied as-is.
df = df.astype(df_dtypes)
return df
def to_numpy(
id:int
, columns:list = None
, samples:list = None
):
"""
This is the only place where to_numpy() relies on to_pandas().
It does so because pandas is good with the parquet and dtypes.
"""
columns = listify(columns)
samples = listify(samples)
file = File.get_by_id(id)
# Handles when Dataset.Sequence is stored as a single .npy file
if ((file.dataset.dataset_type=='sequence') and (file.is_ingested==False)):
# Subsetting a File via `samples` is irrelevant here because the entire File is 1 sample.
# Subset the columns:
if (columns is not None):
col_indices = Job.colIndices_from_colNames(
column_names = file.tabulars[0].columns
, desired_cols = columns
)
dtype = list(file.tabulars[0].dtypes.values())[0] #`ingest==False` only allows singular dtype.
# Verified that it is lazy via `sys.getsizeof()`
lazy_load = np.load(file.dataset.source_path)
if (columns is not None):
					# First accessor[] gets the 2D file out of the 3D array. Second accessor[] subsets its columns.
arr = lazy_load[file.file_index][:,col_indices].astype(dtype)
else:
arr = lazy_load[file.file_index].astype(dtype)
else:
df = File.Tabular.to_pandas(id=id, columns=columns, samples=samples)
arr = df.to_numpy()
return arr
#Future: Add to_tensor and from_tensor? Or will numpy suffice?
def pandas_stringify_columns(df, columns):
"""
- `columns` is user-defined.
- Pandas will assign a range of int-based columns if there are no column names.
So I want to coerce them to strings because I don't want both string and int-based
column names for when calling columns programmatically,
and more importantly, 'ValueError: parquet must have string column names'
"""
cols_raw = df.columns.to_list()
if (columns is None):
# in case the columns were a range of ints.
cols_str = [str(c) for c in cols_raw]
else:
cols_str = columns
# dict from 2 lists
cols_dct = dict(zip(cols_raw, cols_str))
df = df.rename(columns=cols_dct)
columns = df.columns.to_list()
return df, columns
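		# e.g. a DataFrame built without column names carries a RangeIndex; this helper coerces it to strings
		# (illustrative values, assuming a 2-column frame):
		#   df = pd.DataFrame([[1, 2], [3, 4]])
		#   df, columns = File.Tabular.pandas_stringify_columns(df=df, columns=None)
		#   # columns == ['0', '1'], which satisfies parquet's string-column-name requirement.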
def df_validate(dataframe:object, column_names:list):
if (dataframe.empty):
raise ValueError("\nYikes - The dataframe you provided is empty according to `df.empty`\n")
if (column_names is not None):
col_count = len(column_names)
structure_col_count = dataframe.shape[1]
if (col_count != structure_col_count):
raise ValueError(dedent(f"""
Yikes - The dataframe you provided has <{structure_col_count}> columns,
but you provided <{col_count}> columns.
"""))
def df_set_metadata(
dataframe:object
, column_names:list = None
, dtype:object = None
):
shape = {}
shape['rows'], shape['columns'] = dataframe.shape[0], dataframe.shape[1]
"""
- Passes in user-defined columns in case they are specified.
			- Pandas auto-assigns int-based columns that return a range when calling `df.columns`,
but this forces each column name to be its own str.
"""
dataframe, columns = File.Tabular.pandas_stringify_columns(df=dataframe, columns=column_names)
"""
- At this point, user-provided `dtype` can be either a dict or a singular string/ class.
- But a Pandas dataframe in-memory only has `dtypes` dict not a singular `dtype` str.
- So we will ensure that there is 1 dtype per column.
"""
if (dtype is not None):
# Accepts dict{'column_name':'dtype_str'} or a single str.
try:
dataframe = dataframe.astype(dtype)
except:
raise ValueError("\nYikes - Failed to apply the dtypes you specified to the data you provided.\n")
"""
Check if any user-provided dtype against actual dataframe dtypes to see if conversions failed.
Pandas dtype seems robust in comparing dtypes:
Even things like `'double' == dataframe['col_name'].dtype` will pass when `.dtype==np.float64`.
Despite looking complex, category dtype converts to simple 'category' string.
"""
if (not isinstance(dtype, dict)):
# Inspect each column:dtype pair and check to see if it is the same as the user-provided dtype.
actual_dtypes = dataframe.dtypes.to_dict()
for col_name, typ in actual_dtypes.items():
if (typ != dtype):
raise ValueError(dedent(f"""
							Yikes - You specified `dtype={dtype}`,
but Pandas did not convert it: `dataframe['{col_name}'].dtype == {typ}`.
You can either use a different dtype, or try to set your dtypes prior to ingestion in Pandas.
"""))
elif (isinstance(dtype, dict)):
for col_name, typ in dtype.items():
if (typ != dataframe[col_name].dtype):
raise ValueError(dedent(f"""
							Yikes - You specified `dataframe['{col_name}']:dtype('{typ}')`,
but Pandas did not convert it: `dataframe['{col_name}'].dtype == {dataframe[col_name].dtype}`.
You can either use a different dtype, or try to set your dtypes prior to ingestion in Pandas.
"""))
"""
Testing outlandish dtypes:
- `DataFrame.to_parquet(engine='auto')` fails on:
'complex', 'longfloat', 'float128'.
- `DataFrame.to_parquet(engine='auto')` succeeds on:
'string', np.uint8, np.double, 'bool'.
- But the new 'string' dtype is not a numpy type!
so operations like `np.issubdtype` and `StringArray.unique().tolist()` fail.
"""
excluded_types = ['string', 'complex', 'longfloat', 'float128']
actual_dtypes = dataframe.dtypes.to_dict().items()
for col_name, typ in actual_dtypes:
for et in excluded_types:
if (et in str(typ)):
raise ValueError(dedent(f"""
Yikes - You specified `dtype['{col_name}']:'{typ}',
but aiqc does not support the following dtypes: {excluded_types}
"""))
"""
			Now we take all of the resulting dataframe dtypes and save them.
Regardless of whether or not they were user-provided.
Convert the classed `dtype('float64')` to a string so we can use it in `.to_pandas()`
"""
dtype = {k: str(v) for k, v in actual_dtypes}
# Each object has the potential to be transformed so each object must be returned.
return dataframe, columns, shape, dtype
def df_to_compressed_parquet_bytes(dataframe:object):
"""
- The Parquet file format naturally preserves pandas/numpy dtypes.
Originally, we were using the `pyarrow` engine, but it has poor timedelta dtype support.
https://towardsdatascience.com/stop-persisting-pandas-data-frames-in-csvs-f369a6440af5
			- Although the `fastparquet` engine preserves the timedelta dtype, it does not work with BytesIO.
https://github.com/dask/fastparquet/issues/586#issuecomment-861634507
"""
fs = fsspec.filesystem("memory")
temp_path = "memory://temp.parq"
dataframe.to_parquet(
temp_path
, engine = "fastparquet"
, compression = "gzip"
, index = False
)
blob = fs.cat(temp_path)
fs.delete(temp_path)
return blob
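		# The returned bytes round-trip through pandas, which is the same pattern `to_pandas()` uses when reading an ingested File:
		#   blob = File.Tabular.df_to_compressed_parquet_bytes(df)   # `df` is any ingestible DataFrame
		#   df_back = pd.read_parquet(io.BytesIO(blob))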
def path_to_df(
path:str
, source_file_format:str
, column_names:list
, skip_header_rows:object
):
"""
Previously, I was using pyarrow for all tabular/ sequence file formats.
However, it had worse support for missing column names and header skipping.
			So I switched to pandas for handling csv/tsv; `read_parquet()` doesn't let you
			rename columns easily, so parquet column names are stringified after reading instead.
"""
if (not os.path.exists(path)):
raise ValueError(f"\nYikes - The path you provided does not exist according to `os.path.exists(path)`:\n{path}\n")
if (not os.path.isfile(path)):
raise ValueError(f"\nYikes - The path you provided is not a file according to `os.path.isfile(path)`:\n{path}\n")
if (source_file_format == 'tsv') or (source_file_format == 'csv'):
if (source_file_format == 'tsv') or (source_file_format is None):
sep='\t'
source_file_format = 'tsv' # Null condition.
elif (source_file_format == 'csv'):
sep=','
df = pd.read_csv(
filepath_or_buffer = path
, sep = sep
, names = column_names
, header = skip_header_rows
)
elif (source_file_format == 'parquet'):
if (skip_header_rows != 'infer'):
raise ValueError(dedent("""
Yikes - The argument `skip_header_rows` is not supported for `source_file_format='parquet'`
because Parquet stores column names as metadata.\n
"""))
df = pd.read_parquet(path=path, engine='fastparquet')
df, columns = File.Tabular.pandas_stringify_columns(df=df, columns=column_names)
return df
class Image():
file_type = 'image'
def from_file(
path:str
, file_index:int
, dataset_id:int
, pillow_save:dict = {}
, ingest:bool = True
):
if not os.path.exists(path):
raise ValueError(f"\nYikes - The path you provided does not exist according to `os.path.exists(path)`:\n{path}\n")
if not os.path.isfile(path):
raise ValueError(f"\nYikes - The path you provided is not a file according to `os.path.isfile(path)`:\n{path}\n")
path = os.path.abspath(path)
img = Imaje.open(path)
shape = {
'width': img.size[0]
, 'height':img.size[1]
}
if (ingest==True):
blob = io.BytesIO()
img.save(blob, format=img.format, **pillow_save)
blob = blob.getvalue()
elif (ingest==False):
blob = None
dataset = Dataset.get_by_id(dataset_id)
file = File.create(
blob = blob
, file_type = File.Image.file_type
, file_format = img.format
, file_index = file_index
, shape = shape
, source_path = path
, is_ingested = ingest
, dataset = dataset
)
try:
Image.create(
mode = img.mode
, size = img.size
, file = file
, pillow_save = pillow_save
)
except:
file.delete_instance() # Orphaned.
raise
return file
def from_url(
url:str
, file_index:int
, dataset_id:int
, pillow_save:dict = {}
, ingest:bool = True
):
# URL format is validated in `from_urls`.
try:
img = Imaje.open(
requests.get(url, stream=True).raw
)
except:
raise ValueError(f"\nYikes - Could not open file at this url with Pillow library:\n{url}\n")
shape = {
'width': img.size[0]
, 'height':img.size[1]
}
if (ingest==True):
blob = io.BytesIO()
img.save(blob, format=img.format, **pillow_save)
blob = blob.getvalue()
elif (ingest==False):
blob = None
dataset = Dataset.get_by_id(dataset_id)
file = File.create(
blob = blob
, file_type = File.Image.file_type
, file_format = img.format
, file_index = file_index
, shape = shape
, source_path = url
, is_ingested = ingest
, dataset = dataset
)
try:
Image.create(
mode = img.mode
, size = img.size
, file = file
, pillow_save = pillow_save
)
except:
file.delete_instance() # Orphaned.
raise
return file
def to_pillow(id:int):
#https://pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.open
file = File.get_by_id(id)
if (file.file_type != 'image'):
raise ValueError(dedent(f"""
Yikes - Only `file.file_type='image' can be converted to Pillow images.
But you provided `file.file_type`: <{file.file_type}>
"""))
#`mode` must be 'r'": https://pillow.readthedocs.io/en/stable/reference/Image.html
if (file.is_ingested==True):
img_bytes = io.BytesIO(file.blob)
img = Imaje.open(img_bytes, mode='r')
elif (file.is_ingested==False):
# Future: store `is_url`.
try:
img = Imaje.open(file.source_path, mode='r')
except:
img = Imaje.open(
requests.get(file.source_path, stream=True).raw
, mode='r'
)
return img
class Tabular(BaseModel):
"""
- Do not change `dtype=PickleField()` because we are stringifying the columns.
I was tempted to do so for types like `np.float`, but we parse the final
type that Pandas decides to use.
"""
# Is sequence just a subset of tabular with a file_index?
columns = JSONField()
dtypes = JSONField()
file = ForeignKeyField(File, backref='tabulars')
class Image(BaseModel):
#https://pillow.readthedocs.io/en/stable/handbook/concepts.html#modes
mode = CharField()
size = PickleField()
pillow_save = JSONField()
file = ForeignKeyField(File, backref='images')
class Label(BaseModel):
"""
- Label accepts multiple columns in case it is already OneHotEncoded (e.g. tensors).
- At this point, we assume that the Label is always a tabular dataset.
"""
columns = JSONField()
column_count = IntegerField()
unique_classes = JSONField(null=True) # For categoricals and binaries. None for continuous.
dataset = ForeignKeyField(Dataset, backref='labels')
def from_dataset(dataset_id:int, columns:list):
d = Dataset.get_by_id(dataset_id)
columns = listify(columns)
if (d.dataset_type != 'tabular' and d.dataset_type != 'text'):
raise ValueError(dedent(f"""
Yikes - Labels can only be created from `dataset_type='tabular' or 'text'`.
But you provided `dataset_type`: <{d.dataset_type}>
"""))
d_cols = Dataset.get_main_tabular(dataset_id).columns
# Check that the user-provided columns exist.
all_cols_found = all(col in d_cols for col in columns)
if not all_cols_found:
raise ValueError("\nYikes - You specified `columns` that do not exist in the Dataset.\n")
# Check for duplicates of this label that already exist.
cols_aplha = sorted(columns)
d_labels = d.labels
count = d_labels.count()
if (count > 0):
for l in d_labels:
l_id = str(l.id)
l_cols = l.columns
l_cols_alpha = sorted(l_cols)
if (cols_aplha == l_cols_alpha):
raise ValueError(f"\nYikes - This Dataset already has Label <id:{l_id}> with the same columns.\nCannot create duplicate.\n")
column_count = len(columns)
label_df = Dataset.to_pandas(id=dataset_id, columns=columns)
"""
- When multiple columns are provided, they must be OHE.
- Figure out column count because classification_binary and associated
metrics can't be run on > 2 columns.
- Negative values do not alter type of numpy int64 and float64 arrays.
"""
if (column_count > 1):
unique_values = []
for c in columns:
uniques = label_df[c].unique()
unique_values.append(uniques)
if (len(uniques) == 1):
print(
f"Warning - There is only 1 unique value for this label column.\n" \
f"Unique value: <{uniques[0]}>\n" \
f"Label column: <{c}>\n"
)
flat_uniques = np.concatenate(unique_values).ravel()
all_uniques = np.unique(flat_uniques).tolist()
for i in all_uniques:
if (
((i == 0) or (i == 1))
or
((i == 0.) or (i == 1.))
):
pass
else:
raise ValueError(dedent(f"""
Yikes - When multiple columns are provided, they must be One Hot Encoded:
Unique values of your columns were neither (0,1) or (0.,1.) or (0.0,1.0).
The columns you provided contained these unique values: {all_uniques}
"""))
unique_classes = all_uniques
del label_df
# Now check if each row in the labels is truly OHE.
label_arr = Dataset.to_numpy(id=dataset_id, columns=columns)
for i, arr in enumerate(label_arr):
if 1 in arr:
arr = list(arr)
arr.remove(1)
if 1 in arr:
raise ValueError(dedent(f"""
Yikes - Label row <{i}> is supposed to be an OHE row,
but it contains multiple hot columns where value is 1.
"""))
else:
raise ValueError(dedent(f"""
Yikes - Label row <{i}> is supposed to be an OHE row,
but it contains no hot columns where value is 1.
"""))
elif (column_count == 1):
			# At this point, `label_df` is a single column df that needs to be fetched as a Series.
col = columns[0]
label_series = label_df[col]
label_dtype = label_series.dtype
if (np.issubdtype(label_dtype, np.floating)):
unique_classes = None
else:
unique_classes = label_series.unique().tolist()
class_count = len(unique_classes)
if (
(np.issubdtype(label_dtype, np.signedinteger))
or
(np.issubdtype(label_dtype, np.unsignedinteger))
):
if (class_count >= 5):
print(
f"Tip - Detected `unique_classes >= {class_count}` for an integer Label." \
f"If this Label is not meant to be categorical, then we recommend you convert to a float-based dtype." \
f"Although you'll still be able to bin these integers when it comes time to make a Splitset."
)
if (class_count == 1):
print(
f"Tip - Only detected 1 unique label class. Should have 2 or more unique classes." \
f"Your Label's only class was: <{unique_classes[0]}>."
)
l = Label.create(
dataset = d
, columns = columns
, column_count = column_count
, unique_classes = unique_classes
)
return l
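	# A minimal sketch of creating a Label via the Dataset helper (the 'species' column is hypothetical):
	#   label = dataset.make_label(columns=['species'])
	#   label.unique_classes   # e.g. a list of class values for a categorical column, or None for a float column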
def to_pandas(id:int, samples:list=None):
samples = listify(samples)
l_frame = Label.get_label(id=id, numpy_or_pandas='pandas', samples=samples)
return l_frame
def to_numpy(id:int, samples:list=None):
samples = listify(samples)
l_arr = Label.get_label(id=id, numpy_or_pandas='numpy', samples=samples)
return l_arr
def get_label(id:int, numpy_or_pandas:str, samples:list=None):
samples = listify(samples)
l = Label.get_by_id(id)
l_cols = l.columns
dataset_id = l.dataset.id
if (numpy_or_pandas == 'numpy'):
lf = Dataset.to_numpy(
id = dataset_id
, columns = l_cols
, samples = samples
)
elif (numpy_or_pandas == 'pandas'):
lf = Dataset.to_pandas(
id = dataset_id
, columns = l_cols
, samples = samples
)
return lf
def get_dtypes(
id:int
):
l = Label.get_by_id(id)
dataset = l.dataset
l_cols = l.columns
tabular_dtype = Dataset.get_main_tabular(dataset.id).dtypes
label_dtypes = {}
for key,value in tabular_dtype.items():
for col in l_cols:
if (col == key):
label_dtypes[col] = value
					# Exit `col` loop early because matching `col` found.
break
return label_dtypes
def make_labelcoder(
id:int
, sklearn_preprocess:object
):
lc = Labelcoder.from_label(
label_id = id
, sklearn_preprocess = sklearn_preprocess
)
return lc
def get_latest_labelcoder(id:int):
label = Label.get_by_id(id)
labelcoders = list(label.labelcoders)
# Check if list empty.
if (not labelcoders):
return None
else:
return labelcoders[-1]
class Feature(BaseModel):
"""
- Remember, a Feature is just a record of the columns being used.
	- Decided not to go with subclasses of Unsupervised and Supervised because that would complicate the SDK for the user,
and it essentially forked every downstream model into two subclasses.
- PCA components vary across features. When different columns are used those columns have different component values.
"""
columns = JSONField(null=True)
columns_excluded = JSONField(null=True)
dataset = ForeignKeyField(Dataset, backref='features')
def from_dataset(
dataset_id:int
, include_columns:list=None
, exclude_columns:list=None
#Future: runPCA #,run_pca:boolean=False # triggers PCA analysis of all columns
):
"""
As we get further away from the `Dataset.<Types>` they need less isolation.
"""
dataset = Dataset.get_by_id(dataset_id)
include_columns = listify(include_columns)
exclude_columns = listify(exclude_columns)
if (dataset.dataset_type == 'image'):
# Just passes the Dataset through for now.
if (include_columns is not None) or (exclude_columns is not None):
raise ValueError("\nYikes - The `Dataset.Image` classes supports neither the `include_columns` nor `exclude_columns` arguemnt.\n")
columns = None
columns_excluded = None
elif (dataset.dataset_type == 'tabular' or dataset.dataset_type == 'text' or dataset.dataset_type == 'sequence'):
d_cols = Dataset.get_main_tabular(dataset_id).columns
if ((include_columns is not None) and (exclude_columns is not None)):
raise ValueError("\nYikes - You can set either `include_columns` or `exclude_columns`, but not both.\n")
if (include_columns is not None):
# check columns exist
all_cols_found = all(col in d_cols for col in include_columns)
if (not all_cols_found):
raise ValueError("\nYikes - You specified `include_columns` that do not exist in the Dataset.\n")
# inclusion
columns = include_columns
# exclusion
columns_excluded = d_cols
for col in include_columns:
columns_excluded.remove(col)
elif (exclude_columns is not None):
all_cols_found = all(col in d_cols for col in exclude_columns)
if (not all_cols_found):
raise ValueError("\nYikes - You specified `exclude_columns` that do not exist in the Dataset.\n")
# exclusion
columns_excluded = exclude_columns
# inclusion
columns = d_cols
for col in exclude_columns:
columns.remove(col)
if (not columns):
raise ValueError("\nYikes - You cannot exclude every column in the Dataset. For there will be nothing to analyze.\n")
else:
columns = d_cols
columns_excluded = None
"""
- Check that this Dataset does not already have a Feature that is exactly the same.
		- There are fewer entries in `columns_excluded`, so it may be faster to compare those.
"""
if columns_excluded is not None:
cols_aplha = sorted(columns_excluded)
else:
cols_aplha = None
d_features = dataset.features
count = d_features.count()
if (count > 0):
for f in d_features:
f_id = str(f.id)
f_cols = f.columns_excluded
if (f_cols is not None):
f_cols_alpha = sorted(f_cols)
else:
f_cols_alpha = None
if (cols_aplha == f_cols_alpha):
raise ValueError(dedent(f"""
Yikes - This Dataset already has Feature <id:{f_id}> with the same columns.
Cannot create duplicate.
"""))
feature = Feature.create(
dataset = dataset
, columns = columns
, columns_excluded = columns_excluded
)
return feature
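	# A minimal sketch of creating a Feature via the Dataset helper (the 'species' column is hypothetical):
	#   feature = dataset.make_feature(exclude_columns=['species'])
	#   feature.columns            # every Dataset column except 'species'
	#   feature.columns_excluded   # ['species']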
def to_pandas(id:int, samples:list=None, columns:list=None):
samples = listify(samples)
columns = listify(columns)
f_frame = Feature.get_feature(
id = id
, numpy_or_pandas = 'pandas'
, samples = samples
, columns = columns
)
return f_frame
def to_numpy(id:int, samples:list=None, columns:list=None):
samples = listify(samples)
columns = listify(columns)
f_arr = Feature.get_feature(
id = id
, numpy_or_pandas = 'numpy'
, samples = samples
, columns = columns
)
return f_arr
def get_feature(
id:int
, numpy_or_pandas:str
, samples:list = None
, columns:list = None
):
feature = Feature.get_by_id(id)
samples = listify(samples)
columns = listify(columns)
f_cols = feature.columns
if (columns is not None):
for c in columns:
if c not in f_cols:
raise ValueError("\nYikes - Cannot fetch column '{c}' because it is not in `Feature.columns`.\n")
f_cols = columns
dataset_id = feature.dataset.id
if (numpy_or_pandas == 'numpy'):
f_data = Dataset.to_numpy(
id = dataset_id
, columns = f_cols
, samples = samples
)
elif (numpy_or_pandas == 'pandas'):
f_data = Dataset.to_pandas(
id = dataset_id
, columns = f_cols
, samples = samples
)
return f_data
def get_dtypes(
id:int
):
feature = Feature.get_by_id(id)
dataset = feature.dataset
if (dataset.dataset_type == 'image'):
raise ValueError("\nYikes - `feature.dataset.dataset_type=='image'` does not have dtypes.\n")
f_cols = feature.columns
tabular_dtype = Dataset.get_main_tabular(dataset.id).dtypes
feature_dtypes = {}
for key,value in tabular_dtype.items():
for col in f_cols:
if (col == key):
feature_dtypes[col] = value
					# Exit `col` loop early because matching `col` found.
break
return feature_dtypes
def make_splitset(
id:int
, label_id:int = None
, size_test:float = None
, size_validation:float = None
, bin_count:int = None
, unsupervised_stratify_col:str = None
):
splitset = Splitset.from_feature(
feature_id = id
, label_id = label_id
, size_test = size_test
, size_validation = size_validation
, bin_count = bin_count
, unsupervised_stratify_col = unsupervised_stratify_col
)
return splitset
def make_encoderset(
id:int
, encoder_count:int = 0
, description:str = None
):
encoderset = Encoderset.from_feature(
feature_id = id
, encoder_count = 0
, description = description
)
return encoderset
def get_latest_encoderset(id:int):
feature = Feature.get_by_id(id)
encodersets = list(feature.encodersets)
# Check if list empty.
if (not encodersets):
return None
else:
return encodersets[-1]
def make_window(id:int, size_window:int, size_shift:int):
feature = Feature.get_by_id(id)
window = Window.from_feature(
size_window = size_window
, size_shift = size_shift
, feature_id = feature.id
)
return window
class Window(BaseModel):
size_window = IntegerField()
size_shift = IntegerField()
feature = ForeignKeyField(Feature, backref='windows')
def from_feature(
feature_id:int
, size_window:int
, size_shift:int
):
feature = Feature.get_by_id(feature_id)
file_count = feature.dataset.file_count
if ((size_window < 1) or (size_window > (file_count - size_shift))):
raise ValueError("\nYikes - Failed: `(size_window < 1) or (size_window > (file_count - size_shift)`.\n")
if ((size_shift < 1) or (size_shift > (file_count - size_window))):
raise ValueError("\nYikes - Failed: `(size_shift < 1) or (size_shift > (file_count - size_window)`.\n")
window = Window.create(
size_window = size_window
, size_shift = size_shift
, feature_id = feature.id
)
return window
def shift_window_arrs(id:int, ndarray:object):
window = Window.get_by_id(id)
file_count = window.feature.dataset.file_count
size_window = window.size_window
size_shift = window.size_shift
total_intervals = math.floor((file_count - size_shift) / size_window)
#prune_shifted_lag = 0
prune_shifted_lead = file_count - (total_intervals * size_window)
prune_unshifted_lag = -(size_shift)
prune_unshifted_lead = file_count - (total_intervals * size_window) - size_shift
		arr_shifted = ndarray[prune_shifted_lead:]#:prune_shifted_lag
arr_unshifted = ndarray[prune_unshifted_lead:prune_unshifted_lag]
arr_shifted_shapes = arr_shifted.shape
arr_shifted = arr_shifted.reshape(
total_intervals#3D
, arr_shifted_shapes[1]*math.floor(arr_shifted_shapes[0] / total_intervals)#rows
, arr_shifted_shapes[2]#cols
)
arr_unshifted = arr_unshifted.reshape(
total_intervals#3D
, arr_shifted_shapes[1]*math.floor(arr_shifted_shapes[0] / total_intervals)#rows
, arr_shifted_shapes[2]#cols
)
return arr_shifted, arr_unshifted
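	# Worked example of the arithmetic above (illustrative numbers, not from any docs): with file_count=10,
	# size_window=2, size_shift=2, we get total_intervals = floor((10-2)/2) = 4. The shifted view drops the
	# 2 leading files, the unshifted view drops the 2 trailing files, and both reshape into 4 windows of 2 stacked files.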
class Splitset(BaseModel):
"""
- Here the `samples_` attributes contain indices.
-ToDo: store and visualize distributions of each column in training split, including label.
-Future: is it useful to specify the size of only test for unsupervised learning?
"""
samples = JSONField()
sizes = JSONField()
supervision = CharField()
has_test = BooleanField()
has_validation = BooleanField()
bin_count = IntegerField(null=True)
unsupervised_stratify_col = CharField(null=True)
label = ForeignKeyField(Label, deferrable='INITIALLY DEFERRED', null=True, backref='splitsets')
# Featureset is a many-to-many relationship between Splitset and Feature.
def make(
feature_ids:list
, label_id:int = None
, size_test:float = None
, size_validation:float = None
, bin_count:float = None
, unsupervised_stratify_col:str = None
):
# The first feature_id is used for stratification, so it's best to use Tabular data in this slot.
# --- Verify splits ---
if (size_test is not None):
if ((size_test <= 0.0) or (size_test >= 1.0)):
raise ValueError("\nYikes - `size_test` must be between 0.0 and 1.0\n")
# Don't handle `has_test` here. Need to check label first.
if ((size_validation is not None) and (size_test is None)):
raise ValueError("\nYikes - you specified a `size_validation` without setting a `size_test`.\n")
if (size_validation is not None):
if ((size_validation <= 0.0) or (size_validation >= 1.0)):
raise ValueError("\nYikes - `size_test` must be between 0.0 and 1.0\n")
sum_test_val = size_validation + size_test
if sum_test_val >= 1.0:
raise ValueError("\nYikes - Sum of `size_test` + `size_test` must be between 0.0 and 1.0 to leave room for training set.\n")
"""
			Have to run train_test_split twice, so do the math to figure out the size of the 2nd split.
Let's say I want {train:0.67, validation:0.13, test:0.20}
The first test_size is 20% which leaves 80% of the original data to be split into validation and training data.
(1.0/(1.0-0.20))*0.13 = 0.1625
"""
pct_for_2nd_split = (1.0/(1.0-size_test))*size_validation
has_validation = True
else:
has_validation = False
# --- Verify features ---
feature_ids = listify(feature_ids)
feature_lengths = []
for f_id in feature_ids:
f = Feature.get_by_id(f_id)
f_dataset = f.dataset
f_dset_type = f_dataset.dataset_type
if (f_dset_type == 'tabular' or f_dset_type == 'text'):
f_length = Dataset.get_main_file(f_dataset.id).shape['rows']
elif (f_dset_type == 'image' or f_dset_type == 'sequence'):
f_length = f_dataset.file_count
feature_lengths.append(f_length)
if (len(set(feature_lengths)) != 1):
raise ValueError("Yikes - List of features you provided contain different amounts of samples: {set(feature_lengths)}")
# --- Prepare for splitting ---
feature = Feature.get_by_id(feature_ids[0])
f_dataset = feature.dataset
f_dset_type = f_dataset.dataset_type
f_cols = feature.columns
"""
Simulate an index to be split alongside features and labels
in order to keep track of the samples being used in the resulting splits.
"""
if (f_dset_type=='tabular' or f_dset_type=='text' or f_dset_type=='sequence'):
# Could get the row count via `f_dataset.get_main_file().shape['rows']`, but need array later.
feature_array = f_dataset.to_numpy(columns=f_cols) #Used below for splitting.
# Works on both 2D and 3D data.
sample_count = feature_array.shape[0]
elif (f_dset_type=='image'):
sample_count = f_dataset.file_count
arr_idx = np.arange(sample_count)
samples = {}
sizes = {}
if (size_test is None):
size_test = 0.30
# ------ Stratification prep ------
if (label_id is not None):
has_test = True
supervision = "supervised"
if (unsupervised_stratify_col is not None):
raise ValueError("\nYikes - `unsupervised_stratify_col` cannot be present is there is a Label.\n")
# We don't need to prevent duplicate Label/Feature combos because Splits generate different samples each time.
label = Label.get_by_id(label_id)
# Check number of samples in Label vs Feature, because they can come from different Datasets.
stratify_arr = label.to_numpy()
l_length = label.dataset.get_main_file().shape['rows']
if (label.dataset.id != f_dataset.id):
if (l_length != sample_count):
raise ValueError("\nYikes - The Datasets of your Label and Feature do not contains the same number of samples.\n")
# check for OHE cols and reverse them so we can still stratify ordinally.
if (stratify_arr.shape[1] > 1):
stratify_arr = np.argmax(stratify_arr, axis=1)
# OHE dtype returns as int64
stratify_dtype = stratify_arr.dtype
elif (label_id is None):
has_test = False
supervision = "unsupervised"
label = None
indices_lst_train = arr_idx.tolist()
if (unsupervised_stratify_col is not None):
if (f_dset_type=='image'):
raise ValueError("\nYikes - `unsupervised_stratify_col` cannot be used with `dataset_type=='image'`.\n")
column_names = f_dataset.get_main_tabular().columns
col_index = Job.colIndices_from_colNames(column_names=column_names, desired_cols=[unsupervised_stratify_col])[0]
stratify_arr = feature_array[:,:,col_index]
stratify_dtype = stratify_arr.dtype
if (f_dset_type=='sequence'):
if (stratify_arr.shape[1] > 1):
# We need a single value, so take the median or mode of each 1D array.
if (np.issubdtype(stratify_dtype, np.number) == True):
stratify_arr = np.median(stratify_arr, axis=1)
if (np.issubdtype(stratify_dtype, np.number) == False):
modes = [scipy.stats.mode(arr1D)[0][0] for arr1D in stratify_arr]
stratify_arr = np.array(modes)
# Now both are 1D so reshape to 2D.
stratify_arr = stratify_arr.reshape(stratify_arr.shape[0], 1)
elif (unsupervised_stratify_col is None):
if (bin_count is not None):
raise ValueError("\nYikes - `bin_count` cannot be set if `unsupervised_stratify_col is None` and `label_id is None`.\n")
stratify_arr = None#Used in if statements below.
# ------ Stratified vs Unstratified ------
if (stratify_arr is not None):
"""
- `sklearn.model_selection.train_test_split` = https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html
			- `shuffle` happens before the split. Although it preserves a df's original index, we don't need to worry about that because we are providing our own indices.
- Don't include the Dataset.Image.feature pixel arrays in stratification.
"""
# `bin_count` is only returned so that we can persist it.
stratifier1, bin_count = Splitset.stratifier_by_dtype_binCount(
stratify_dtype = stratify_dtype,
stratify_arr = stratify_arr,
bin_count = bin_count
)
if (f_dset_type=='tabular' or f_dset_type=='text' or f_dset_type=='sequence'):
features_train, features_test, stratify_train, stratify_test, indices_train, indices_test = train_test_split(
feature_array, stratify_arr, arr_idx
, test_size = size_test
, stratify = stratifier1
, shuffle = True
)
if (size_validation is not None):
stratifier2, bin_count = Splitset.stratifier_by_dtype_binCount(
stratify_dtype = stratify_dtype,
stratify_arr = stratify_train, #This split is different from stratifier1.
bin_count = bin_count
)
features_train, features_validation, stratify_train, stratify_validation, indices_train, indices_validation = train_test_split(
features_train, stratify_train, indices_train
, test_size = pct_for_2nd_split
, stratify = stratifier2
, shuffle = True
)
elif (f_dset_type=='image'):
# Differs in that the Features not fed into `train_test_split()`.
stratify_train, stratify_test, indices_train, indices_test = train_test_split(
stratify_arr, arr_idx
, test_size = size_test
, stratify = stratifier1
, shuffle = True
)
if (size_validation is not None):
stratifier2, bin_count = Splitset.stratifier_by_dtype_binCount(
stratify_dtype = stratify_dtype,
stratify_arr = stratify_train, #This split is different from stratifier1.
bin_count = bin_count
)
stratify_train, stratify_validation, indices_train, indices_validation = train_test_split(
stratify_train, indices_train
, test_size = pct_for_2nd_split
, stratify = stratifier2
, shuffle = True
)
elif (stratify_arr is None):
if (f_dset_type=='tabular' or f_dset_type=='text' or f_dset_type=='sequence'):
features_train, features_test, indices_train, indices_test = train_test_split(
feature_array, arr_idx
, test_size = size_test
, shuffle = True
)
if (size_validation is not None):
features_train, features_validation, indices_train, indices_validation = train_test_split(
features_train, indices_train
, test_size = pct_for_2nd_split
, shuffle = True
)
elif (f_dset_type=='image'):
# Differs in that the Features not fed into `train_test_split()`.
indices_train, indices_test = train_test_split(
arr_idx
, test_size = size_test
, shuffle = True
)
if (size_validation is not None):
indices_train, indices_validation = train_test_split(
indices_train
, test_size = pct_for_2nd_split
, shuffle = True
)
if (size_validation is not None):
indices_lst_validation = indices_validation.tolist()
samples["validation"] = indices_lst_validation
indices_lst_train, indices_lst_test = indices_train.tolist(), indices_test.tolist()
samples["train"] = indices_lst_train
samples["test"] = indices_lst_test
size_train = 1.0 - size_test
if (size_validation is not None):
size_train -= size_validation
count_validation = len(indices_lst_validation)
sizes["validation"] = {"percent": size_validation, "count": count_validation}
count_test = len(indices_lst_test)
count_train = len(indices_lst_train)
sizes["test"] = {"percent": size_test, "count": count_test}
sizes["train"] = {"percent": size_train, "count": count_train}
splitset = Splitset.create(
label = label
, samples = samples
, sizes = sizes
, supervision = supervision
, has_test = has_test
, has_validation = has_validation
, bin_count = bin_count
, unsupervised_stratify_col = unsupervised_stratify_col
)
try:
for f_id in feature_ids:
feature = Feature.get_by_id(f_id)
Featureset.create(splitset=splitset, feature=feature)
except:
splitset.delete_instance() # Orphaned.
raise
return splitset
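# --- Illustrative sketch (kept as comments so nothing runs at import time) ---
# How the two-stage split above carves a validation set out of what remains after the
# test split. `pct_for_2nd_split` is assumed here to be the validation fraction
# re-expressed relative to the post-test remainder (an assumption; it is computed
# further up in this function):
# >>> size_test, size_validation = 0.20, 0.10
# >>> pct_for_2nd_split = size_validation / (1.0 - size_test)
# >>> round(pct_for_2nd_split, 3)
# 0.125   # 12.5% of the remaining 80% == 10% of the original samples.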
def values_to_bins(array_to_bin:object, bin_count:int):
"""
Overwrites continuous Label values with bin numbers for stratification & folding.
Switched to `pd.qcut` because `np.digitize` never put enough samples in the leftmost/rightmost bins.
"""
# Make 1D for qcut.
array_to_bin = array_to_bin.flatten()
# For really unbalanced labels, I ran into errors where bin boundaries would be duplicates all the way down to 2 bins.
# Setting `duplicates='drop'` to address this.
bin_numbers = pd.qcut(x=array_to_bin, q=bin_count, labels=False, duplicates='drop')
# Convert 1D array back to 2D for the rest of the program.
bin_numbers = np.reshape(bin_numbers, (-1, 1))
return bin_numbers
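# --- Illustrative sketch (comments only) of the quantile binning above ---
# Hypothetical values; `pd.qcut` with `labels=False` returns integer bin codes:
# >>> import numpy as np, pandas as pd
# >>> arr = np.array([[0.1], [0.4], [0.7], [1.2], [3.5], [9.9]])
# >>> pd.qcut(x=arr.flatten(), q=3, labels=False, duplicates='drop')
# array([0, 0, 1, 1, 2, 2])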
def stratifier_by_dtype_binCount(stratify_dtype:object, stratify_arr:object, bin_count:int=None):
# Based on the dtype and bin_count determine how to stratify.
# Automatically bin floats.
if np.issubdtype(stratify_dtype, np.floating):
if (bin_count is None):
bin_count = 3
stratifier = Splitset.values_to_bins(array_to_bin=stratify_arr, bin_count=bin_count)
# Allow ints to pass either binned or unbinned.
elif (
(np.issubdtype(stratify_dtype, np.signedinteger))
or
(np.issubdtype(stratify_dtype, np.unsignedinteger))
):
if (bin_count is not None):
stratifier = Splitset.values_to_bins(array_to_bin=stratify_arr, bin_count=bin_count)
elif (bin_count is None):
# Assumes the int is for classification.
stratifier = stratify_arr
# Reject binned objs.
elif (np.issubdtype(stratify_dtype, np.number) == False):
if (bin_count is not None):
raise ValueError(dedent("""
Yikes - Your Label is not numeric (neither `np.floating`, `np.signedinteger`, `np.unsignedinteger`).
Therefore, you cannot provide a value for `bin_count`.
\n"""))
elif (bin_count is None):
stratifier = stratify_arr
return stratifier, bin_count
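# --- Illustrative sketch (comments only) of the dtype routing above ---
# Floats are always binned (default bin_count=3); ints are binned only when `bin_count`
# is provided, otherwise they are treated as class labels; non-numeric values pass
# through untouched and reject `bin_count`. Hypothetical arrays:
# >>> y_float = np.array([[0.2], [1.4], [2.9], [4.1]])
# >>> Splitset.stratifier_by_dtype_binCount(y_float.dtype, y_float)    # binned into 3 quantiles.
# >>> y_int = np.array([[0], [1], [1], [2]])
# >>> Splitset.stratifier_by_dtype_binCount(y_int.dtype, y_int)        # returned as-is; bin_count stays None.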
def get_features(id:int):
splitset = Splitset.get_by_id(id)
features = list(Feature.select().join(Featureset).where(Featureset.splitset==splitset))
return features
def make_foldset(
id:int
, fold_count:int = None
, bin_count:int = None
):
foldset = Foldset.from_splitset(
splitset_id = id
, fold_count = fold_count
, bin_count = bin_count
)
return foldset
class Featureset(BaseModel):
"""Featureset is a many-to-many relationship between Splitset and Feature."""
splitset = ForeignKeyField(Splitset, backref='featuresets')
feature = ForeignKeyField(Feature, backref='featuresets')
class Foldset(BaseModel):
"""
- Contains aggregate summary statistics and evaluation metrics for all Folds.
- Works the same for all dataset types because only the labels are used for stratification.
"""
fold_count = IntegerField()
random_state = IntegerField()
bin_count = IntegerField(null=True) # For stratifying continuous features.
#ToDo: max_samples_per_bin = IntegerField()
#ToDo: min_samples_per_bin = IntegerField()
splitset = ForeignKeyField(Splitset, backref='foldsets')
def from_splitset(
splitset_id:int
, fold_count:int = None
, bin_count:int = None
):
splitset = Splitset.get_by_id(splitset_id)
new_random = False
while new_random == False:
random_state = random.randint(0, 4294967295) #2**32 - 1 inclusive
matching_randoms = splitset.foldsets.select().where(Foldset.random_state==random_state)
count_matches = matching_randoms.count()
if count_matches == 0:
new_random = True
if (fold_count is None):
fold_count = 5 # More likely than 4 to be evenly divisible.
else:
if (fold_count < 2):
raise ValueError(dedent(f"""
Yikes - Cross validation requires multiple folds.
But you provided `fold_count`: <{fold_count}>.
"""))
elif (fold_count == 2):
print("\nWarning - Instead of two folds, why not just use a validation split?\n")
# Get the training indices. The actual values of the features don't matter, only label values needed for stratification.
arr_train_indices = splitset.samples["train"]
if (splitset.supervision=="supervised"):
stratify_arr = splitset.label.to_numpy(samples=arr_train_indices)
stratify_dtype = stratify_arr.dtype
elif (splitset.supervision=="unsupervised"):
if (splitset.unsupervised_stratify_col is not None):
stratify_arr = splitset.get_features()[0].to_numpy(
columns = splitset.unsupervised_stratify_col,
samples = arr_train_indices
)
stratify_dtype = stratify_arr.dtype
if (stratify_arr.shape[1] > 1):
# We need a single value, so take the median or mode of each 1D array.
if (np.issubdtype(stratify_dtype, np.number) == True):
stratify_arr = np.median(stratify_arr, axis=1)
if (np.issubdtype(stratify_dtype, np.number) == False):
modes = [scipy.stats.mode(arr1D)[0][0] for arr1D in stratify_arr]
stratify_arr = np.array(modes)
# Now both are 1D so reshape to 2D.
stratify_arr = stratify_arr.reshape(stratify_arr.shape[0], 1)
elif (splitset.unsupervised_stratify_col is None):
if (bin_count is not None):
raise ValueError("\nYikes - `bin_count` cannot be set if `unsupervised_stratify_col is None` and `label_id is None`.\n")
stratify_arr = None#Used in if statements below.
# If the Labels are binned, *overwrite* the values with bin numbers. Otherwise leave them untouched.
if (stratify_arr is not None):
# Bin the floats.
if (np.issubdtype(stratify_dtype, np.floating)):
if (bin_count is None):
bin_count = splitset.bin_count #Inherit.
stratify_arr = Splitset.values_to_bins(
array_to_bin = stratify_arr
, bin_count = bin_count
)
# Allow ints to pass either binned or unbinned.
elif (
(np.issubdtype(stratify_dtype, np.signedinteger))
or
(np.issubdtype(stratify_dtype, np.unsignedinteger))
):
if (bin_count is not None):
if (splitset.bin_count is None):
print(dedent("""
Warning - Previously you set `Splitset.bin_count is None`
but now you are trying to set `Foldset.bin_count is not None`.
This can result in inconsistent stratification processes being
used for training samples versus validation and test samples.
\n"""))
stratify_arr = Splitset.values_to_bins(
array_to_bin = stratify_arr
, bin_count = bin_count
)
else:
if (bin_count is not None):
raise ValueError(dedent("""
Yikes - The column you are stratifying by is not a numeric dtype (neither `np.floating`, `np.signedinteger`, `np.unsignedinteger`).
Therefore, you cannot provide a value for `bin_count`.
\n"""))
train_count = len(arr_train_indices)
remainder = train_count % fold_count
if (remainder != 0):
print(
f"Warning - The number of samples <{train_count}> in your training Split\n" \
f"is not evenly divisible by the `fold_count` <{fold_count}> you specified.\n" \
f"This can result in misleading performance metrics for the last Fold.\n"
)
foldset = Foldset.create(
fold_count = fold_count
, random_state = random_state
, bin_count = bin_count
, splitset = splitset
)
try:
# Stratified vs Unstratified.
if (stratify_arr is None):
# Nothing to stratify with.
kf = KFold(
n_splits=fold_count
, shuffle=True
, random_state=random_state
)
splitz_gen = kf.split(arr_train_indices)
elif (stratify_arr is not None):
skf = StratifiedKFold(
n_splits=fold_count
, shuffle=True
, random_state=random_state
)
splitz_gen = skf.split(arr_train_indices, stratify_arr)
i = -1
for index_folds_train, index_fold_validation in splitz_gen:
i+=1
fold_samples = {}
fold_samples["folds_train_combined"] = index_folds_train.tolist()
fold_samples["fold_validation"] = index_fold_validation.tolist()
Fold.create(
fold_index = i
, samples = fold_samples
, foldset = foldset
)
except:
foldset.delete_instance() # Orphaned.
raise
return foldset
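# --- Illustrative sketch (comments only) of the fold generation above ---
# Hypothetical training indices and labels; each yielded tuple becomes one Fold's
# `folds_train_combined` / `fold_validation` positions within the training split:
# >>> from sklearn.model_selection import StratifiedKFold
# >>> import numpy as np
# >>> train_idx = np.array([10, 11, 12, 13, 14, 15])
# >>> y = np.array([0, 0, 0, 1, 1, 1])
# >>> skf = StratifiedKFold(n_splits=3, shuffle=True, random_state=0)
# >>> for positions_train, positions_validation in skf.split(train_idx, y):
# ...     pass  # each validation fold keeps the 0/1 class balance of `y`.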
class Fold(BaseModel):
"""
- A Fold is 1 of many cross-validation sets generated as part of a Foldset.
- The `samples` attribute contains the indices of `folds_train_combined` and `fold_validation`,
where `fold_validation` is the rotating fold that gets left out.
"""
fold_index = IntegerField() # order within the Foldset.
samples = JSONField()
# contains_all_classes = BooleanField()
foldset = ForeignKeyField(Foldset, backref='folds')
class Encoderset(BaseModel):
"""
- Preprocessing should not happen prior to Dataset ingestion because you need to do it after the split to avoid bias.
For example, encoder.fit() only on training split - then .transform() train, validation, and test.
- Don't restrict a preprocess to a specific Algorithm. Many algorithms are created as different hyperparameters are tried.
Also, Preprocess is somewhat predetermined by the dtypes present in the Label and Feature.
- Although Encoderset seems unnecessary, you need something to sequentially group the Featurecoders onto.
- In future, maybe Labelcoder gets split out from Encoderset and it becomes Featurecoderset.
"""
encoder_count = IntegerField()
description = CharField(null=True)
feature = ForeignKeyField(Feature, backref='encodersets')
def from_feature(
feature_id:int
, encoder_count:int = 0
, description:str = None
):
feature = Feature.get_by_id(feature_id)
encoderset = Encoderset.create(
encoder_count = encoder_count
, description = description
, feature = feature
)
return encoderset
def make_featurecoder(
id:int
, sklearn_preprocess:object
, include:bool = True
, verbose:bool = True
, dtypes:list = None
, columns:list = None
):
dtypes = listify(dtypes)
columns = listify(columns)
fc = Featurecoder.from_encoderset(
encoderset_id = id
, sklearn_preprocess = sklearn_preprocess
, include = include
, dtypes = dtypes
, columns = columns
, verbose = verbose
)
return fc
class Labelcoder(BaseModel):
"""
- `is_fit_train` toggles if the encoder is either `.fit(<training_split/fold>)` to
avoid bias or `.fit(<entire_dataset>)`.
- Categorical (ordinal and OHE) encoders are best applied to entire dataset in case
there are classes missing in the split/folds of validation/ test data.
- Whereas numerical encoders are best fit only to the training data.
- Because there's only 1 encoder that runs and it uses all columns, Labelcoder
is much simpler to validate and run in comparison to Featurecoder.
"""
only_fit_train = BooleanField()
is_categorical = BooleanField()
sklearn_preprocess = PickleField()
matching_columns = JSONField() # kinda unnecessary, but maybe multi-label future.
encoding_dimension = CharField()
label = ForeignKeyField(Label, backref='labelcoders')
def from_label(
label_id:int
, sklearn_preprocess:object
):
label = Label.get_by_id(label_id)
sklearn_preprocess, only_fit_train, is_categorical = Labelcoder.check_sklearn_attributes(
sklearn_preprocess, is_label=True
)
samples_to_encode = label.to_numpy()
# 2. Test Fit.
try:
fitted_encoders, encoding_dimension = Labelcoder.fit_dynamicDimensions(
sklearn_preprocess = sklearn_preprocess
, samples_to_fit = samples_to_encode
)
except:
print(f"\nYikes - During a test encoding, failed to `fit()` instantiated `{sklearn_preprocess}` on `label.to_numpy())`.\n")
raise
# 3. Test Transform/ Encode.
try:
"""
- During `Job.run`, it will touch every split/fold regardless of what it was fit on
so just validate it on whole dataset.
"""
Labelcoder.transform_dynamicDimensions(
fitted_encoders = fitted_encoders
, encoding_dimension = encoding_dimension
, samples_to_transform = samples_to_encode
)
except:
raise ValueError(dedent("""
During testing, the encoder was successfully `fit()` on the labels,
but, it failed to `transform()` labels of the dataset as a whole.
"""))
else:
pass
lc = Labelcoder.create(
only_fit_train = only_fit_train
, sklearn_preprocess = sklearn_preprocess
, encoding_dimension = encoding_dimension
, matching_columns = label.columns
, is_categorical = is_categorical
, label = label
)
return lc
def check_sklearn_attributes(sklearn_preprocess:object, is_label:bool):
#This function is used by Featurecoder too, so don't put label-specific things in here.
if (inspect.isclass(sklearn_preprocess)):
raise ValueError(dedent("""
Yikes - The encoder you provided is a class name, but it should be a class instance.\n
Class (incorrect): `OrdinalEncoder`
Instance (correct): `OrdinalEncoder()`
"""))
# Encoder parent modules vary: `sklearn.preprocessing._data` vs `sklearn.preprocessing._label`
# Feels cleaner than this: https://stackoverflow.com/questions/14570802/python-check-if-object-is-instance-of-any-class-from-a-certain-module
coder_type = str(type(sklearn_preprocess))
if ('sklearn.preprocessing' not in coder_type):
raise ValueError(dedent("""
Yikes - At this point in time, only `sklearn.preprocessing` encoders are supported.
https://scikit-learn.org/stable/modules/classes.html#module-sklearn.preprocessing
"""))
elif ('sklearn.preprocessing' in coder_type):
if (not hasattr(sklearn_preprocess, 'fit')):
raise ValueError(dedent("""
Yikes - The `sklearn.preprocessing` method you provided does not have a `fit` method.\n
Please use one of the uppercase methods instead.
For example: use `RobustScaler` instead of `robust_scale`.
"""))
if (hasattr(sklearn_preprocess, 'sparse')):
if (sklearn_preprocess.sparse == True):
try:
sklearn_preprocess.sparse = False
print(dedent("""
=> Info - System overriding user input to set `sklearn_preprocess.sparse=False`.
This would have generated 'scipy.sparse.csr.csr_matrix', causing Keras training to fail.
"""))
except:
raise ValueError(dedent(f"""
Yikes - Detected `sparse==True` attribute of {sklearn_preprocess}.
System attempted to override this to False, but failed.
FYI `sparse` is True by default if left blank.
This would have generated 'scipy.sparse.csr.csr_matrix', causing Keras training to fail.\n
Please try again with False. For example, `OneHotEncoder(sparse=False)`.
"""))
if (hasattr(sklearn_preprocess, 'copy')):
if (sklearn_preprocess.copy == True):
try:
sklearn_preprocess.copy = False
print(dedent("""
=> Info - System overriding user input to set `sklearn_preprocess.copy=False`.
This saves memory when concatenating the output of many encoders.
"""))
except:
raise ValueError(dedent(f"""
Yikes - Detected `copy==True` attribute of {sklearn_preprocess}.
System attempted to override this to False, but failed.
FYI `copy` is True by default if left blank, which consumes memory.\n
Please try again with 'copy=False'.
For example, `StandardScaler(copy=False)`.
"""))
if (hasattr(sklearn_preprocess, 'sparse_output')):
if (sklearn_preprocess.sparse_output == True):
try:
sklearn_preprocess.sparse_output = False
print(dedent("""
=> Info - System overriding user input to set `sklearn_preprocess.sparse_output=False`.
This would have generated 'scipy.sparse.csr.csr_matrix', causing Keras training to fail.
"""))
except:
raise ValueError(dedent(f"""
Yikes - Detected `sparse_output==True` attribute of {sklearn_preprocess}.
System attempted to override this to False, but failed.
Please try again with 'sparse_output=False'.
This would have generated 'scipy.sparse.csr.csr_matrix', causing Keras training to fail.\n
For example, `LabelBinarizer(sparse_output=False)`.
"""))
if (hasattr(sklearn_preprocess, 'order')):
if (sklearn_preprocess.order == 'F'):
try:
sklearn_preprocess.order = 'C'
print(dedent("""
=> Info - System overriding user input to set `sklearn_preprocess.order='C'`.
This changes the memory layout of the output array from Fortran ('F') order to C order.
"""))
except:
raise ValueError(dedent(f"""
Yikes - Detected `order=='F'` attribute of {sklearn_preprocess}.
System attempted to override this to 'C', but failed.
Please try again with `order='C'`.
For example, `PolynomialFeatures(order='C')`.
"""))
if (hasattr(sklearn_preprocess, 'encode')):
if (sklearn_preprocess.encode == 'onehot'):
# Multiple options here, so don't override user input.
raise ValueError(dedent(f"""
Yikes - Detected `encode=='onehot'` attribute of {sklearn_preprocess}.
FYI `encode` is 'onehot' by default if left blank, and it produces 'scipy.sparse.csr.csr_matrix',
which causes Keras training to fail.\n
Please try again with 'onehot-dense' or 'ordinal'.
For example, `KBinsDiscretizer(encode='onehot-dense')`.
"""))
if (
(is_label==True)
and
(not hasattr(sklearn_preprocess, 'inverse_transform'))
):
print(dedent("""
Warning - The following encoders do not have an `inverse_transform` method.
It is inadvisable to use them to encode Labels during training,
because you may not be able to programmatically decode your raw predictions
when it comes time for inference (aka non-training predictions):
[Binarizer, KernelCenterer, Normalizer, PolynomialFeatures]
"""))
"""
- Binners like 'KBinsDiscretizer' and 'QuantileTransformer'
will place unseen observations outside bounds into existing min/max bin.
- I assume that someone won't use a custom FunctionTransformer for categories
when all of these categorical encoders are available.
- LabelBinarizer is not threshold-based, it's more like an OHE.
"""
only_fit_train = True
stringified_coder = str(sklearn_preprocess)
is_categorical = False
for c in categorical_encoders:
if (stringified_coder.startswith(c)):
only_fit_train = False
is_categorical = True
break
return sklearn_preprocess, only_fit_train, is_categorical
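# --- Illustrative sketch (comments only) of the attribute checks above ---
# Assumes `OrdinalEncoder` appears in the module-level `categorical_encoders` list:
# >>> from sklearn.preprocessing import OrdinalEncoder, StandardScaler
# >>> Labelcoder.check_sklearn_attributes(OrdinalEncoder(), is_label=True)
# # -> (OrdinalEncoder(), only_fit_train=False, is_categorical=True)    # categorical: fit on the entire dataset.
# >>> Labelcoder.check_sklearn_attributes(StandardScaler(copy=False), is_label=False)
# # -> (StandardScaler(copy=False), only_fit_train=True, is_categorical=False)  # numeric: fit on train only.
# >>> Labelcoder.check_sklearn_attributes(OrdinalEncoder, is_label=True)  # class, not instance -> raises ValueError.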
def fit_dynamicDimensions(sklearn_preprocess:object, samples_to_fit:object):
"""
- Future: optimize to make sure not duplicating numpy. especially append to lists + reshape after transpose.
- There are 17 uppercase sklearn encoders, and 10 different data types across float, str, int
when considering negatives, 2D multiple columns, and 2D single columns.
- Different encoders work with different data types and dimensionality.
- This function normalizes that process by coercing the dimensionality that the encoder wants,
and erroring if the wrong data type is used. The goal in doing so is to return
that dimensionality for future use.
- `samples_to_transform` is pre-filtered for the appropriate `matching_columns`.
- The rub lies in that if you have many columns, but the encoder only fits 1 column at a time,
then you return many fits for a single type of preprocess.
- Remember this is for a single Featurecoder that is potentially returning multiple fits.
- UPDATE: after disabling LabelBinarizer and LabelEncoder from running on multiple columns,
everything seems to be fitting as "2D_multiColumn", but let's keep the logic for new sklearn methods.
"""
fitted_encoders = []
incompatibilities = {
"string": [
"KBinsDiscretizer", "KernelCenterer", "MaxAbsScaler",
"MinMaxScaler", "PowerTransformer", "QuantileTransformer",
"RobustScaler", "StandardScaler"
]
, "float": ["LabelBinarizer"]
, "numeric array without dimensions both odd and square (e.g. 3x3, 5x5)": ["KernelCenterer"]
}
with warnings.catch_warnings(record=True) as w:
try:
# aiqc `to_numpy()` always fetches 2D.
# Remember, we are assembling the `fitted_encoders` list, not accessing it.
fit_encoder = sklearn_preprocess.fit(samples_to_fit)
fitted_encoders.append(fit_encoder)
except:
# At this point, "2D" failed. It had 1 or more columns.
try:
width = samples_to_fit.shape[1]
if (width > 1):
# Reshape "2D many columns" to “3D of 2D single columns.”
samples_to_fit = samples_to_fit[None].T
# "2D single column" already failed. Need it to fail again to trigger except.
elif (width == 1):
# Reshape "2D single columns" to “3D of 2D single columns.”
samples_to_fit = samples_to_fit.reshape(1, samples_to_fit.shape[0], 1)
# Fit against each 2D array within the 3D array.
for i, arr in enumerate(samples_to_fit):
fit_encoder = sklearn_preprocess.fit(arr)
fitted_encoders.append(fit_encoder)
except:
# At this point, "2D single column" has failed.
try:
# So reshape the "3D of 2D_singleColumn" into "2D of 1D for each column."
# This transformation is tested for both (width==1) as well as (width>1).
samples_to_fit = samples_to_fit.transpose(2,0,1)[0]
# Fit against each column in 2D array.
for i, arr in enumerate(samples_to_fit):
fit_encoder = sklearn_preprocess.fit(arr)
fitted_encoders.append(fit_encoder)
except:
raise ValueError(dedent(f"""
Yikes - Encoder failed to fit the columns you filtered.\n
Either the data is dirty (e.g. contains NaNs),
or the encoder might not accept negative values (e.g. PowerTransformer.method='box-cox'),
or you used one of the incompatible combinations of data type and encoder seen below:\n
{incompatibilities}
"""))
else:
encoding_dimension = "1D"
else:
encoding_dimension = "2D_singleColumn"
else:
encoding_dimension = "2D_multiColumn"
return fitted_encoders, encoding_dimension
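# --- Illustrative numpy sketch (comments only) of the dimensionality coercion above ---
# >>> import numpy as np
# >>> samples = np.array([[1., 10.], [2., 20.], [3., 30.]])     # "2D_multiColumn", shape (3, 2).
# >>> samples[None].T.shape                                     # "3D of 2D single columns".
# (2, 3, 1)
# >>> samples[None].T.transpose(2, 0, 1)[0].shape               # "2D of 1D per column".
# (2, 3)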
def if_1d_make_2d(array:object):
if (len(array.shape) == 1):
array = array.reshape(array.shape[0], 1)
return array
def transform_dynamicDimensions(
fitted_encoders:list
, encoding_dimension:str
, samples_to_transform:object
):
"""
- UPDATE: after disabling LabelBinarizer and LabelEncoder from running on multiple columns,
everything seems to be fitting as "2D_multiColumn", but let's keep the logic for new sklearn methods.
"""
if (encoding_dimension == '2D_multiColumn'):
# Our `to_numpy` method fetches data as 2D. So it has 1+ columns.
encoded_samples = fitted_encoders[0].transform(samples_to_transform)
encoded_samples = Labelcoder.if_1d_make_2d(array=encoded_samples)
elif (encoding_dimension == '2D_singleColumn'):
# Means that `2D_multiColumn` arrays cannot be used as is.
width = samples_to_transform.shape[1]
if (width == 1):
# It's already "2D_singleColumn"
encoded_samples = fitted_encoders[0].transform(samples_to_transform)
encoded_samples = Labelcoder.if_1d_make_2d(array=encoded_samples)
elif (width > 1):
# Data must be fed into encoder as separate '2D_singleColumn' arrays.
# Reshape "2D many columns" to “3D of 2D singleColumns” so we can loop on it.
encoded_samples = samples_to_transform[None].T
encoded_arrs = []
for i, arr in enumerate(encoded_samples):
encoded_arr = fitted_encoders[i].transform(arr)
encoded_arr = Labelcoder.if_1d_make_2d(array=encoded_arr)
encoded_arrs.append(encoded_arr)
encoded_samples = np.array(encoded_arrs).T
# From "3D of 2Ds" to "2D wide"
# When `encoded_samples` was accidentally a 3D shape, this fixed it:
"""
if (len(encoded_samples.shape) == 3):
encoded_samples = encoded_samples.transpose(
1,0,2
).reshape(
# where index represents dimension.
encoded_samples.shape[1],
encoded_samples.shape[0]*encoded_samples.shape[2]
)
"""
del encoded_arrs
elif (encoding_dimension == '1D'):
# From "2D_multiColumn" to "2D with 1D for each column"
# This `.T` works for both single and multi column.
encoded_samples = samples_to_transform.T
# Since each column is 1D, we care about rows now.
length = encoded_samples.shape[0]
if (length == 1):
encoded_samples = fitted_encoders[0].transform(encoded_samples)
# Some of these 1D encoders also output 1D.
# Need to put it back into 2D.
encoded_samples = Labelcoder.if_1d_make_2d(array=encoded_samples)
elif (length > 1):
encoded_arrs = []
for i, arr in enumerate(encoded_samples):
encoded_arr = fitted_encoders[i].transform(arr)
# Check if it is 1D before appending.
encoded_arr = Labelcoder.if_1d_make_2d(array=encoded_arr)
encoded_arrs.append(encoded_arr)
# From "3D of 2D_singleColumn" to "2D_multiColumn"
encoded_samples = np.array(encoded_arrs).T
del encoded_arrs
return encoded_samples
class Featurecoder(BaseModel):
"""
- An Encoderset can have a chain of Featurecoders.
- Encoders are applied sequentially, meaning the columns encoded by `featurecoder_index=0`
are not available to `featurecoder_index=1`.
- Lots of validation, because real-life encoding errors are cryptic and hard for beginners to debug.
"""
featurecoder_index = IntegerField()
sklearn_preprocess = PickleField()
matching_columns = JSONField()
leftover_columns = JSONField()
leftover_dtypes = JSONField()
original_filter = JSONField()
encoding_dimension = CharField()
only_fit_train = BooleanField()
is_categorical = BooleanField()
encoderset = ForeignKeyField(Encoderset, backref='featurecoders')
def from_encoderset(
encoderset_id:int
, sklearn_preprocess:object
, include:bool = True
, dtypes:list = None
, columns:list = None
, verbose:bool = True
):
encoderset = Encoderset.get_by_id(encoderset_id)
dtypes = listify(dtypes)
columns = listify(columns)
feature = encoderset.feature
feature_cols = feature.columns
feature_dtypes = feature.get_dtypes()
existing_featurecoders = list(encoderset.featurecoders)
dataset = feature.dataset
dataset_type = dataset.dataset_type
# 1. Figure out which columns have yet to be encoded.
# Order-wise no need to validate filters if there are no columns left to filter.
# Remember Feature columns are a subset of the Dataset columns.
if (len(existing_featurecoders) == 0):
initial_columns = feature_cols
featurecoder_index = 0
elif (len(existing_featurecoders) > 0):
# Get the leftover columns from the last one.
initial_columns = existing_featurecoders[-1].leftover_columns
featurecoder_index = existing_featurecoders[-1].featurecoder_index + 1
if (len(initial_columns) == 0):
raise ValueError("\nYikes - All features already have encoders associated with them. Cannot add more Featurecoders to this Encoderset.\n")
initial_dtypes = {}
for key,value in feature_dtypes.items():
for col in initial_columns:
if (col == key):
initial_dtypes[col] = value
# Exit the `col` loop early because a matching column was found.
break
if (verbose == True):
print(f"\n___/ featurecoder_index: {featurecoder_index} \\_________\n") # Intentionally no trailing `\n`.
# 2. Validate the lists of dtypes and columns provided as filters.
if (dataset_type == "image"):
raise ValueError("\nYikes - `Dataset.dataset_type=='image'` does not support encoding Feature.\n")
sklearn_preprocess, only_fit_train, is_categorical = Labelcoder.check_sklearn_attributes(
sklearn_preprocess, is_label=False
)
if (dtypes is not None):
for typ in dtypes:
if (typ not in set(initial_dtypes.values())):
raise ValueError(dedent(f"""
Yikes - dtype '{typ}' was not found in remaining dtypes.
Remove '{typ}' from `dtypes` and try again.
"""))
if (columns is not None):
for c in columns:
if (c not in initial_columns):
raise ValueError(dedent(f"""
Yikes - Column '{c}' was not found in remaining columns.
Remove '{c}' from `columns` and try again.
"""))
# 3a. Figure out which columns the filters apply to.
if (include==True):
# Add to this empty list via inclusion.
matching_columns = []
if ((dtypes is None) and (columns is None)):
raise ValueError("\nYikes - When `include==True`, either `dtypes` or `columns` must be provided.\n")
if (dtypes is not None):
for typ in dtypes:
for key,value in initial_dtypes.items():
if (value == typ):
matching_columns.append(key)
# Don't `break`; there can be more than one match.
if (columns is not None):
for c in columns:
# Remember that the dtype has already added some columns.
if (c not in matching_columns):
matching_columns.append(c)
elif (c in matching_columns):
# We know from validation above that the column existed in initial_columns.
# Therefore, if it no longer exists it means that dtype_exclude got to it first.
raise ValueError(dedent(f"""
Yikes - The column '{c}' was already included by `dtypes`, so this column-based filter is not valid.
Remove '{c}' from `columns` and try again.
"""))
elif (include==False):
# Prune this list via exclusion.
matching_columns = initial_columns.copy()
if (dtypes is not None):
for typ in dtypes:
for key,value in initial_dtypes.items():
if (value == typ):
matching_columns.remove(key)
# Don't `break`; there can be more than one match.
if (columns is not None):
for c in columns:
# Remember that the dtype has already pruned some columns.
if (c in matching_columns):
matching_columns.remove(c)
elif (c not in matching_columns):
# We know from validation above that the column existed in initial_columns.
# Therefore, if it no longer exists it means that dtype_exclude got to it first.
raise ValueError(dedent(f"""
Yikes - The column '{c}' was already excluded by `dtypes`,
so this column-based filter is not valid.
Remove '{c}' from `columns` and try again.
"""))
if (len(matching_columns) == 0):
if (include == True):
inex_str = "inclusion"
elif (include == False):
inex_str = "exclusion"
raise ValueError(f"\nYikes - There are no columns left to use after applying the dtype and column {inex_str} filters.\n")
elif (
(
(str(sklearn_preprocess).startswith("LabelBinarizer"))
or
(str(sklearn_preprocess).startswith("LabelEncoder"))
)
and
(len(matching_columns) > 1)
):
raise ValueError(dedent("""
Yikes - `LabelBinarizer` or `LabelEncoder` cannot be run on
multiple columns at once.
We have frequently observed inconsistent behavior where they
often output incompatible array shapes that cannot be scalably
concatenated, or they succeed in fitting, but fail at transforming.
We recommend you either use these with 1 column at a
time or switch to another encoder.
"""))
# 3b. Record the output.
leftover_columns = list(set(initial_columns) - set(matching_columns))
# This becomes leftover_dtypes.
for c in matching_columns:
del initial_dtypes[c]
original_filter = {
'include': include
, 'dtypes': dtypes
, 'columns': columns
}
# 4. Test fitting the encoder to matching columns.
samples_to_encode = feature.to_numpy(columns=matching_columns)
# Handles `Dataset.Sequence` by stacking the 2D arrays into a tall 2D array.
features_shape = samples_to_encode.shape
if (len(features_shape)==3):
rows_2D = features_shape[0] * features_shape[1]
samples_to_encode = samples_to_encode.reshape(rows_2D, features_shape[2])
fitted_encoders, encoding_dimension = Labelcoder.fit_dynamicDimensions(
sklearn_preprocess = sklearn_preprocess
, samples_to_fit = samples_to_encode
)
# 5. Test encoding the whole dataset using fitted encoder on matching columns.
try:
Labelcoder.transform_dynamicDimensions(
fitted_encoders = fitted_encoders
, encoding_dimension = encoding_dimension
, samples_to_transform = samples_to_encode
)
except:
raise ValueError(dedent("""
During testing, the encoder was successfully `fit()` on the features,
but, it failed to `transform()` features of the dataset as a whole.\n
"""))
else:
pass
featurecoder = Featurecoder.create(
featurecoder_index = featurecoder_index
, only_fit_train = only_fit_train
, is_categorical = is_categorical
, sklearn_preprocess = sklearn_preprocess
, matching_columns = matching_columns
, leftover_columns = leftover_columns
, leftover_dtypes = initial_dtypes#pruned
, original_filter = original_filter
, encoderset = encoderset
, encoding_dimension = encoding_dimension
)
if (verbose == True):
print(
f"=> The column(s) below matched your filter(s) and were ran through a test-encoding successfully.\n\n" \
f"{matching_columns}\n"
)
if (len(leftover_columns) == 0):
print(
f"=> Done. All feature column(s) have encoder(s) associated with them.\n" \
f"No more Featurecoders can be added to this Encoderset.\n"
)
elif (len(leftover_columns) > 0):
print(
f"=> The remaining column(s) and dtype(s) can be used in downstream Featurecoder(s):\n" \
f"{pprint.pformat(initial_dtypes)}\n"
)
return featurecoder
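# --- Illustrative call pattern (comments only; hypothetical column schema) ---
# Sequential Featurecoders consume columns: the second one only sees the first one's leftovers.
# >>> from sklearn.preprocessing import OrdinalEncoder, StandardScaler
# >>> encoderset.make_featurecoder(sklearn_preprocess=OrdinalEncoder(), dtypes=['object'])
# # matches e.g. ['color', 'shape']; the numeric columns become `leftover_columns`.
# >>> encoderset.make_featurecoder(sklearn_preprocess=StandardScaler(copy=False), dtypes=['float64'])
# # featurecoder_index=1 can only match within the leftovers of featurecoder_index=0.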
class Algorithm(BaseModel):
"""
- Remember, pytorch and mxnet handle optimizer/loss outside the model definition as part of the train.
- Could do a `.py` file as an alternative to Pickle.
- Currently waiting for coleifer to accept prospect of a DillField
https://github.com/coleifer/peewee/issues/2385
"""
library = CharField()
analysis_type = CharField()#classification_multi, classification_binary, regression, clustering.
fn_build = BlobField()
fn_lose = BlobField() # null? do unsupervised algs have loss?
fn_optimize = BlobField()
fn_train = BlobField()
fn_predict = BlobField()
# --- used by `select_fn_lose()` ---
def keras_regression_lose(**hp):
loser = keras.losses.MeanAbsoluteError()
return loser
def keras_binary_lose(**hp):
loser = keras.losses.BinaryCrossentropy()
return loser
def keras_multiclass_lose(**hp):
loser = keras.losses.CategoricalCrossentropy()
return loser
def pytorch_binary_lose(**hp):
loser = torch.nn.BCELoss()
return loser
def pytorch_multiclass_lose(**hp):
# ptrckblck says `nn.NLLLoss()` will work too.
loser = torch.nn.CrossEntropyLoss()
return loser
def pytorch_regression_lose(**hp):
loser = torch.nn.L1Loss()#mean absolute error.
return loser
# --- used by `select_fn_optimize()` ---
"""
- Eventually could help the user select an optimizer based on topology (e.g. depth),
but Adamax works great for me everywhere.
- `**hp` needs to be included because that's how it is called in training loop.
"""
def keras_optimize(**hp):
optimizer = keras.optimizers.Adamax(learning_rate=0.01)
return optimizer
def pytorch_optimize(model, **hp):
optimizer = torch.optim.Adamax(model.parameters(),lr=0.01)
return optimizer
# --- used by `select_fn_predict()` ---
def keras_multiclass_predict(model, samples_predict):
# Shows the probabilities of each class coming out of softmax neurons:
# array([[9.9990356e-01, 9.6374511e-05, 3.3754202e-10],...])
probabilities = model.predict(samples_predict['features'])
# This is the official keras replacement for multiclass `.predict_classes()`
# Returns one ordinal array per sample: `[[0][1][2][3]]`
prediction = np.argmax(probabilities, axis=-1)
return prediction, probabilities
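# --- Illustrative sketch (comments only): softmax probabilities -> ordinal predictions ---
# >>> import numpy as np
# >>> probabilities = np.array([[0.98, 0.01, 0.01], [0.10, 0.70, 0.20]])
# >>> np.argmax(probabilities, axis=-1)
# array([0, 1])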
def keras_binary_predict(model, samples_predict):
# Sigmoid output is between 0 and 1.
# It's not technically a probability, but it is still easy to interpret.
probability = model.predict(samples_predict['features'])
# This is the official keras replacement for binary classes `.predict_classes()`.
# Returns one array per sample: `[[0][1][0][1]]`.
prediction = (probability > 0.5).astype("int32")
return prediction, probability
def keras_regression_predict(model, samples_predict):
prediction = model.predict(samples_predict['features'])
# ^ Output is a single value, not `probability, prediction`
return prediction
def pytorch_binary_predict(model, samples_predict):
probability = model(samples_predict['features'])
# Convert tensor back to numpy for AIQC metrics.
probability = probability.detach().numpy()
prediction = (probability > 0.5).astype("int32")
# Both objects are numpy.
return prediction, probability
def pytorch_multiclass_predict(model, samples_predict):
probabilities = model(samples_predict['features'])
# Convert tensor back to numpy for AIQC metrics.
probabilities = probabilities.detach().numpy()
prediction = np.argmax(probabilities, axis=-1)
# Both objects are numpy.
return prediction, probabilities
def pytorch_regression_predict(model, samples_predict):
prediction = model(samples_predict['features']).detach().numpy()
return prediction
def select_fn_lose(
library:str,
analysis_type:str
):
fn_lose = None
if (library == 'keras'):
if (analysis_type == 'regression'):
fn_lose = Algorithm.keras_regression_lose
elif (analysis_type == 'classification_binary'):
fn_lose = Algorithm.keras_binary_lose
elif (analysis_type == 'classification_multi'):
fn_lose = Algorithm.keras_multiclass_lose
elif (library == 'pytorch'):
if (analysis_type == 'regression'):
fn_lose = Algorithm.pytorch_regression_lose
elif (analysis_type == 'classification_binary'):
fn_lose = Algorithm.pytorch_binary_lose
elif (analysis_type == 'classification_multi'):
fn_lose = Algorithm.pytorch_multiclass_lose
# After each of the predefined approaches above, check if it is still undefined.
if fn_lose is None:
raise ValueError(dedent("""
Yikes - You did not provide a `fn_lose`,
and we don't have an automated function for your combination of 'library' and 'analysis_type'
"""))
return fn_lose
def select_fn_optimize(library:str):
fn_optimize = None
if (library == 'keras'):
fn_optimize = Algorithm.keras_optimize
elif (library == 'pytorch'):
fn_optimize = Algorithm.pytorch_optimize
# After each of the predefined approaches above, check if it is still undefined.
if (fn_optimize is None):
raise ValueError(dedent("""
Yikes - You did not provide a `fn_optimize`,
and we don't have an automated function for your 'library'
"""))
return fn_optimize
def select_fn_predict(
library:str,
analysis_type:str
):
fn_predict = None
if (library == 'keras'):
if (analysis_type == 'classification_multi'):
fn_predict = Algorithm.keras_multiclass_predict
elif (analysis_type == 'classification_binary'):
fn_predict = Algorithm.keras_binary_predict
elif (analysis_type == 'regression'):
fn_predict = Algorithm.keras_regression_predict
elif (library == 'pytorch'):
if (analysis_type == 'classification_multi'):
fn_predict = Algorithm.pytorch_multiclass_predict
elif (analysis_type == 'classification_binary'):
fn_predict = Algorithm.pytorch_binary_predict
elif (analysis_type == 'regression'):
fn_predict = Algorithm.pytorch_regression_predict
# After each of the predefined approaches above, check if it is still undefined.
if fn_predict is None:
raise ValueError(dedent("""
Yikes - You did not provide a `fn_predict`,
and we don't have an automated function for your combination of 'library' and 'analysis_type'
"""))
return fn_predict
def make(
library:str
, analysis_type:str
, fn_build:object
, fn_train:object
, fn_predict:object = None
, fn_lose:object = None
, fn_optimize:object = None
, description:str = None
):
library = library.lower()
if ((library != 'keras') and (library != 'pytorch')):
raise ValueError("\nYikes - Right now, the only libraries we support are 'keras' and 'pytorch'\nMore to come soon!\n")
analysis_type = analysis_type.lower()
supported_analyses = ['classification_multi', 'classification_binary', 'regression']
if (analysis_type not in supported_analyses):
raise ValueError(f"\nYikes - Right now, the only analytics we support are:\n{supported_analyses}\n")
if (fn_predict is None):
fn_predict = Algorithm.select_fn_predict(
library=library, analysis_type=analysis_type
)
if (fn_optimize is None):
fn_optimize = Algorithm.select_fn_optimize(library=library)
if (fn_lose is None):
fn_lose = Algorithm.select_fn_lose(
library=library, analysis_type=analysis_type
)
funcs = [fn_build, fn_optimize, fn_train, fn_predict, fn_lose]
for i, f in enumerate(funcs):
is_func = callable(f)
if (not is_func):
raise ValueError(f"\nYikes - The following variable is not a function, it failed `callable(variable)==True`:\n\n{f}\n")
fn_build = dill_serialize(fn_build)
fn_optimize = dill_serialize(fn_optimize)
fn_train = dill_serialize(fn_train)
fn_predict = dill_serialize(fn_predict)
fn_lose = dill_serialize(fn_lose)
algorithm = Algorithm.create(
library = library
, analysis_type = analysis_type
, fn_build = fn_build
, fn_optimize = fn_optimize
, fn_train = fn_train
, fn_predict = fn_predict
, fn_lose = fn_lose
, description = description
)
return algorithm
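# --- Illustrative call pattern (comments only; hypothetical user-defined functions) ---
# Only `fn_build` and `fn_train` are required; the loss, optimizer, and predict
# functions fall back to the `select_fn_*` defaults above. Signatures shown are placeholders.
# >>> def fn_build(features_shape, label_shape, **hp): ...
# >>> def fn_train(model, loser, optimizer, samples_train, samples_evaluate, **hp): ...
# >>> algorithm = Algorithm.make(
# ...     library = 'keras'
# ...     , analysis_type = 'classification_binary'
# ...     , fn_build = fn_build
# ...     , fn_train = fn_train
# ... )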
def make_hyperparamset(
id:int
, hyperparameters:dict
, description:str = None
, pick_count:int = None
, pick_percent:float = None
):
hyperparamset = Hyperparamset.from_algorithm(
algorithm_id = id
, hyperparameters = hyperparameters
, description = description
, pick_count = pick_count
, pick_percent = pick_percent
)
return hyperparamset
def make_queue(
id:int
, splitset_id:int
, repeat_count:int = 1
, hyperparamset_id:int = None
, foldset_id:int = None
, hide_test:bool = False
):
queue = Queue.from_algorithm(
algorithm_id = id
, splitset_id = splitset_id
, hyperparamset_id = hyperparamset_id
, foldset_id = foldset_id
, repeat_count = repeat_count
, hide_test = hide_test
)
return queue
class Hyperparamset(BaseModel):
"""
- Not glomming this together with Algorithm and Preprocess because you can keep the Algorithm the same,
while running many different queues of hyperparams.
- An algorithm does not have to have a hyperparamset. It can use fixed parameters.
- `repeat_count` is the number of times to run a model; sometimes you just get stuck at local minima.
- `param_count` is the number of parameters that are being hypertuned.
- `possible_combos_count` is the number of possible combinations of parameters.
- On setting kwargs with `**` and a dict: https://stackoverflow.com/a/29028601/5739514
"""
description = CharField(null=True)
hyperparamcombo_count = IntegerField()
#strategy = CharField() # set to all by default #all/ random. this would generate a different dict with less params to try that should be persisted for transparency.
hyperparameters = JSONField()
algorithm = ForeignKeyField(Algorithm, backref='hyperparamsets')
def from_algorithm(
algorithm_id:int
, hyperparameters:dict
, description:str = None
, pick_count:int = None
, pick_percent:float = None
):
if ((pick_count is not None) and (pick_percent is not None)):
raise ValueError("Yikes - Either `pick_count` or `pick_percent` can be provided, but not both.")
algorithm = Algorithm.get_by_id(algorithm_id)
# Construct the hyperparameter combinations
params_names = list(hyperparameters.keys())
params_lists = list(hyperparameters.values())
# Make sure they are actually lists.
for i, pl in enumerate(params_lists):
params_lists[i] = listify(pl)
# From multiple lists, come up with every unique combination.
params_combos = list(itertools.product(*params_lists))
hyperparamcombo_count = len(params_combos)
params_combos_dicts = []
# Dictionary comprehension for making a dict from two lists.
for params in params_combos:
params_combos_dict = {params_names[i]: params[i] for i in range(len(params_names))}
params_combos_dicts.append(params_combos_dict)
# These are the random selection strategies.
if (pick_count is not None):
if (pick_count < 1):
raise ValueError(f"\nYikes - pick_count:<{pick_count}> cannot be less than 1.\n")
elif (pick_count > hyperparamcombo_count):
print(f"\nInfo - pick_count:<{pick_count}> greater than the number of hyperparameter combinations:<{hyperparamcombo_count}>.\nProceeding with all combinations.\n")
else:
# `sample` handles replacement.
params_combos_dicts = random.sample(params_combos_dicts, pick_count)
hyperparamcombo_count = len(params_combos_dicts)
elif (pick_percent is not None):
if ((pick_percent > 1.0) or (pick_percent <= 0.0)):
raise ValueError(f"\nYikes - pick_percent:<{pick_percent}> must be between 0.0 and 1.0.\n")
else:
select_count = math.ceil(hyperparamcombo_count * pick_percent)
params_combos_dicts = random.sample(params_combos_dicts, select_count)
hyperparamcombo_count = len(params_combos_dicts)
# Now that we have the metadata about combinations
hyperparamset = Hyperparamset.create(
algorithm = algorithm
, description = description
, hyperparameters = hyperparameters
, hyperparamcombo_count = hyperparamcombo_count
)
for i, c in enumerate(params_combos_dicts):
Hyperparamcombo.create(
combination_index = i
, favorite = False
, hyperparameters = c
, hyperparamset = hyperparamset
)
return hyperparamset
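# --- Illustrative sketch (comments only) of the grid expansion above ---
# >>> import itertools
# >>> hyperparameters = {"neuron_count": [24, 48], "epochs": [30, 60, 90]}
# >>> len(list(itertools.product(*hyperparameters.values())))
# 6   # hyperparamcombo_count before any `pick_count`/`pick_percent` sampling.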
class Hyperparamcombo(BaseModel):
combination_index = IntegerField()
favorite = BooleanField()
hyperparameters = JSONField()
hyperparamset = ForeignKeyField(Hyperparamset, backref='hyperparamcombos')
def get_hyperparameters(id:int, as_pandas:bool=False):
hyperparamcombo = Hyperparamcombo.get_by_id(id)
hyperparameters = hyperparamcombo.hyperparameters
params = []
for k,v in hyperparameters.items():
param = {"param":k, "value":v}
params.append(param)
if (as_pandas==True):
df = pd.DataFrame.from_records(params, columns=['param','value'])
return df
elif (as_pandas==False):
return hyperparameters
class Plot():
"""
Data is prepared in the Queue and Predictor classes
before being fed into the methods below.
"""
def __init__(self):
self.plot_template = dict(layout=go.Layout(
font=dict(family='Avenir', color='#FAFAFA'),
title=dict(x=0.05, y=0.95),
titlefont=dict(family='Avenir'),
plot_bgcolor='#181B1E',
paper_bgcolor='#181B1E',
hovermode='closest',
hoverlabel=dict(
bgcolor="#0F0F0F",
font=dict(
family="Avenir",
size=15
)
)))
def performance(self, dataframe:object):
# The 2nd metric is the last
name_metric_2 = dataframe.columns.tolist()[-1]
if (name_metric_2 == "accuracy"):
display_metric_2 = "Accuracy"
elif (name_metric_2 == "r2"):
display_metric_2 = "R²"
else:
raise ValueError(dedent(f"""
Yikes - The name of the 2nd metric to plot was neither 'accuracy' nor 'r2'.
You provided: {name_metric_2}.
The 2nd metric is supposed to be the last column of the dataframe provided.
"""))
fig = px.line(
dataframe
, title = 'Models Metrics by Split'
, x = 'loss'
, y = name_metric_2
, color = 'predictor_id'
, height = 600
, hover_data = ['predictor_id', 'split', 'loss', name_metric_2]
, line_shape='spline'
)
fig.update_traces(
mode = 'markers+lines'
, line = dict(width = 2)
, marker = dict(
size = 8
, line = dict(
width = 2
, color = 'white'
)
)
)
fig.update_layout(
xaxis_title = "Loss"
, yaxis_title = display_metric_2
, template = self.plot_template
)
fig.update_xaxes(zeroline=False, gridcolor='#262B2F', tickfont=dict(color='#818487'))
fig.update_yaxes(zeroline=False, gridcolor='#262B2F', tickfont=dict(color='#818487'))
fig.show()
def learning_curve(self, dataframe:object, analysis_type:str, loss_skip_15pct:bool=False):
"""Dataframe rows are epochs and columns are metric names."""
# Spline seems to crash with too many points.
if (dataframe.shape[0] >= 400):
line_shape = 'linear'
elif (dataframe.shape[0] < 400):
line_shape = 'spline'
df_loss = dataframe[['loss','val_loss']]
df_loss = df_loss.rename(columns={"loss": "train_loss", "val_loss": "validation_loss"})
df_loss = df_loss.round(3)
if loss_skip_15pct:
df_loss = df_loss.tail(round(df_loss.shape[0]*.85))
fig_loss = px.line(
df_loss
, title = 'Training History: Loss'
, line_shape = line_shape
)
fig_loss.update_layout(
xaxis_title = "Epochs"
, yaxis_title = "Loss"
, legend_title = None
, template = self.plot_template
, height = 400
, yaxis = dict(
side = "right"
, tickmode = 'auto'# When loss is initially high, the 0.1 tickmarks are overwhelming.
, tick0 = -1
, nticks = 9
)
, legend = dict(
orientation="h"
, yanchor="bottom"
, y=1.02
, xanchor="right"
, x=1
)
, margin = dict(
t = 5
, b = 0
),
)
fig_loss.update_xaxes(zeroline=False, gridcolor='#262B2F', tickfont=dict(color='#818487'))
fig_loss.update_yaxes(zeroline=False, gridcolor='#262B2F', tickfont=dict(color='#818487'))
if ("classification" in analysis_type):
df_acc = dataframe[['accuracy', 'val_accuracy']]
df_acc = df_acc.rename(columns={"accuracy": "train_accuracy", "val_accuracy": "validation_accuracy"})
df_acc = df_acc.round(3)
fig_acc = px.line(
df_acc
, title = 'Training History: Accuracy'
, line_shape = line_shape
)
fig_acc.update_layout(
xaxis_title = "Epochs"
, yaxis_title = "accuracy"
, legend_title = None
, height = 400
, template = self.plot_template
, yaxis = dict(
side = "right"
, tickmode = 'linear'
, tick0 = 0.0
, dtick = 0.05
)
, legend = dict(
orientation="h"
, yanchor="bottom"
, y=1.02
, xanchor="right"
, x=1
)
, margin = dict(
t = 5
),
)
fig_acc.update_xaxes(zeroline=False, gridcolor='#262B2F', tickfont=dict(color='#818487'))
fig_acc.update_yaxes(zeroline=False, gridcolor='#262B2F', tickfont=dict(color='#818487'))
fig_acc.show()
fig_loss.show()
def confusion_matrix(self, cm_by_split, labels):
for split, cm in cm_by_split.items():
# change each element of z to type string for annotations
cm_text = [[str(y) for y in x] for x in cm]
# set up figure
fig = ff.create_annotated_heatmap(
cm
, x=labels
, y=labels
, annotation_text=cm_text
, colorscale=px.colors.sequential.BuGn
, showscale=True
, colorbar={"title": 'Count'})
# add custom xaxis title
fig.add_annotation(dict(font=dict(color="white", size=12),
x=0.5,
y=1.2,
showarrow=False,
text="Predicted Label",
xref="paper",
yref="paper"))
# add custom yaxis title
fig.add_annotation(dict(font=dict(color="white", size=12),
x=-0.4,
y=0.5,
showarrow=False,
text="Actual Label",
textangle=-90,
xref="paper",
yref="paper"))
fig.update_layout(
title=f"Confusion Matrix: {split.capitalize()}"
, legend_title='Sample Count'
, template=self.plot_template
, height=375 # if too small, it won't render in Jupyter.
, width=850
, yaxis=dict(
tickmode='linear'
, tick0=0.0
, dtick=1.0
, tickfont = dict(
size=10
)
)
, xaxis=dict(
categoryorder='category descending',
tickfont=dict(
size=10
)
)
, margin=dict(
r=325
, l=325
)
)
fig.update_traces(hovertemplate =
"""predicted: %{x}<br>actual: %{y}<br>count: %{z}<extra></extra>""")
fig.show()
def precision_recall(self, dataframe:object):
fig = px.line(
dataframe
, x = 'recall'
, y = 'precision'
, color = 'split'
, title = 'Precision-Recall Curves'
)
fig.update_layout(
legend_title = None
, template = self.plot_template
, height = 500
, yaxis = dict(
side = "right"
, tickmode = 'linear'
, tick0 = 0.0
, dtick = 0.05
)
, legend = dict(
orientation="h"
, yanchor="bottom"
, y=1.02
, xanchor="right"
, x=1
)
)
fig.update_xaxes(zeroline=False, gridcolor='#262B2F', tickfont=dict(color='#818487'))
fig.update_yaxes(zeroline=False, gridcolor='#262B2F', tickfont=dict(color='#818487'))
fig.show()
def roc_curve(self, dataframe:object):
fig = px.line(
dataframe
, x = 'fpr'
, y = 'tpr'
, color = 'split'
, title = 'Receiver Operating Characteristic (ROC) Curves'
)
fig.update_layout(
legend_title = None
, template = self.plot_template
, height = 500
, xaxis = dict(
title = "False Positive Rate (FPR)"
, tick0 = 0.00
, range = [-0.025,1]
)
, yaxis = dict(
title = "True Positive Rate (TPR)"
, side = "left"
, tickmode = 'linear'
, tick0 = 0.00
, dtick = 0.05
, range = [0,1.05]
)
, legend = dict(
orientation="h"
, yanchor="bottom"
, y=1.02
, xanchor="right"
, x=1
)
, shapes=[
dict(
type = 'line'
, y0=0, y1=1
, x0=0, x1=1
, line = dict(dash='dot', width=2, color='#3b4043')
)]
)
fig.update_xaxes(zeroline=False, gridcolor='#262B2F', tickfont=dict(color='#818487'))
fig.update_yaxes(zeroline=False, gridcolor='#262B2F', tickfont=dict(color='#818487'))
fig.show()
class Queue(BaseModel):
repeat_count = IntegerField()
run_count = IntegerField()
hide_test = BooleanField()
algorithm = ForeignKeyField(Algorithm, backref='queues')
splitset = ForeignKeyField(Splitset, backref='queues')
hyperparamset = ForeignKeyField(Hyperparamset, deferrable='INITIALLY DEFERRED', null=True, backref='queues')
foldset = ForeignKeyField(Foldset, deferrable='INITIALLY DEFERRED', null=True, backref='queues')
def from_algorithm(
algorithm_id:int
, splitset_id:int
, repeat_count:int = 1
, hide_test:bool=False
, hyperparamset_id:int = None
, foldset_id:int = None
):
algorithm = Algorithm.get_by_id(algorithm_id)
library = algorithm.library
splitset = Splitset.get_by_id(splitset_id)
if (foldset_id is not None):
foldset = Foldset.get_by_id(foldset_id)
# Future: since unsupervised won't have a Label for flagging the analysis type, I am going to keep the `Algorithm.analysis_type` attribute for now.
if (splitset.supervision == 'supervised'):
# Validate combinations of alg.analysis_type, lbl.col_count, lbl.dtype, split/fold.bin_count
analysis_type = algorithm.analysis_type
label_col_count = splitset.label.column_count
label_dtypes = list(splitset.label.get_dtypes().values())
labelcoder = splitset.label.get_latest_labelcoder()
if (labelcoder is not None):
stringified_labelcoder = str(labelcoder.sklearn_preprocess)
else:
stringified_labelcoder = None
if (label_col_count == 1):
label_dtype = label_dtypes[0]
if ('classification' in analysis_type):
if (np.issubdtype(label_dtype, np.floating)):
raise ValueError("Yikes - Cannot have `Algorithm.analysis_type!='regression`, when Label dtype falls under `np.floating`.")
if (labelcoder is not None):
if (labelcoder.is_categorical == False):
raise ValueError(dedent(f"""
Yikes - `Algorithm.analysis_type=='classification_*'`, but
`Labelcoder.sklearn_preprocess={stringified_labelcoder}` was not found in known 'classification' encoders:
{categorical_encoders}
"""))
if ('_binary' in analysis_type):
# Prevent OHE w classification_binary
if (stringified_labelcoder.startswith("OneHotEncoder")):
raise ValueError(dedent("""
Yikes - `Algorithm.analysis_type=='classification_binary', but
`Labelcoder.sklearn_preprocess.startswith('OneHotEncoder')`.
This would result in a multi-column output, but binary classification
needs a single column output.
Go back and make a Labelcoder with single column output preprocess like `Binarizer()` instead.
"""))
elif ('_multi' in analysis_type):
if (library == 'pytorch'):
# Prevent OHE w pytorch.
if (stringified_labelcoder.startswith("OneHotEncoder")):
raise ValueError(dedent("""
Yikes - `(analysis_type=='classification_multi') and (library == 'pytorch')`,
but `Labelcoder.sklearn_preprocess.startswith('OneHotEncoder')`.
This would result in a multi-column OHE output.
However, neither `nn.CrossEntropyLoss` nor `nn.NLLLoss` support multi-column input.
Go back and make a Labelcoder with single column output preprocess like `OrdinalEncoder()` instead.
"""))
elif (not stringified_labelcoder.startswith("OrdinalEncoder")):
print(dedent("""
Warning - When `(analysis_type=='classification_multi') and (library == 'pytorch')`
We recommend you use `sklearn.preprocessing.OrdinalEncoder()` as a Labelcoder.
"""))
else:
if (not stringified_labelcoder.startswith("OneHotEncoder")):
print(dedent("""
Warning - When performing non-PyTorch, multi-class classification on a single column,
we recommend you use `sklearn.preprocessing.OneHotEncoder()` as a Labelcoder.
"""))
elif (
(labelcoder is None) and ('_multi' in analysis_type) and (library != 'pytorch')
):
print(dedent("""
Warning - When performing non-PyTorch, multi-class classification on a single column
without using a Labelcoder, Algorithm must have user-defined `fn_lose`,
`fn_optimize`, and `fn_predict`. We recommend you use
`sklearn.preprocessing.OneHotEncoder()` as a Labelcoder instead.
"""))
if (splitset.bin_count is not None):
print(dedent("""
Warning - `'classification' in Algorithm.analysis_type`, but `Splitset.bin_count is not None`.
`bin_count` is meant for `Algorithm.analysis_type=='regression'`.
"""))
if (foldset_id is not None):
# Not doing an `and` because foldset can't be accessed if it doesn't exist.
if (foldset.bin_count is not None):
print(dedent("""
Warning - `'classification' in Algorithm.analysis_type`, but `Foldset.bin_count is not None`.
`bin_count` is meant for `Algorithm.analysis_type=='regression'`.
"""))
elif (analysis_type == 'regression'):
if (labelcoder is not None):
if (labelcoder.is_categorical == True):
raise ValueError(dedent(f"""
Yikes - `Algorithm.analysis_type=='regression'`, but
`Labelcoder.sklearn_preprocess={stringified_labelcoder}` was found in known categorical encoders:
{categorical_encoders}
"""))
if (
(not np.issubdtype(label_dtype, np.floating))
and
(not np.issubdtype(label_dtype, np.unsignedinteger))
and
(not np.issubdtype(label_dtype, np.signedinteger))
):
raise ValueError("Yikes - `Algorithm.analysis_type == 'regression'`, but label dtype was neither `np.floating`, `np.unsignedinteger`, nor `np.signedinteger`.")
if (splitset.bin_count is None):
print("Warning - `Algorithm.analysis_type == 'regression'`, but `bin_count` was not set when creating Splitset.")
if (foldset_id is not None):
if (foldset.bin_count is None):
print("Warning - `Algorithm.analysis_type == 'regression'`, but `bin_count` was not set when creating Foldset.")
if (splitset.bin_count is not None):
print("Warning - `bin_count` was set for Splitset, but not for Foldset. This leads to inconsistent stratification across samples.")
elif (foldset.bin_count is not None):
if (splitset.bin_count is None):
print("Warning - `bin_count` was set for Foldset, but not for Splitset. This leads to inconsistent stratification across samples.")
# We already know these are OHE based on Label creation, so skip dtype, bin, and encoder checks.
elif (label_col_count > 1):
if (analysis_type != 'classification_multi'):
raise ValueError("Yikes - `Label.column_count > 1` but `Algorithm.analysis_type != 'classification_multi'`.")
elif ((splitset.supervision != 'supervised') and (hide_test==True)):
raise ValueError("\nYikes - Cannot have `hide_test==True` if `splitset.supervision != 'supervised'`.\n")
if (foldset_id is not None):
foldset = Foldset.get_by_id(foldset_id)
foldset_splitset = foldset.splitset
if foldset_splitset != splitset:
raise ValueError(f"\nYikes - The Foldset <id:{foldset_id}> and Splitset <id:{splitset_id}> you provided are not related.\n")
folds = list(foldset.folds)
else:
# Just so we have an item to loop over as a null condition when creating Jobs.
folds = [None]
foldset = None
if (hyperparamset_id is not None):
hyperparamset = Hyperparamset.get_by_id(hyperparamset_id)
combos = list(hyperparamset.hyperparamcombos)
else:
# Just so we have an item to loop over as a null condition when creating Jobs.
combos = [None]
hyperparamset = None
# The null conditions set above (e.g. `[None]`) ensure multiplication by 1.
run_count = len(combos) * len(folds) * repeat_count
q = Queue.create(
run_count = run_count
, repeat_count = repeat_count
, algorithm = algorithm
, splitset = splitset
, foldset = foldset
, hyperparamset = hyperparamset
, hide_test = hide_test
)
for c in combos:
if (foldset is not None):
jobset = Jobset.create(
repeat_count = repeat_count
, queue = q
, hyperparamcombo = c
, foldset = foldset
)
elif (foldset is None):
jobset = None
try:
for f in folds:
Job.create(
queue = q
, hyperparamcombo = c
, fold = f
, repeat_count = repeat_count
, jobset = jobset
)
except:
if (foldset is not None):
jobset.delete_instance() # Orphaned.
raise
return q
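# --- Illustrative sketch (comments only) of the run_count arithmetic above ---
# >>> combo_count, fold_count, repeat_count = 6, 5, 2
# >>> combo_count * fold_count                   # Jobs created (one per combo/fold pair).
# 30
# >>> combo_count * fold_count * repeat_count    # total Predictors expected, i.e. run_count.
# 60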
def poll_statuses(id:int, as_pandas:bool=False):
queue = Queue.get_by_id(id)
repeat_count = queue.repeat_count
statuses = []
for i in range(repeat_count):
for j in queue.jobs:
# Check if there is a Predictor with a matching repeat_index
matching_predictor = Predictor.select().join(Job).join(Queue).where(
Queue.id==queue.id, Job.id==j.id, Predictor.repeat_index==i
)
if (len(matching_predictor) >= 1):
r_id = matching_predictor[0].id
else:
r_id = None
job_dct = {"job_id":j.id, "repeat_index":i, "predictor_id": r_id}
statuses.append(job_dct)
if (as_pandas==True):
df = pd.DataFrame.from_records(statuses, columns=['job_id', 'repeat_index', 'predictor_id'])
return df.round()
elif (as_pandas==False):
return statuses
def poll_progress(id:int, raw:bool=False, loop:bool=False, loop_delay:int=3):
"""
- For background_process execution where progress bar not visible.
- Could also be used for cloud jobs though.
"""
if (loop==False):
statuses = Queue.poll_statuses(id)
total = len(statuses)
done_count = len([s for s in statuses if s['predictor_id'] is not None])
percent_done = done_count / total
if (raw==True):
return percent_done
elif (raw==False):
done_pt05 = round(round(percent_done / 0.05) * 0.05, -int(math.floor(math.log10(0.05))))
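# e.g. percent_done=0.43 -> rounded to the nearest 0.05 -> 0.45 -> 9 of 20 bars filled below.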
bars_filled = int(done_pt05 * 20)
bars_blank = 20 - bars_filled
meter = '|'
for i in range(bars_filled):
meter += '██'
for i in range(bars_blank):
meter += '--'
meter += '|'
print(f"🔮 Training Models 🔮 {meter} {done_count}/{total} : {int(percent_done*100)}%")
elif (loop==True):
while (loop==True):
statuses = Queue.poll_statuses(id)
total = len(statuses)
done_count = len([s for s in statuses if s['predictor_id'] is not None])
percent_done = done_count / total
if (raw==True):
return percent_done
elif (raw==False):
done_pt05 = round(round(percent_done / 0.05) * 0.05, -int(math.floor(math.log10(0.05))))
bars_filled = int(done_pt05 * 20)
bars_blank = 20 - bars_filled
meter = '|'
for i in range(bars_filled):
meter += '██'
for i in range(bars_blank):
meter += '--'
meter += '|'
print(f"🔮 Training Models 🔮 {meter} {done_count}/{total} : {int(percent_done*100)}%", end='\r')
#print()
if (done_count == total):
loop = False
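# `say` is a macOS text-to-speech command; on other systems the shell call below typically prints an error and has no other effect.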
os.system("say Model training completed")
break
time.sleep(loop_delay)
def run_jobs(id:int, in_background:bool=False, verbose:bool=False):
queue = Queue.get_by_id(id)
# Quick check to make sure all predictors aren't already complete.
run_count = queue.run_count
predictor_count = Predictor.select().join(Job).join(Queue).where(
Queue.id == queue.id).count()
if (run_count == predictor_count):
print("\nAll Jobs have already completed.\n")
else:
if (run_count > predictor_count > 0):
print("\nResuming Jobs...\n")
job_statuses = Queue.poll_statuses(id)
if (in_background==True):
proc_name = "aiqc_queue_" + str(queue.id)
proc_names = [p.name for p in multiprocessing.active_children()]
if (proc_name in proc_names):
raise ValueError(
f"\nYikes - Cannot start this Queue because multiprocessing.Process.name '{proc_name}' is already running."
f"\nIf need be, you can kill the existing Process with `queue.stop_jobs()`.\n"
)
# See notes at top of file about 'fork' vs 'spawn'
proc = multiprocessing.Process(
target = execute_jobs
, name = proc_name
, args = (job_statuses, verbose,) #Needs trailing comma.
)
proc.start()
# proc terminates when `execute_jobs` finishes.
elif (in_background==False):
try:
for j in tqdm(
job_statuses
, desc = "🔮 Training Models 🔮"
, ncols = 100
):
if (j['predictor_id'] is None):
Job.run(id=j['job_id'], verbose=verbose, repeat_index=j['repeat_index'])
except (KeyboardInterrupt):
# So that we don't get nasty error messages when interrupting a long running loop.
print("\nQueue was gracefully interrupted.\n")
def stop_jobs(id:int):
# SQLite is ACID (D = Durable). If transaction is interrupted mid-write, then it is rolled back.
queue = Queue.get_by_id(id)
proc_name = f"aiqc_queue_{queue.id}"
current_procs = [p.name for p in multiprocessing.active_children()]
if (proc_name not in current_procs):
raise ValueError(f"\nYikes - Cannot terminate `multiprocessing.Process.name` '{proc_name}' because it is not running.\n")
processes = multiprocessing.active_children()
for p in processes:
if (p.name == proc_name):
try:
p.terminate()
except:
raise Exception(f"\nYikes - Failed to terminate `multiprocessing.Process` '{proc_name}.'\n")
else:
print(f"\nKilled `multiprocessing.Process` '{proc_name}' spawned from aiqc.Queue <id:{queue.id}>\n")
def metrics_to_pandas(
id:int
, selected_metrics:list=None
, sort_by:list=None
, ascending:bool=False
):
queue = Queue.get_by_id(id)
selected_metrics = listify(selected_metrics)
sort_by = listify(sort_by)
queue_predictions = Prediction.select().join(
Predictor).join(Job).where(Job.queue==id
).order_by(Prediction.id)
queue_predictions = list(queue_predictions)
if (not queue_predictions):
print(dedent("""
~:: Patience, young Padawan ::~
Completed, your Jobs are not. So Predictors to be had, there are None.
"""))
return None
metric_names = list(list(queue_predictions[0].metrics.values())[0].keys())# Fragile: assumes the first Prediction's first split contains every metric key.
if (selected_metrics is not None):
for m in selected_metrics:
if (m not in metric_names):
raise ValueError(dedent(f"""
Yikes - The metric '{m}' does not exist in `Predictor.metrics`.
Note: the metrics available depend on the `Queue.analysis_type`.
"""))
elif (selected_metrics is None):
selected_metrics = metric_names
# Unpack the split data from each Predictor and tag it with relevant Queue metadata.
split_metrics = []
for prediction in queue_predictions:
predictor = prediction.predictor
for split_name,metrics in prediction.metrics.items():
split_metric = {}
if (predictor.job.hyperparamcombo is not None):
split_metric['hyperparamcombo_id'] = predictor.job.hyperparamcombo.id
elif (predictor.job.hyperparamcombo is None):
split_metric['hyperparamcombo_id'] = None
if (queue.foldset is not None):
split_metric['jobset_id'] = predictor.job.jobset.id
split_metric['fold_index'] = predictor.job.fold.fold_index
split_metric['job_id'] = predictor.job.id
if (predictor.job.repeat_count > 1):
split_metric['repeat_index'] = predictor.repeat_index
split_metric['predictor_id'] = prediction.id
split_metric['split'] = split_name
for metric_name,metric_value in metrics.items():
# Check whitelist.
if metric_name in selected_metrics:
split_metric[metric_name] = metric_value
split_metrics.append(split_metric)
column_names = list(split_metrics[0].keys())
if (sort_by is not None):
for name in sort_by:
if (name not in column_names):
raise ValueError(f"\nYikes - Column '{name}' not found in metrics dataframe.\n")
df = pd.DataFrame.from_records(split_metrics).sort_values(
by=sort_by, ascending=ascending
)
elif (sort_by is None):
df = pd.DataFrame.from_records(split_metrics).sort_values(
by=['predictor_id'], ascending=ascending
)
return df
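# Illustrative usage (metric names are examples; availability depends on `Queue.analysis_type`):
#   df = queue.metrics_to_pandas(selected_metrics=['accuracy','loss'], sort_by=['accuracy'], ascending=False)
#   Returns one row per (predictor, split) with the selected metric columns.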
def metrics_aggregate_to_pandas(
id:int
, ascending:bool=False
, selected_metrics:list=None
, selected_stats:list=None
, sort_by:list=None
):
selected_metrics = listify(selected_metrics)
selected_stats = listify(selected_stats)
sort_by = listify(sort_by)
queue_predictions = Prediction.select().join(
Predictor).join(Job).where(Job.queue==id
).order_by(Prediction.id)
queue_predictions = list(queue_predictions)
if (not queue_predictions):
print("\n~:: Patience, young Padawan ::~\n\nThe Jobs have not completed yet, so there are no Predictors to be had.\n")
return None
metrics_aggregate = queue_predictions[0].metrics_aggregate
metric_names = list(metrics_aggregate.keys())
stat_names = list(list(metrics_aggregate.values())[0].keys())
if (selected_metrics is not None):
for m in selected_metrics:
if (m not in metric_names):
raise ValueError(dedent(f"""
Yikes - The metric '{m}' does not exist in `Predictor.metrics_aggregate`.
Note: the metrics available depend on the `Queue.analysis_type`.
"""))
elif (selected_metrics is None):
selected_metrics = metric_names
if (selected_stats is not None):
for s in selected_stats:
if (s not in stat_names):
raise ValueError(f"\nYikes - The statistic '{s}' does not exist in `Predictor.metrics_aggregate`.\n")
elif (selected_stats is None):
selected_stats = stat_names
predictions_stats = []
for prediction in queue_predictions:
predictor = prediction.predictor
for metric, stats in prediction.metrics_aggregate.items():
# Check whitelist.
if (metric in selected_metrics):
stats['metric'] = metric
stats['predictor_id'] = prediction.id
if (predictor.job.repeat_count > 1):
stats['repeat_index'] = predictor.repeat_index
if (predictor.job.fold is not None):
stats['jobset_id'] = predictor.job.jobset.id
stats['fold_index'] = predictor.job.fold.fold_index
else:
stats['job_id'] = predictor.job.id
stats['hyperparamcombo_id'] = predictor.job.hyperparamcombo.id
predictions_stats.append(stats)
# Cannot edit dictionary while key-values are being accessed.
for stat in stat_names:
if (stat not in selected_stats):
for s in predictions_stats:
s.pop(stat)# Errors if not found.
#Reverse the order of the dictionary keys.
predictions_stats = [dict(reversed(list(d.items()))) for d in predictions_stats]
column_names = list(predictions_stats[0].keys())
if (sort_by is not None):
for name in sort_by:
if (name not in column_names):
raise ValueError(f"\nYikes - Column '{name}' not found in aggregate metrics dataframe.\n")
df = pd.DataFrame.from_records(predictions_stats).sort_values(
by=sort_by, ascending=ascending
)
elif (sort_by is None):
df = pd.DataFrame.from_records(predictions_stats)
return df
def plot_performance(
id:int
, max_loss:float=None
, min_accuracy:float=None
, min_r2:float=None
):
"""
Originally I had `min_metric_2` not `min_accuracy` and `min_r2`,
but that would be confusing for users, so I went with informative
erro messages instead.
"""
queue = Queue.get_by_id(id)
analysis_type = queue.algorithm.analysis_type
# Now we need to filter the df based on the specified criteria.
if ("classification" in analysis_type):
if (min_r2 is not None):
raise ValueError("\nYikes - Cannot use argument `min_r2` if `'classification' in queue.analysis_type`.\n")
if (min_accuracy is None):
min_accuracy = 0.0
min_metric_2 = min_accuracy
name_metric_2 = "accuracy"
elif (analysis_type == 'regression'):
if (min_accuracy is not None):
raise ValueError("\nYikes - Cannot use argument `min_accuracy` if `queue.analysis_type='regression'`.\n")
if (min_r2 is None):
min_r2 = -1.0
min_metric_2 = min_r2
name_metric_2 = "r2"
if (max_loss is None):
max_loss = float('inf')
df = queue.metrics_to_pandas()
if (df is None):
# Warning message handled by `metrics_to_pandas()` above.
return None
qry_str = "(loss >= {}) | ({} <= {})".format(max_loss, name_metric_2, min_metric_2)
failed = df.query(qry_str)
failed_runs = failed['predictor_id'].to_list()
failed_runs_unique = list(set(failed_runs))
# Here the `~` inverts it to mean `.isNotIn()`
df_passed = df[~df['predictor_id'].isin(failed_runs_unique)]
df_passed = df_passed.round(3)
dataframe = df_passed[['predictor_id', 'split', 'loss', name_metric_2]]
if dataframe.empty:
print("Yikes - There are no models that met the criteria specified.")
else:
Plot().performance(dataframe=dataframe)
class Jobset(BaseModel):
"""
- Used to group cross-fold Jobs.
- Union of Hyperparamcombo, Foldset, and Queue.
"""
repeat_count = IntegerField()
foldset = ForeignKeyField(Foldset, backref='jobsets')
hyperparamcombo = ForeignKeyField(Hyperparamcombo, backref='jobsets')
queue = ForeignKeyField(Queue, backref='jobsets')
class Job(BaseModel):
"""
- Gets its Algorithm through the Queue.
- Saves its Model to a Predictor.
"""
repeat_count = IntegerField()
#log = CharField() #catch & record stacktrace of failures and warnings?
queue = ForeignKeyField(Queue, backref='jobs')
hyperparamcombo = ForeignKeyField(Hyperparamcombo, deferrable='INITIALLY DEFERRED', null=True, backref='jobs')
fold = ForeignKeyField(Fold, deferrable='INITIALLY DEFERRED', null=True, backref='jobs')
jobset = ForeignKeyField(Jobset, deferrable='INITIALLY DEFERRED', null=True, backref='jobs')
def split_classification_metrics(labels_processed, predictions, probabilities, analysis_type):
if (analysis_type == "classification_binary"):
average = "binary"
roc_average = "micro"
roc_multi_class = None
elif (analysis_type == "classification_multi"):
average = "weighted"
roc_average = "weighted"
roc_multi_class = "ovr"
split_metrics = {}
# Let the classification_multi labels hit this metric in OHE format.
split_metrics['roc_auc'] = sklearn.metrics.roc_auc_score(labels_processed, probabilities, average=roc_average, multi_class=roc_multi_class)
# Then convert the classification_multi labels ordinal format.
if (analysis_type == "classification_multi"):
labels_processed = np.argmax(labels_processed, axis=1)
split_metrics['accuracy'] = sklearn.metrics.accuracy_score(labels_processed, predictions)
split_metrics['precision'] = sklearn.metrics.precision_score(labels_processed, predictions, average=average, zero_division=0)
split_metrics['recall'] = sklearn.metrics.recall_score(labels_processed, predictions, average=average, zero_division=0)
split_metrics['f1'] = sklearn.metrics.f1_score(labels_processed, predictions, average=average, zero_division=0)
return split_metrics
def split_regression_metrics(labels, predictions):
split_metrics = {}
split_metrics['r2'] = sklearn.metrics.r2_score(labels, predictions)
split_metrics['mse'] = sklearn.metrics.mean_squared_error(labels, predictions)
split_metrics['explained_variance'] = sklearn.metrics.explained_variance_score(labels, predictions)
return split_metrics
def split_classification_plots(labels_processed, predictions, probabilities, analysis_type):
predictions = predictions.flatten()
probabilities = probabilities.flatten()
split_plot_data = {}
if (analysis_type == "classification_binary"):
labels_processed = labels_processed.flatten()
split_plot_data['confusion_matrix'] = sklearn.metrics.confusion_matrix(labels_processed, predictions)
fpr, tpr, _ = sklearn.metrics.roc_curve(labels_processed, probabilities)
precision, recall, _ = sklearn.metrics.precision_recall_curve(labels_processed, probabilities)
elif (analysis_type == "classification_multi"):
# Flatten OHE labels for use with probabilities.
labels_flat = labels_processed.flatten()
fpr, tpr, _ = sklearn.metrics.roc_curve(labels_flat, probabilities)
precision, recall, _ = sklearn.metrics.precision_recall_curve(labels_flat, probabilities)
# Then convert unflat OHE to ordinal format for use with predictions.
labels_ordinal = np.argmax(labels_processed, axis=1)
split_plot_data['confusion_matrix'] = sklearn.metrics.confusion_matrix(labels_ordinal, predictions)
split_plot_data['roc_curve'] = {}
split_plot_data['roc_curve']['fpr'] = fpr
split_plot_data['roc_curve']['tpr'] = tpr
split_plot_data['precision_recall_curve'] = {}
split_plot_data['precision_recall_curve']['precision'] = precision
split_plot_data['precision_recall_curve']['recall'] = recall
return split_plot_data
def encoder_fit_labels(
arr_labels:object, samples_train:list,
labelcoder:object
):
"""
- All Label columns are always used during encoding.
- Rows determine what fit happens.
"""
if (labelcoder is not None):
preproc = labelcoder.sklearn_preprocess
if (labelcoder.only_fit_train == True):
labels_to_fit = arr_labels[samples_train]
elif (labelcoder.only_fit_train == False):
labels_to_fit = arr_labels
fitted_coders, encoding_dimension = Labelcoder.fit_dynamicDimensions(
sklearn_preprocess = preproc
, samples_to_fit = labels_to_fit
)
# Save the fit.
fitted_encoders = fitted_coders[0]#take out of list before adding to dict.
return fitted_encoders
def encoder_transform_labels(
arr_labels:object,
fitted_encoders:object, labelcoder:object
):
encoding_dimension = labelcoder.encoding_dimension
arr_labels = Labelcoder.transform_dynamicDimensions(
fitted_encoders = [fitted_encoders] # `list(fitted_encoders)`, fails.
, encoding_dimension = encoding_dimension
, samples_to_transform = arr_labels
)
return arr_labels
def colIndices_from_colNames(column_names:list, desired_cols:list):
desired_cols = listify(desired_cols)
col_indices = [column_names.index(c) for c in desired_cols]
return col_indices
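# e.g. column_names=['age','height','weight'], desired_cols=['weight','age'] -> [2, 0]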
def cols_by_indices(arr:object, col_indices:list):
# Input and output 2D array. Fetches a subset of columns using their indices.
# In the future, if this needs to handle 3D arrays, adjust to `[:,col_indices,:]`.
subset_arr = arr[:,col_indices]
return subset_arr
def encoderset_fit_features(
arr_features:object, samples_train:list,
encoderset:object
):
featurecoders = list(encoderset.featurecoders)
fitted_encoders = []
if (len(featurecoders) > 0):
f_cols = encoderset.feature.columns
# For each featurecoder: fetch, transform, & concatenate matching features.
# One nested list per Featurecoder. List of lists.
for featurecoder in featurecoders:
preproc = featurecoder.sklearn_preprocess
if (featurecoder.only_fit_train == True):
features_to_fit = arr_features[samples_train]
elif (featurecoder.only_fit_train == False):
features_to_fit = arr_features
# Handles `Dataset.Sequence` by stacking the 2D arrays into a tall 2D array.
features_shape = features_to_fit.shape
if (len(features_shape)==3):
rows_2D = features_shape[0] * features_shape[1]
features_to_fit = features_to_fit.reshape(rows_2D, features_shape[2])
# Only fit these columns.
matching_columns = featurecoder.matching_columns
# Get the indices of the desired columns.
col_indices = Job.colIndices_from_colNames(
column_names=f_cols, desired_cols=matching_columns
)
# Filter the array using those indices.
features_to_fit = Job.cols_by_indices(features_to_fit, col_indices)
# Fit the encoder on the subset.
fitted_coders, encoding_dimension = Labelcoder.fit_dynamicDimensions(
sklearn_preprocess = preproc
, samples_to_fit = features_to_fit
)
fitted_encoders.append(fitted_coders)
return fitted_encoders
def encoderset_transform_features(
arr_features:object,
fitted_encoders:list, encoderset:object
):
"""
- Can't overwrite columns with data of different type (e.g. encoding object to int),
so they have to be pieced together.
"""
featurecoders = list(encoderset.featurecoders)
if (len(featurecoders) > 0):
# Handle Sequence (part 1): reshape 3D to tall 2D for transformation.
features_shape = arr_features.shape
if (len(features_shape)==3):
rows_2D = features_shape[0] * features_shape[1]
arr_features = arr_features.reshape(rows_2D, features_shape[2])
f_cols = encoderset.feature.columns
transformed_features = None #Used as a placeholder for `np.concatenate`.
for featurecoder in featurecoders:
idx = featurecoder.featurecoder_index
fitted_coders = fitted_encoders[idx]# returns list
encoding_dimension = featurecoder.encoding_dimension
# Only transform these columns.
matching_columns = featurecoder.matching_columns
# Get the indices of the desired columns.
col_indices = Job.colIndices_from_colNames(
column_names=f_cols, desired_cols=matching_columns
)
# Filter the array using those indices.
features_to_transform = Job.cols_by_indices(arr_features, col_indices)
if (idx == 0):
# It's the first encoder. Nothing to concat with, so just overwrite the None value.
transformed_features = Labelcoder.transform_dynamicDimensions(
fitted_encoders = fitted_coders
, encoding_dimension = encoding_dimension
, samples_to_transform = features_to_transform
)
elif (idx > 0):
encoded_features = Labelcoder.transform_dynamicDimensions(
fitted_encoders = fitted_coders
, encoding_dimension = encoding_dimension
, samples_to_transform = features_to_transform
)
# Then concatenate w previously encoded features.
transformed_features = np.concatenate(
(transformed_features, encoded_features)
, axis = 1
)
# After all featurecoders run, merge in leftover, unencoded columns.
leftover_columns = featurecoders[-1].leftover_columns
if (len(leftover_columns) > 0):
# Get the indices of the desired columns.
col_indices = Job.colIndices_from_colNames(
column_names=f_cols, desired_cols=leftover_columns
)
# Filter the array using those indices.
leftover_features = Job.cols_by_indices(arr_features, col_indices)
transformed_features = np.concatenate(
(transformed_features, leftover_features)
, axis = 1
)
# Handle Sequence (part 2): reshape the tall 2D transformed data back to 3D.
if (len(features_shape)==3):
# Encoding may change the column count, so infer the final dimension.
transformed_features = transformed_features.reshape(
features_shape[0],
features_shape[1],
-1
)
elif (len(featurecoders) == 0):
transformed_features = arr_features
return transformed_features
def predict(samples:dict, predictor_id:int, splitset_id:int=None):
"""
Evaluation: predictions, metrics, charts for each split/fold.
- Metrics are run against encoded data because they won't accept string data.
- `splitset_id` refers to a splitset provided for inference, not training.
"""
predictor = Predictor.get_by_id(predictor_id)
hyperparamcombo = predictor.job.hyperparamcombo
algorithm = predictor.job.queue.algorithm
library = algorithm.library
analysis_type = algorithm.analysis_type
# Access the 2nd level of the `samples:dict` to determine if it has Labels.
first_key = list(samples.keys())[0]
if ('labels' in samples[first_key].keys()):
has_labels = True
else:
has_labels = False
# Prepare the logic.
model = predictor.get_model()
if (algorithm.library == 'keras'):
model = predictor.get_model()
elif (algorithm.library == 'pytorch'):
# Returns tuple(model,optimizer)
model = predictor.get_model()
model = model[0].eval()
fn_predict = dill_deserialize(algorithm.fn_predict)
if (hyperparamcombo is not None):
hp = hyperparamcombo.hyperparameters
elif (hyperparamcombo is None):
hp = {} #`**` cannot be None.
if (has_labels == True):
fn_lose = dill_deserialize(algorithm.fn_lose)
loser = fn_lose(**hp)
if (loser is None):
raise ValueError("\nYikes - `fn_lose` returned `None`.\nDid you include `return loser` at the end of the function?\n")
predictions = {}
probabilities = {}
if (has_labels == True):
metrics = {}
plot_data = {}
if ("classification" in analysis_type):
for split, data in samples.items():
preds, probs = fn_predict(model, data)
predictions[split] = preds
probabilities[split] = probs
# Outputs numpy.
if (has_labels == True):
# https://keras.io/api/losses/probabilistic_losses/
if (library == 'keras'):
loss = loser(data['labels'], probs)
elif (library == 'pytorch'):
tz_probs = torch.FloatTensor(probs)
if (algorithm.analysis_type == 'classification_binary'):
loss = loser(tz_probs, data['labels'])
# convert back to numpy for metrics and plots.
data['labels'] = data['labels'].detach().numpy()
elif (algorithm.analysis_type == 'classification_multi'):
flat_labels = data['labels'].flatten().to(torch.long)
loss = loser(tz_probs, flat_labels)
# convert back to *OHE* numpy for metrics and plots.
data['labels'] = data['labels'].detach().numpy()
data['labels'] = keras.utils.to_categorical(data['labels'])
metrics[split] = Job.split_classification_metrics(
data['labels'], preds, probs, analysis_type
)
metrics[split]['loss'] = float(loss)
plot_data[split] = Job.split_classification_plots(
data['labels'], preds, probs, analysis_type
)
# During prediction Keras OHE output gets made ordinal for metrics.
# Use the probabilities to recreate the OHE so they can be inverse_transform'ed.
if (("multi" in analysis_type) and (library == 'keras')):
predictions[split] = []
for p in probs:
marker_position = np.argmax(p, axis=-1)
empty_arr = np.zeros(len(p))
empty_arr[marker_position] = 1
predictions[split].append(empty_arr)
predictions[split] = np.array(predictions[split])
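# e.g. probabilities [0.1, 0.7, 0.2] -> argmax=1 -> reconstructed OHE row [0., 1., 0.]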
elif (analysis_type == "regression"):
# The raw output values *is* the continuous prediction itself.
probs = None
for split, data in samples.items():
preds = fn_predict(model, data)
predictions[split] = preds
# Outputs numpy.
#https://keras.io/api/losses/regression_losses/
if (has_labels == True):
if (library == 'keras'):
loss = loser(data['labels'], preds)
elif (library == 'pytorch'):
tz_preds = torch.FloatTensor(preds)
loss = loser(tz_preds, data['labels'])
# After obtaining loss, make labels numpy again for metrics.
data['labels'] = data['labels'].detach().numpy()
# `preds` object is still numpy.
# Numpy inputs.
metrics[split] = Job.split_regression_metrics(
data['labels'], preds
)
metrics[split]['loss'] = float(loss)
plot_data = None
"""
4b. Format predictions for saving.
- Decode predictions before saving.
- Doesn't use any Label data, but does use Labelcoder fit on the original Labels.
"""
labelcoder, fitted_encoders = Predictor.get_fitted_labelcoder(
job=predictor.job, label=predictor.job.queue.splitset.label
)
if ((fitted_encoders is not None) and (hasattr(fitted_encoders, 'inverse_transform'))):
for split, data in predictions.items():
# OHE is arriving here as ordinal, not OHE.
data = Labelcoder.if_1d_make_2d(data)
predictions[split] = fitted_encoders.inverse_transform(data)
elif((fitted_encoders is not None) and (not hasattr(fitted_encoders, 'inverse_transform'))):
print(dedent("""
Warning - `Predictor.predictions` are encoded.
They cannot be decoded because the `sklearn.preprocessing`
encoder used does not have `inverse_transform`.
"""))
# Flatten.
for split, data in predictions.items():
if (data.ndim > 1):
predictions[split] = data.flatten()
if (has_labels == True):
# 4c. Aggregate metrics across splits/ folds.
# Alphabetize metrics dictionary by key.
for k,v in metrics.items():
metrics[k] = dict(natsorted(v.items()))
# Aggregate metrics across splits (e.g. mean, pstdev).
metric_names = list(list(metrics.values())[0].keys())
metrics_aggregate = {}
for metric in metric_names:
split_values = []
for split, split_metrics in metrics.items():
# ran into obscure errors with `pstdev` when not `float(value)`
value = float(split_metrics[metric])
split_values.append(value)
mean = statistics.mean(split_values)
median = statistics.median(split_values)
pstdev = statistics.pstdev(split_values)
minimum = min(split_values)
maximum = max(split_values)
metrics_aggregate[metric] = {
"mean":mean, "median":median, "pstdev":pstdev,
"minimum":minimum, "maximum":maximum
}
if ((probs is not None) and ("multi" not in algorithm.analysis_type)):
# Don't flatten the softmax probabilities.
probabilities[split] = probabilities[split].flatten()
if (has_labels == False):
metrics = None
metrics_aggregate = None
plot_data = None
if (splitset_id is not None):
splitset = Splitset.get_by_id(splitset_id)
else:
splitset = None
prediction = Prediction.create(
predictions = predictions
, probabilities = probabilities
, metrics = metrics
, metrics_aggregate = metrics_aggregate
, plot_data = plot_data
, predictor = predictor
, splitset = splitset
)
return prediction
def run(id:int, repeat_index:int, verbose:bool=False):
"""
Needs optimization = https://github.com/aiqc/aiqc/projects/1
"""
time_started = datetime.datetime.now()
job = Job.get_by_id(id)
if verbose:
print(f"\nJob #{job.id} starting...")
queue = job.queue
algorithm = queue.algorithm
analysis_type = algorithm.analysis_type
library = algorithm.library
hide_test = queue.hide_test
splitset = queue.splitset
hyperparamcombo = job.hyperparamcombo
fold = job.fold
"""
1. Determines which splits/folds are needed.
- Source of the training & evaluation data varies based on how Splitset and Foldset were designed.
- The rest of the tasks in Job.run() look to `samples:dict` for their data.
- The `key_*` variables are passed to downstream tasks. `key_train` could be either
'train' or 'folds_train_combined'.
"""
samples = {}
if (hide_test == False):
samples['test'] = splitset.samples['test']
key_evaluation = 'test'
elif (hide_test == True):
key_evaluation = None
if (splitset.has_validation):
samples['validation'] = splitset.samples['validation']
key_evaluation = 'validation'
if (fold is not None):
foldset = fold.foldset
fold_index = fold.fold_index
fold_samples = foldset.folds[fold_index].samples
samples['folds_train_combined'] = fold_samples['folds_train_combined']
samples['fold_validation'] = fold_samples['fold_validation']
key_train = "folds_train_combined"
key_evaluation = "fold_validation"
elif (fold is None):
samples['train'] = splitset.samples['train']
key_train = "train"
"""
2. Encodes the labels and features.
- Remember, you `.fit()` on either training data or all data (categoricals).
- Then you transform the entire dataset because downstream processes may need the entire dataset:
e.g. fit imputer to training data, but then impute entire dataset so that encoders can use entire dataset.
- So we transform the entire dataset, then divide it into splits/ folds.
- Then we convert the arrays to pytorch tensors if necessary. Subsetting with a list of indeces and `shape`
work the same in both numpy and torch.
"""
# Labels - fetch and encode.
if (splitset.supervision == "supervised"):
arr_labels = splitset.label.to_numpy()
labelcoder = splitset.label.get_latest_labelcoder()
if (labelcoder is not None):
fitted_encoders = Job.encoder_fit_labels(
arr_labels=arr_labels, samples_train=samples[key_train],
labelcoder=labelcoder
)
arr_labels = Job.encoder_transform_labels(
arr_labels=arr_labels,
fitted_encoders=fitted_encoders, labelcoder=labelcoder
)
FittedLabelcoder.create(fitted_encoders=fitted_encoders, job=job, labelcoder=labelcoder)
if (library == 'pytorch'):
arr_labels = torch.FloatTensor(arr_labels)
# Features - fetch and encode.
featureset = splitset.get_features()
feature_count = len(featureset)
features = []# expecting diff array shapes inside so it has to be list, not array.
for feature in featureset:
arr_features = feature.to_numpy()
encoderset = feature.get_latest_encoderset()
if (encoderset is not None):
# This takes the entire array because it handles all features and splits.
fitted_encoders = Job.encoderset_fit_features(
arr_features=arr_features, samples_train=samples[key_train],
encoderset=encoderset
)
arr_features = Job.encoderset_transform_features(
arr_features=arr_features,
fitted_encoders=fitted_encoders, encoderset=encoderset
)
FittedEncoderset.create(fitted_encoders=fitted_encoders, job=job, encoderset=encoderset)
if (library == 'pytorch'):
arr_features = torch.FloatTensor(arr_features)
# Don't use the list if you don't have to.
if (feature_count > 1):
features.append(arr_features)
"""
- Stage preprocessed data to be passed into the remaining Job steps.
- Example samples dict entry: samples['train']['labels']
- For each entry in the dict, fetch the rows from the encoded data.
- Keras multi-input models accept input as a list. Not using nested dict for multiple
features because it would be hard to figure out feature.id-based keys on the fly.
"""
for split, rows in samples.items():
if (feature_count == 1):
samples[split] = {
"features": arr_features[rows]
, "labels": arr_labels[rows]
}
elif (feature_count > 1):
samples[split] = {
"features": [arr_features[rows] for arr_features in features]
, "labels": arr_labels[rows]
}
"""
- Input shapes can only be determined after encoding has taken place.
- `[0]` accessess the first sample in each array.
- Does not impact the training loop's `batch_size`.
- Shapes are used later by `get_model()` to initialize it.
"""
label_shape = samples[key_train]['labels'][0].shape
if (feature_count == 1):
features_shape = samples[key_train]['features'][0].shape
elif (feature_count > 1):
features_shape = [arr_features[0].shape for arr_features in samples[key_train]['features']]
input_shapes = {
"features_shape": features_shape
, "label_shape": label_shape
}
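# Illustrative values (hypothetical): a tabular Feature might yield features_shape=(12,) and
# label_shape=(1,); a sequence Feature yields a 2D per-sample shape instead.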
"""
3. Build and Train model.
- This does not need to be modularized out of `Job.run()` because models are not
trained anywhere else in the codebase.
"""
if (hyperparamcombo is not None):
hp = hyperparamcombo.hyperparameters
elif (hyperparamcombo is None):
hp = {} #`**` cannot be None.
fn_build = dill_deserialize(algorithm.fn_build)
if (splitset.supervision == "supervised"):
# pytorch multiclass has a single ordinal label.
if ((analysis_type == 'classification_multi') and (library == 'pytorch')):
num_classes = len(splitset.label.unique_classes)
model = fn_build(features_shape, num_classes, **hp)
else:
model = fn_build(features_shape, label_shape, **hp)
elif (splitset.supervision == "unsupervised"):
model = fn_build(features_shape, **hp)
if (model is None):
raise ValueError("\nYikes - `fn_build` returned `None`.\nDid you include `return model` at the end of the function?\n")
# The model and optimizer get combined during training.
fn_lose = dill_deserialize(algorithm.fn_lose)
fn_optimize = dill_deserialize(algorithm.fn_optimize)
fn_train = dill_deserialize(algorithm.fn_train)
loser = fn_lose(**hp)
if (loser is None):
raise ValueError("\nYikes - `fn_lose` returned `None`.\nDid you include `return loser` at the end of the function?\n")
if (library == 'keras'):
optimizer = fn_optimize(**hp)
elif (library == 'pytorch'):
optimizer = fn_optimize(model, **hp)
if (optimizer is None):
raise ValueError("\nYikes - `fn_optimize` returned `None`.\nDid you include `return optimizer` at the end of the function?\n")
if (key_evaluation is not None):
samples_eval = samples[key_evaluation]
elif (key_evaluation is None):
samples_eval = None
if (library == "keras"):
model = fn_train(
model = model
, loser = loser
, optimizer = optimizer
, samples_train = samples[key_train]
, samples_evaluate = samples_eval
, **hp
)
if (model is None):
raise ValueError("\nYikes - `fn_train` returned `model==None`.\nDid you include `return model` at the end of the function?\n")
# Save the artifacts of the trained model.
# If blank this value is `{}` not None.
history = model.history.history
"""
- As of: Python(3.8.7), h5py(2.10.0), Keras(2.4.3), tensorflow(2.4.1)
model.save(buffer) working for neither `io.BytesIO()` nor `tempfile.TemporaryFile()`
https://github.com/keras-team/keras/issues/14411
- So let's switch to a real file in appdirs.
- Assuming `model.save()` will trigger OS-specific h5 drivers.
"""
# Write it.
temp_file_name = f"{app_dir}temp_keras_model.h5"
model.save(
temp_file_name
, include_optimizer = True
, save_format = 'h5'
)
# Fetch the bytes ('rb': read binary)
with open(temp_file_name, 'rb') as file:
model_blob = file.read()
os.remove(temp_file_name)
elif (library == "pytorch"):
model, history = fn_train(
model = model
, loser = loser
, optimizer = optimizer
, samples_train = samples[key_train]
, samples_evaluate = samples_eval
, **hp
)
if (model is None):
raise ValueError("\nYikes - `fn_train` returned `model==None`.\nDid you include `return model` at the end of the function?\n")
if (history is None):
raise ValueError("\nYikes - `fn_train` returned `history==None`.\nDid you include `return model, history` the end of the function?\n")
# Save the artifacts of the trained model.
# https://pytorch.org/tutorials/beginner/saving_loading_models.html#saving-loading-a-general-checkpoint-for-inference-and-or-resuming-training
model_blob = io.BytesIO()
torch.save(
{
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict()
},
model_blob
)
model_blob = model_blob.getvalue()
"""
5. Save everything to Predictor object.
"""
time_succeeded = datetime.datetime.now()
time_duration = (time_succeeded - time_started).seconds
# There's a chance that a duplicate job-repeat_index pair was running elsewhere and finished first.
matching_predictor = Predictor.select().join(Job).join(Queue).where(
Queue.id==queue.id, Job.id==job.id, Predictor.repeat_index==repeat_index)
if (len(matching_predictor) > 0):
raise ValueError(f"""
Yikes - Duplicate run detected:
Queue<{queue.id}>, Job<{job.id}>, Job.repeat_index<{repeat_index}>.
Cancelling this instance of `run_jobs()` as there is another `run_jobs()` ongoing.
No action needed, the other instance will continue running to completion.
""")
predictor = Predictor.create(
time_started = time_started
, time_succeeded = time_succeeded
, time_duration = time_duration
, model_file = model_blob
, input_shapes = input_shapes
, history = history
, job = job
, repeat_index = repeat_index
)
try:
Job.predict(samples=samples, predictor_id=predictor.id)
except:
predictor.delete_instance()
raise
# Just to be sure not held in memory or multiprocess forked on a 2nd Queue.
del samples
del model
return job
def execute_jobs(job_statuses:list, verbose:bool=False):
"""
- This needs to be a top level function, otherwise you get pickle attribute error.
- Alternatively, you can put this is a separate submodule file, and call it via
`import aiqc.execute_jobs.execute_jobs`
- Tried `mp.Manager` and `mp.Value` for shared variable for progress, but gave up after
a full day of troubleshooting.
- Also you have to get a separate database connection for the separate process.
"""
BaseModel._meta.database.close()
BaseModel._meta.database = get_db()
for j in tqdm(
job_statuses
, desc = "🔮 Training Models 🔮"
, ncols = 100
):
if (j['predictor_id'] is None):
Job.run(id=j['job_id'], verbose=verbose, repeat_index=j['repeat_index'])
class FittedEncoderset(BaseModel):
"""
- Job uses this to save the fitted_encoders, which are later used for inference.
- Useful for accessing featurecoders for matching_columns, dimensions.
- When I added support for multiple Features, updating `Job.fitted_encoders` during
`Job.run()` started to get unmanageable. Especially when you consider that not every
Feature type is guaranteed to have an Encoderset.
"""
fitted_encoders = PickleField()
job = ForeignKeyField(Job, backref='fittedencodersets')
encoderset = ForeignKeyField(Encoderset, backref='fittedencodersets')
class FittedLabelcoder(BaseModel):
"""
- See notes about FittedEncoderset.
"""
fitted_encoders = PickleField()
job = ForeignKeyField(Job, backref='fittedlabelcoders')
labelcoder = ForeignKeyField(Labelcoder, backref='fittedlabelcoders')
class Predictor(BaseModel):
"""
- This class was renamed to "Predictor" during an earlier refactor.
- Regarding metrics, the label encoder was fit on training split labels.
"""
repeat_index = IntegerField()
time_started = DateTimeField()
time_succeeded = DateTimeField()
time_duration = IntegerField()
model_file = BlobField()
input_shapes = JSONField() # used by get_model()
history = JSONField()
job = ForeignKeyField(Job, backref='predictors')
def get_model(id:int):
predictor = Predictor.get_by_id(id)
algorithm = predictor.job.queue.algorithm
model_blob = predictor.model_file
if (algorithm.library == "keras"):
#https://www.tensorflow.org/guide/keras/save_and_serialize
temp_file_name = f"{app_dir}temp_keras_model.h5"
# Workaround: write bytes to file so keras can read from path instead of buffer.
with open(temp_file_name, 'wb') as f:
f.write(model_blob)
h5 = h5py.File(temp_file_name, 'r')
model = keras.models.load_model(h5, compile=True)
os.remove(temp_file_name)
# Unlike pytorch, it doesn't look like you need to initialize the optimizer or anything.
return model
elif (algorithm.library == 'pytorch'):
# https://pytorch.org/tutorials/beginner/saving_loading_models.html#load
# Need to initialize the classes first, which requires reconstructing them.
if (predictor.job.hyperparamcombo is not None):
hp = predictor.job.hyperparamcombo.hyperparameters
elif (predictor.job.hyperparamcombo is None):
hp = {}
features_shape = predictor.input_shapes['features_shape']
label_shape = predictor.input_shapes['label_shape']
fn_build = dill_deserialize(algorithm.fn_build)
fn_optimize = dill_deserialize(algorithm.fn_optimize)
if (algorithm.analysis_type == 'classification_multi'):
num_classes = len(predictor.job.queue.splitset.label.unique_classes)
model = fn_build(features_shape, num_classes, **hp)
else:
model = fn_build(features_shape, label_shape, **hp)
optimizer = fn_optimize(model, **hp)
model_bytes = io.BytesIO(model_blob)
checkpoint = torch.load(model_bytes)
# Don't assign them: `model = model.load_state_dict ...`
model.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
# "must call model.eval() to set dropout & batchNorm layers to evaluation mode before prediction."
# ^ but you don't need to pass any data into eval()
return model, optimizer
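# Usage note: for keras this returns the model alone; for pytorch it returns (model, optimizer),
# which is why Job.predict() indexes `model[0]` before calling `.eval()`.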
def export_model(id:int, file_path:str=None):
predictor = Predictor.get_by_id(id)
algorithm = predictor.job.queue.algorithm
if (file_path is None):
dtime = datetime.datetime.now().strftime('%Y%b%d_%H:%M')
if (algorithm.library == "keras"):
ext = '.h5'
elif (algorithm.library == 'pytorch'):
ext = '.pt'
file_path = f"{app_dir}/models/predictor{predictor.id}_model({dtime}){ext}"
file_path = os.path.abspath(file_path)
folder = f"{app_dir}/models"
os.makedirs(folder, exist_ok=True)
# We already have the bytes of the file we need to write.
model_blob = predictor.model_file
# 'wb+' creates the file if it does not already exist.
with open(file_path, 'wb+') as f:
f.write(model_blob)
if (not os.path.exists(file_path)):
raise Exception(f"\nYikes - Failed to write model file to path:\n{file_path}\n")
print(dedent(
f"\nModel exported to the following absolute path:" \
f"\n{file_path}\n"
))
return file_path
def get_hyperparameters(id:int, as_pandas:bool=False):
"""This is actually a method of `Hyperparamcombo` so we just pass through."""
predictor = Predictor.get_by_id(id)
hyperparamcombo = predictor.job.hyperparamcombo
hp = hyperparamcombo.get_hyperparameters(as_pandas=as_pandas)
return hp
def plot_learning_curve(id:int, loss_skip_15pct:bool=False):
predictor = Predictor.get_by_id(id)
algorithm = predictor.job.queue.algorithm
analysis_type = algorithm.analysis_type
history = predictor.history
dataframe = pd.DataFrame.from_dict(history, orient='index').transpose()
Plot().learning_curve(
dataframe = dataframe
, analysis_type = analysis_type
, loss_skip_15pct = loss_skip_15pct
)
def tabular_schemas_match(set_original, set_new):
# Set can be either Label or Feature. Needs `columns` and `.get_dtypes`.
cols_old = set_original.columns
cols_new = set_new.columns
if (cols_new != cols_old):
raise ValueError("\nYikes - New columns do not match original columns.\n")
typs_old = set_original.get_dtypes()
typs_new = set_new.get_dtypes()
if (typs_new != typs_old):
raise ValueError(dedent("""
Yikes - New dtypes do not match original dtypes.
The Low-Level API methods for Dataset creation accept a `dtype` argument to fix this.
"""))
def image_schemas_match(feature_old, feature_new):
image_old = feature_old.dataset.files[0].images[0]
image_new = feature_new.dataset.files[0].images[0]
if (image_old.size != image_new.size):
raise ValueError(f"\nYikes - The new image size:{image_new.size} did not match the original image size:{image_old.size}.\n")
if (image_old.mode != image_new.mode):
raise ValueError(f"\nYikes - The new image color mode:{image_new.mode} did not match the original image color mode:{image_old.mode}.\n")
def schemaNew_matches_schemaOld(splitset_new:object, splitset_old:object):
# Get the new and old featuresets. Loop over them by index.
features_new = splitset_new.get_features()
features_old = splitset_old.get_features()
if (len(features_new) != len(features_old)):
raise ValueError("\nYikes - Your new and old Splitsets do not contain the same number of Features.\n")
for i, feature_new in enumerate(features_new):
feature_old = features_old[i]
feature_old_typ = feature_old.dataset.dataset_type
feature_new_typ = feature_new.dataset.dataset_type
if (feature_old_typ != feature_new_typ):
raise ValueError(f"\nYikes - New Feature dataset_type={feature_new_typ} != old Feature dataset_type={feature_old_typ}.\n")
if ((feature_new_typ == 'tabular') or (feature_new_typ == 'sequence')):
Predictor.tabular_schemas_match(feature_old, feature_new)
elif (feature_new_typ == 'image'):
Predictor.image_schemas_match(feature_old, feature_new)
# Only verify Labels if the inference new Splitset provides Labels.
# Otherwise, it may be conducting pure inference.
label = splitset_new.label
if (label is not None):
label_new = label
label_new_typ = label_new.dataset.dataset_type
if (splitset_old.supervision == 'unsupervised'):
raise ValueError("\nYikes - New Splitset has Labels, but old Splitset does not have Labels.\n")
elif (splitset_old.supervision == 'supervised'):
label_old = splitset_old.label
label_old_typ = label_old.dataset.dataset_type
if (label_old_typ != label_new_typ):
raise ValueError("\nYikes - New Label and original Label come from different `dataset_types`.\n")
if (label_new_typ == 'tabular'):
Predictor.tabular_schemas_match(label_old, label_new)
def get_fitted_encoderset(job:object, feature:object):
"""
- Given a Feature, you want to know if it needs to be transformed,
and, if so, how to transform it.
"""
fitted_encodersets = FittedEncoderset.select().join(Encoderset).where(
FittedEncoderset.job==job, FittedEncoderset.encoderset.feature==feature
)
if (not fitted_encodersets):
return None, None
else:
encoderset = fitted_encodersets[0].encoderset
fitted_encoders = fitted_encodersets[0].fitted_encoders
return encoderset, fitted_encoders
def get_fitted_labelcoder(job:object, label:object):
"""
- Given a Feature, you want to know if it needs to be transformed,
and, if so, how to transform it.
"""
fitted_labelcoders = FittedLabelcoder.select().join(Labelcoder).where(
FittedLabelcoder.job==job, FittedLabelcoder.labelcoder.label==label
)
if (not fitted_labelcoders):
return None, None
else:
labelcoder = fitted_labelcoders[0].labelcoder
fitted_encoders = fitted_labelcoders[0].fitted_encoders
return labelcoder, fitted_encoders
def infer(id:int, splitset_id:int):
"""
- Splitset is used because Labels and Features can come from different types of Datasets.
- Verifies both Features and Labels match original schema.
"""
splitset_new = Splitset.get_by_id(splitset_id)
predictor = Predictor.get_by_id(id)
splitset_old = predictor.job.queue.splitset
Predictor.schemaNew_matches_schemaOld(splitset_new, splitset_old)
library = predictor.job.queue.algorithm.library
featureset_new = splitset_new.get_features()
featureset_old = splitset_old.get_features()
feature_count = len(featureset_new)
features = []# expecting different array shapes so it has to be list, not array.
for i, feature_new in enumerate(featureset_new):
arr_features = feature_new.to_numpy()
encoderset, fitted_encoders = Predictor.get_fitted_encoderset(
job=predictor.job, feature=featureset_old[i]
)
if (encoderset is not None):
# Don't need to check types because Encoderset creation protects
# against unencodable types.
arr_features = Job.encoderset_transform_features(
arr_features=arr_features,
fitted_encoders=fitted_encoders, encoderset=encoderset
)
if (library == 'pytorch'):
arr_features = torch.FloatTensor(arr_features)
if (feature_count > 1):
features.append(arr_features)
else:
# We don't need to do any row filtering so it can just be overwritten.
features = arr_features
"""
- Pack into samples for the Algorithm functions.
- This is two levels deep to mirror how the training samples were structured
e.g. `samples[<trn,val,tst>]`
- str() id because int keys aren't JSON serializable.
"""
str_id = str(splitset_id)
samples = {str_id: {'features':features}}
if (splitset_new.label is not None):
label_new = splitset_new.label
label_old = splitset_old.label
else:
label_new = None
label_old = None
if (label_new is not None):
arr_labels = label_new.to_numpy()
labelcoder, fitted_encoders = Predictor.get_fitted_labelcoder(
job=predictor.job, label=label_old
)
if (labelcoder is not None):
arr_labels = Job.encoder_transform_labels(
arr_labels=arr_labels,
fitted_encoders=fitted_encoders, labelcoder=labelcoder
)
if (library == 'pytorch'):
arr_labels = torch.FloatTensor(arr_labels)
samples[str_id]['labels'] = arr_labels
prediction = Job.predict(
samples=samples, predictor_id=id, splitset_id=splitset_id
)
return prediction
class Prediction(BaseModel):
"""
- Many-to-Many for making predictions after the training experiment.
- We use the low level API to create a Dataset because there's a lot of formatting
that happens during Dataset creation that we would lose out on with raw numpy/pandas
input: e.g. columns may need autocreation, and who knows what connectors we'll have
in the future. This forces us to validate dtypes and columns after the fact.
"""
predictions = PickleField()
probabilities = PickleField(null=True) # Not used for regression.
metrics = PickleField(null=True) #inference
metrics_aggregate = PickleField(null=True) #inference.
plot_data = PickleField(null=True) # No regression-specific plots yet.
predictor = ForeignKeyField(Predictor, backref='predictions')
# dataset present if created for inference, v.s. null if from Original training set.
splitset = ForeignKeyField(Splitset, deferrable='INITIALLY DEFERRED', null=True, backref='dataset')
"""
- I moved these plots out of Predictor into Prediction because it felt weird to access the
Prediction via `predictions[0]`.
- If we ever do non-deterministic algorithms then we would not have a 1-1 mapping
between Predictor and Prediction.
"""
def plot_confusion_matrix(id:int):
prediction = Prediction.get_by_id(id)
prediction_plot_data = prediction.plot_data
analysis_type = prediction.predictor.job.queue.algorithm.analysis_type
if (analysis_type == "regression"):
raise ValueError("\nYikes - <Algorithm.analysis_type> of 'regression' does not support this chart.\n")
cm_by_split = {}
labelcoder, fitted_encoders = Predictor.get_fitted_labelcoder(
job=prediction.predictor.job, label=prediction.predictor.job.queue.splitset.label
)
if (labelcoder is not None):
if hasattr(fitted_encoders,'categories_'):
labels = list(fitted_encoders.categories_[0])
elif hasattr(fitted_encoders,'classes_'):
labels = fitted_encoders.classes_.tolist()
else:
unique_classes = prediction.predictor.job.queue.splitset.label.unique_classes
labels = list(unique_classes)
for split, data in prediction_plot_data.items():
cm_by_split[split] = data['confusion_matrix']
Plot().confusion_matrix(cm_by_split=cm_by_split, labels= labels)
def plot_precision_recall(id:int):
prediction = Prediction.get_by_id(id)
predictor_plot_data = prediction.plot_data
analysis_type = prediction.predictor.job.queue.algorithm.analysis_type
if (analysis_type == "regression"):
raise ValueError("\nYikes - <Algorith.analysis_type> of 'regression' does not support this chart.\n")
pr_by_split = {}
for split, data in predictor_plot_data.items():
pr_by_split[split] = data['precision_recall_curve']
dfs = []
for split, data in pr_by_split.items():
df = pd.DataFrame()
df['precision'] = pd.Series(pr_by_split[split]['precision'])
df['recall'] = pd.Series(pr_by_split[split]['recall'])
df['split'] = split
dfs.append(df)
dataframe = pd.concat(dfs, ignore_index=True)
dataframe = dataframe.round(3)
Plot().precision_recall(dataframe=dataframe)
def plot_roc_curve(id:int):
prediction = Prediction.get_by_id(id)
predictor_plot_data = prediction.plot_data
analysis_type = prediction.predictor.job.queue.algorithm.analysis_type
if (analysis_type == "regression"):
raise ValueError("\nYikes - <Algorith.analysis_type> of 'regression' does not support this chart.\n")
roc_by_split = {}
for split, data in predictor_plot_data.items():
roc_by_split[split] = data['roc_curve']
dfs = []
for split, data in roc_by_split.items():
df = | pd.DataFrame() | pandas.DataFrame |
# libraries
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import os, sys
import matplotlib.dates as mdates
import matplotlib as mpl
from matplotlib.colors import ListedColormap
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
from matplotlib.offsetbox import AnchoredText
from mpl_toolkits.axisartist.axislines import Axes
from mpl_toolkits import axisartist
import uncertainpy as un
import statistics as st
sys.path.append(
os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
)
from src.utils.settings import config
# from src.utils.uq_output import draw_plot
from src.models.icestupaClass import Icestupa
from src.models.methods.metadata import get_parameter_metadata
import seaborn as sns
import matplotlib.pyplot as plt
if __name__ == "__main__":
locations = ["gangles21", "guttannen21", "guttannen20"]
index = pd.date_range(start="1-1-2022", end="1-1-2024", freq="D", name="When")
df_out = pd.DataFrame(columns=locations, index=index)
names = [
"DX",
"IE",
"A_I",
"A_S",
"A_DECAY",
"T_PPT",
"Z",
"T_F",
# "D_MEAN",
# "MU_CONE",
# "r_spray",
]
names_label = [
"$\\Delta x$",
"$\\epsilon_{ice}$",
r"$\alpha_{ice}$",
r"$\alpha_{snow}$",
"$\\tau$",
"$T_{ppt}$",
"$z_{0}$",
"$T_{F}$",
# "$d_{mean}$",
# r"$\mu_{cone}$",
# "$r_{spray}$",
]
zip_iterator = zip(names, names_label)
param_dictionary = dict(zip_iterator)
evaluations = []
percent_change = []
efficiency_change = []
site = []
param = []
result = []
freeze_rate = []
melt_rate = []
fig, ax = plt.subplots()
for location in locations:
SITE, FOLDER = config(location)
icestupa = Icestupa(location)
icestupa.read_output()
feature_name = "efficiency"
icestupa.se = (icestupa.M_water + icestupa.M_ice) / icestupa.M_input * 100
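# Storage efficiency: percent of total fountain input retained as water + ice.
# e.g. if M_water + M_ice = 400 and M_input = 1000 (same units), se = 40 percent (hypothetical numbers).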
for j in range(0, icestupa.df.shape[0]):
if icestupa.df.loc[j, "fountain_froze"] != 0:
freeze_rate.append(
[
get_parameter_metadata(location)["shortname"],
j,
icestupa.df.loc[j, "fountain_froze"] / 60,
]
)
if icestupa.df.loc[j, "melted"] != 0:
melt_rate.append(
[
get_parameter_metadata(location)["shortname"],
j,
icestupa.df.loc[j, "melted"] / 60,
]
)
for name in names:
data = un.Data()
filename1 = FOLDER["sim"] + name + ".h5"
data.load(filename1)
print(data)
evaluations.append(data[feature_name].evaluations)
evals = data[feature_name].evaluations
print(
f"Mean and ~95 percent interval half-width (2*stdev) caused by {name}: {round(st.mean(evals),2)}, {round(2 * st.stdev(evals),2)}"
)
for i in range(0, len(data[feature_name].evaluations)):
result.append(
[
get_parameter_metadata(location)["shortname"],
param_dictionary[name],
data[feature_name].evaluations[i],
(data[feature_name].evaluations[i] - icestupa.se),
]
)
df = | pd.DataFrame(result, columns=["Site", "param", "SE", "percent_change"]) | pandas.DataFrame |
import pandas as pd
import datetime
from scipy import sparse
import scipy.io
from scipy.stats import zscore, wilcoxon, spearmanr
from sklearn.preprocessing import binarize, normalize
from sklearn import metrics
from itertools import cycle
from sklearn.metrics import roc_auc_score
import os
import pickle
import seaborn as sns
import subprocess
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import sys
import re
import math
import datetime
from scipy.spatial import distance
from scipy.cluster import hierarchy
import scanpy as sc
from itertools import combinations
from functools import reduce
from scipy.cluster.hierarchy import linkage
import scipy.spatial.distance as ssd
from matplotlib import cm
GENE_SIZES = [10, 50, 100, 200, 500, 1000]
SET = 5
MSHAPES = ['o', 'P', 's', 's', '.', '^', '^', '^', '^', '^', '^']
USHAPES = ['o', 'P', 's', 's', '.', 'v', '^', '>', '<', 'D', 'd']
ALL_DATA = True
SCANPY_OBJS = {'gene': ['GSE100033_gene_id_order_gene__all_bin_scanpy_obj_with_feat.pyn', 'GSE111586_gene_id_order_gene__all_scanpy_obj.pyn', 'GSE123576_gene_id_order_gene__all_scanpy_obj.pyn', 'GSE126074_gene_id_order_gene__all_scanpy_obj.pyn', 'GSE127257_distal_id_gene_order__all_scanpy_obj.pyn', 'GSE1303990_gene_id_order_gene__all_scanpy_obj.pyn', 'BICCN2_gene_id_order_gene__all_scanpy_obj.pyn'],
'distal': ['GSE100033_distal_id_order_distal__all_bin_scanpy_obj_with_feat.pyn', 'GSE111586_distal_id_order_distal__all_scanpy_obj.pyn', 'GSE123576_distal_id_order_distal__all_scanpy_obj.pyn', 'GSE126074_distal_id_order_distal__all_scanpy_obj.pyn', 'GSE127257_distal_id_gene_order__all_scanpy_obj.pyn', 'GSE1303990_distal_id_order_distal__all_scanpy_obj.pyn', 'BICCN2_distal_id_order_distal__all_scanpy_obj.pyn'],
'proximal': ['GSE100033_proximal_id_proximal__all_bin_scanpy_obj_with_feat.pyn', 'GSE111586_proximal_id_order_proximal__all_scanpy_obj.pyn', 'GSE123576_proximal_id_order_proximal__all_scanpy_obj.pyn', 'GSE126074_proximal_id_order_proximal__all_scanpy_obj.pyn', 'GSE127257_distal_id_gene_order__all_scanpy_obj.pyn', 'GSE1303990_proximal_id_order_proximal__all_scanpy_obj.pyn', 'BICCN2_proximal_id_order_proximal__all_scanpy_obj.pyn']}
GSES = ['GSE100033', 'BICCN2', 'GSE111586', 'GSE123576', 'GSE126074', 'GSE127257', 'GSE1303990']
AMARKER = ['SF', 'CU', 'TA', 'TN', 'SC']
PALETTE = ['#E64B35FF', '#4DBBD5FF', '#00A087FF', '#91D1C2FF', '#3C5488FF']
def get_palette_shape(size, data=False):
global ALL_DATA
print(size)
if data:
if ALL_DATA:
palette = ['#E64B35FF'] + sns.color_palette('Greys', 6)[::-1]
shape = ['-', '--', '--', '--', '--', '--', '--']
else:
palette = ['#E64B35FF'] + sns.color_palette('Greys', 5)[::-1]
shape = ['-', '--', '--', '--', '--', '--']
else:
if ALL_DATA:
if size == 11:
palette = ['#E64B35FF', '#4DBBD5FF', '#00A087FF', '#91D1C2FF', '#3C5488FF'] + sns.color_palette('Greys', 6)[::-1]
shape = ['-', '-', '-', '-', '-', '--', '--', '--', '--', '--', '--']
else:
palette = ['#E64B35FF', '#4DBBD5FF', '#00A087FF', '#91D1C2FF'] + sns.color_palette('Greys', 6)[::-1]
shape = ['-', '-', '-', '-', '--', '--', '--', '--', '--', '--']
else:
assert size <= 10
if size == 10:
palette = ['#E64B35FF', '#4DBBD5FF', '#00A087FF', '#91D1C2FF', '#3C5488FF'] + sns.color_palette('Greys', 5)[::-1]
shape = ['-', '-', '-', '-', '-', '--', '--', '--', '--', '--']
else:
palette = ['#E64B35FF', '#4DBBD5FF', '#00A087FF', '#91D1C2FF'] + sns.color_palette('Greys', 5)[::-1]
shape = ['-', '-', '-', '-', '--', '--', '--', '--', '--']
return palette, shape
def norm_row_columns(X):
from sklearn.preprocessing import MinMaxScaler
X = np.array(X)
print(X.shape)
scaler = MinMaxScaler()
X = np.apply_along_axis(lambda x: MinMaxScaler().fit_transform(x.reshape(-1, 1)), 0, X)
X = np.squeeze(X)
X = np.apply_along_axis(lambda x: MinMaxScaler().fit_transform(x.reshape(-1, 1)), 1, X)
X = np.squeeze(X)
print(X.shape)
return X
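# A minimal usage sketch for norm_row_columns (toy matrix, not data from this analysis):
#   norm_row_columns([[1.0, 2.0], [3.0, 4.0]])
# Columns are min-max scaled to [0, 1] first, then each row of that intermediate result is
# scaled again, so the returned array keeps the input shape with all values in [0, 1].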
def get_celltype_category(sample_types):
# order 'celltype'
if 'AC' in sample_types:
sample_uniq = ['AC', 'EX', 'IN', 'MG', 'OG', 'OT']
elif 'NN' in sample_types:
sample_uniq = ['NN', 'EX', 'IN']
else:
sample_uniq = ['OT', 'EX', 'IN', 'MG', 'OG']
sample_uniq = [x for x in sample_uniq if x in sample_types]
return [str(sample_uniq.index(x))+'_'+x if x in sample_uniq else str(len(sample_uniq))+'_NA' for x in sample_types]
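# Example of the mapping above (labels are illustrative):
#   get_celltype_category(['EX', 'IN', 'NN', 'EX'])  ->  ['1_EX', '2_IN', '0_NN', '1_EX']
# Each label is prefixed with its index in the fixed ordering so that sorting the returned
# strings lexicographically reproduces the intended cell-type order.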
def draw_boxplot(header, df, col_dict=None, sdf=None):
print(df.head())
ax = sns.boxplot(x='marker', y="value", data=df, palette=col_dict, showfliers=False)
if sdf is not None:
ax = sns.swarmplot(x="marker", y="value", data=sdf, color=".2", dodge=True)
else:
ax = sns.swarmplot(x="marker", y="value", data=df, color=".2", dodge=True)
ax.legend(bbox_to_anchor=(1.05, 1), loc=2, ncol=2, borderaxespad=0.)
plt.savefig(header, bbox_inches='tight')
plt.close('all')
plt.clf()
def collect_auc_from_down_sampled_exp(dir='./output/scobj/'):
print('???')
global AMARKER, PMARKER, SET, ALL_DATA
auc_files = ['BICCN_gene_id_order_gene__all', 'GSE111586_gene_id_order_gene__all', 'GSE123576_gene_id_order_gene__all', 'GSE126074_gene_id_order_gene__all', 'GSE127257_distal_id_gene_order__all']
all_results = None
if ALL_DATA:
auc_files.extend(['GSE1303990_gene_id_order_gene__all'])
for th in [1, 5, 10, 25, 50, 75, 100, 150, 200]:
for fname in auc_files:
print(os.path.join(dir, fname+'_simulate_down_sample_'+str(th)+'_auroc.csv'))
df = pd.read_csv(os.path.join(dir, fname+'_simulate_down_sample_'+str(th)+'_auroc.csv'), header=0, index_col=0)
value_column = ['auc', 'acc', 'precision', 'recall', 'whole', 'ppos', 'tpos', 'roc_file']
columns = [x for x in df.columns if x not in value_column]
df = df.groupby(columns).agg(dict([(x, np.mean) for x in value_column if x != 'roc_file']))
df = df.reset_index(col_level=0)
# print(df)
gse = fname.split('_')[0]
df = df.assign(gse=gse)
df = df.assign(threshold=[th]*df.shape[0])
if all_results is None: all_results = df
else: all_results = pd.concat([all_results, df])
from multiprocessing import Process
import os
import subprocess
import pandas as pd
from rdkit import Chem
from rdkit.Chem import Descriptors
from random import shuffle
import numpy as np
import time
def read_dev_output(file_name):
# read generated SMILES in seq2seq dev output file
with open(file_name,'r') as f:
smis = []
for l in f:
smi = ''.join(l.split('\t')[0].split(' '))
smis.append(smi)
return smis
def get_novelty(gen_smis, ref_smis, return_novelty=False, ref_can=False):
"""
Get novelty generated MOLs which are not exist in training dataset
para gen_smis: generated SMILES, in list format
para ref_smis: training SMILES, in list format
para return_novelty: if return novelty MOLs, in canonical SMILES format, default False
"""
c_gen_smis = []
for s in gen_smis:
try:
cs = Chem.MolToSmiles(Chem.MolFromSmiles(s))
except:
pass
else:
c_gen_smis.append(cs)
if ref_can:
c_ref_smis = ref_smis
else:
c_ref_smis = [Chem.MolToSmiles(Chem.MolFromSmiles(s)) for s in ref_smis]
c_ref_smis = list(set(c_ref_smis))
c_gen_smis = list(set(c_gen_smis))
nove_smis = [i for i in c_gen_smis if i not in c_ref_smis]
if return_novelty:
return nove_smis
else:
return len(nove_smis)/len(gen_smis)
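# A minimal usage sketch (SMILES below are illustrative, not taken from any training set):
#   get_novelty(['CCO', 'c1ccccc1'], ref_smis=['CCO'])  ->  0.5
# Both lists are canonicalised with RDKit before comparison, so e.g. 'OCC' and 'CCO'
# count as the same molecule.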
def get_novelty_smi(gen_smis, ref_smis, return_novelty=False,):
"""
Get novelty generated SMILES which are not exist in training dataset
para gen_smis: generated SMILES, in list format
para ref_smis: training SMILES, in list format
para return_novelty: if return novelty MOLs, in canonical SMILES format, default False
"""
nov_smis = [i for i in gen_smis if i not in ref_smis]
if return_novelty:
return nov_smis
else:
return len(nov_smis)/len(gen_smis)
def get_valid(gen_smis, return_valid=False):
"""
Get valid SMILES in generated samples
para gen_smis: generated SMILES, in list format
para return_valid: if return unique SMILESs, else return the fraction, default False
"""
valid_smi = []
for smi in gen_smis:
try:
m = Chem.MolFromSmiles(smi)
except:
pass
else:
if m != None:
valid_smi.append(smi)
if return_valid:
return valid_smi
else:
return len(valid_smi)/len(gen_smis)
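# Usage sketch (illustrative strings; 'not_a_smiles' fails RDKit parsing and is dropped):
#   get_valid(['CCO', 'not_a_smiles'])                     ->  0.5
#   get_valid(['CCO', 'not_a_smiles'], return_valid=True)  ->  ['CCO']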
def get_unique(gen_smis, random_sample_n=-1, valid=True, return_unique=False):
"""
Get unique generated samples
para gen_smis: generated SMILES, in list format
para random_sample_n: the number of sampled SMILESs from gen_smis for uniqueness calculation,
-1 means using the whole gen_smis, default -1
para valid: if the unique SMILES should be valid SMILES
para return_unique: if return unique SMILESs, default False
"""
base = get_valid(gen_smis, return_valid=True) if valid else gen_smis
total_smi_n = len(base)
if random_sample_n>total_smi_n or random_sample_n == -1:
sample_n = total_smi_n
else:
sample_n = random_sample_n
base_index = list(range(total_smi_n))
shuffle(base_index)
sample_smis = [base[base_index[i]] for i in range(sample_n)]
unique_smis = list(set(sample_smis))
if return_unique:
return unique_smis
else:
if sample_n == 0:
return 0
else:
return len(unique_smis)/sample_n
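# Usage sketch: uniqueness is computed over the valid subset, duplicates counted once.
#   get_unique(['CCO', 'CCO', 'CCN'])                     # 2 unique out of 3 valid -> ~0.67
#   get_unique(['CCO', 'CCO', 'CCN'], random_sample_n=2)  # same, but on a random sample of 2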
def eva_dl(file_list, ref, ids):
"""
The Distribution-Learning evaluation of the generated SMILES
para file_list: the files store the generated SMILES, in list format
para ref: the number of sampled SMILESs from gen_smis for uniqueness calculation,
-1 means using the whole gen_smis, default -1
para ids: job id in multi-process, default None, and would return the metircs in Dataframe, otherwise will write to a csv file
"""
rec_file = open('eva_rec.log','a')
ref_smis = ref
vs = []
us = []
ns = []
for idx, file in enumerate(file_list):
smis = read_dev_output(file)
v_smis = get_valid(smis, return_valid=True)
n_smis = get_novelty_smi(v_smis, ref_smis, return_novelty=True)
vs.append(len(v_smis)/len(smis))
us.append(get_unique(smis))
ns.append(len(n_smis)/len(v_smis))
rec_file.write('DL-evaluation for {0} done\n'.format(file))
rec_file.close()
dl_metrics = pd.DataFrame({'valid_score':vs, 'unique_score':us, 'novelty_score':ns})
if ids == None:
return dl_metrics
else:
dl_metrics.to_csv('subprocess_{0}.csv'.format(ids), index=False)
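# Usage sketch (file names below are placeholders, not actual outputs of this project):
#   ref = read_dev_output('train_smiles.txt')                # hypothetical reference file
#   dl_df = eva_dl(['dev_output_epoch1.txt'], ref, ids=None)
# With ids=None the metrics DataFrame is returned; with an integer id it is written to
# 'subprocess_<id>.csv' instead, which is how the multi-process path collects results.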
def eva_gd(file_list, target, ids):
"""
The Goal-Directed evaluation of the generated SMILES
para file_list: the files store the generated SMILES, in list format
para target: the pre-defined goal for generated SMILES, in list format
para ids: job id in multi-process, default None, and would return the metircs in Dataframe, otherwise will write to a csv file
"""
rec_file = open('eva_rec.log','a')
ave_diff = []
ave_p = []
top_1 = []
top_2 = []
top_3 = []
for idx, file in enumerate(file_list):
smis = read_dev_output(file)
if len(smis) != len(target):
cut_ = min(len(smis), len(target))
smis = smis[:cut_]
target_e = target[:cut_]
else:
target_e = target[:]
properties = [0,0,0]
diff = []
for sidx, smi in enumerate(smis):
try:
mol = Chem.MolFromSmiles(smi)
q = Descriptors.qed(mol)
except:
pass
else:
diff.append(abs(q-target_e[sidx]))
properties.append(q)
properties = sorted(properties)[::-1]
top_1.append(properties[0])
top_2.append(properties[1])
top_3.append(properties[2])
ave_p.append(np.mean(properties))
ave_diff.append(np.mean(diff))
rec_file.write('GD-evaluation for {0} done\n'.format(file))
rec_file.close()
gd_metrics = pd.DataFrame({'ave_diff': ave_diff, 'ave_property': ave_p, 'top_1': top_1, 'top_2': top_2, 'top_3': top_3})
import csv
import os
import pandas as pd
class PatientDataLoader:
def load_everion_patient_data(self, dir_name, filename, csv_delimiter,
tz_to_zurich=True, drop_first_row=False,
**kwargs):
csv_in_file = os.path.join(dir_name, filename)
if not os.path.exists(csv_in_file) or os.path.getsize(csv_in_file) <= 0:
print("csv file is empty")
return pd.DataFrame()
df = pd.read_csv(csv_in_file, sep=csv_delimiter, **kwargs)
if drop_first_row:
df.drop(df.head(1).index, inplace=True)
if tz_to_zurich:
df["timestamp"] = | pd.to_datetime(df["timestamp"]) | pandas.to_datetime |
import pandas as pd
# Create Series object.
ser = pd.Series(["NTU", "NCKU", "NCU", "NYCU"])
from datetime import datetime, timedelta
import inspect
import numpy as np
import pytest
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_interval_dtype,
is_object_dtype,
)
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
IntervalIndex,
MultiIndex,
RangeIndex,
Series,
Timestamp,
cut,
date_range,
to_datetime,
)
import pandas.util.testing as tm
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
class TestDataFrameAlterAxes:
def test_set_index_directly(self, float_string_frame):
df = float_string_frame
idx = Index(np.arange(len(df))[::-1])
df.index = idx
tm.assert_index_equal(df.index, idx)
with pytest.raises(ValueError, match="Length mismatch"):
df.index = idx[::2]
def test_set_index(self, float_string_frame):
df = float_string_frame
idx = Index(np.arange(len(df))[::-1])
df = df.set_index(idx)
tm.assert_index_equal(df.index, idx)
with pytest.raises(ValueError, match="Length mismatch"):
df.set_index(idx[::2])
def test_set_index_cast(self):
# issue casting an index then set_index
df = DataFrame(
{"A": [1.1, 2.2, 3.3], "B": [5.0, 6.1, 7.2]}, index=[2010, 2011, 2012]
)
df2 = df.set_index(df.index.astype(np.int32))
tm.assert_frame_equal(df, df2)
# A has duplicate values, C does not
@pytest.mark.parametrize("keys", ["A", "C", ["A", "B"], ("tuple", "as", "label")])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_drop_inplace(self, frame_of_index_cols, drop, inplace, keys):
df = frame_of_index_cols
if isinstance(keys, list):
idx = MultiIndex.from_arrays([df[x] for x in keys], names=keys)
else:
idx = Index(df[keys], name=keys)
expected = df.drop(keys, axis=1) if drop else df
expected.index = idx
if inplace:
result = df.copy()
result.set_index(keys, drop=drop, inplace=True)
else:
result = df.set_index(keys, drop=drop)
tm.assert_frame_equal(result, expected)
# A has duplicate values, C does not
@pytest.mark.parametrize("keys", ["A", "C", ["A", "B"], ("tuple", "as", "label")])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_append(self, frame_of_index_cols, drop, keys):
df = frame_of_index_cols
keys = keys if isinstance(keys, list) else [keys]
idx = MultiIndex.from_arrays(
[df.index] + [df[x] for x in keys], names=[None] + keys
)
expected = df.drop(keys, axis=1) if drop else df.copy()
expected.index = idx
result = df.set_index(keys, drop=drop, append=True)
tm.assert_frame_equal(result, expected)
# A has duplicate values, C does not
@pytest.mark.parametrize("keys", ["A", "C", ["A", "B"], ("tuple", "as", "label")])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_append_to_multiindex(self, frame_of_index_cols, drop, keys):
# append to existing multiindex
df = frame_of_index_cols.set_index(["D"], drop=drop, append=True)
keys = keys if isinstance(keys, list) else [keys]
expected = frame_of_index_cols.set_index(["D"] + keys, drop=drop, append=True)
result = df.set_index(keys, drop=drop, append=True)
tm.assert_frame_equal(result, expected)
def test_set_index_after_mutation(self):
# GH1590
df = DataFrame({"val": [0, 1, 2], "key": ["<KEY>"]})
expected = DataFrame({"val": [1, 2]}, Index(["b", "c"], name="key"))
df2 = df.loc[df.index.map(lambda indx: indx >= 1)]
result = df2.set_index("key")
tm.assert_frame_equal(result, expected)
# MultiIndex constructor does not work directly on Series -> lambda
# Add list-of-list constructor because list is ambiguous -> lambda
# also test index name if append=True (name is duplicate here for B)
@pytest.mark.parametrize(
"box",
[
Series,
Index,
np.array,
list,
lambda x: [list(x)],
lambda x: MultiIndex.from_arrays([x]),
],
)
@pytest.mark.parametrize(
"append, index_name", [(True, None), (True, "B"), (True, "test"), (False, None)]
)
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_pass_single_array(
self, frame_of_index_cols, drop, append, index_name, box
):
df = frame_of_index_cols
df.index.name = index_name
key = box(df["B"])
if box == list:
# list of strings gets interpreted as list of keys
msg = "['one', 'two', 'three', 'one', 'two']"
with pytest.raises(KeyError, match=msg):
df.set_index(key, drop=drop, append=append)
else:
# np.array/list-of-list "forget" the name of B
name_mi = getattr(key, "names", None)
name = [getattr(key, "name", None)] if name_mi is None else name_mi
result = df.set_index(key, drop=drop, append=append)
# only valid column keys are dropped
# since B is always passed as array above, nothing is dropped
expected = df.set_index(["B"], drop=False, append=append)
expected.index.names = [index_name] + name if append else name
tm.assert_frame_equal(result, expected)
# MultiIndex constructor does not work directly on Series -> lambda
# also test index name if append=True (name is duplicate here for A & B)
@pytest.mark.parametrize(
"box", [Series, Index, np.array, list, lambda x: MultiIndex.from_arrays([x])]
)
@pytest.mark.parametrize(
"append, index_name",
[(True, None), (True, "A"), (True, "B"), (True, "test"), (False, None)],
)
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_pass_arrays(
self, frame_of_index_cols, drop, append, index_name, box
):
df = frame_of_index_cols
df.index.name = index_name
keys = ["A", box(df["B"])]
# np.array/list "forget" the name of B
names = ["A", None if box in [np.array, list, tuple, iter] else "B"]
result = df.set_index(keys, drop=drop, append=append)
# only valid column keys are dropped
# since B is always passed as array above, only A is dropped, if at all
expected = df.set_index(["A", "B"], drop=False, append=append)
expected = expected.drop("A", axis=1) if drop else expected
expected.index.names = [index_name] + names if append else names
tm.assert_frame_equal(result, expected)
# MultiIndex constructor does not work directly on Series -> lambda
# We also emulate a "constructor" for the label -> lambda
# also test index name if append=True (name is duplicate here for A)
@pytest.mark.parametrize(
"box2",
[
Series,
Index,
np.array,
list,
iter,
lambda x: MultiIndex.from_arrays([x]),
lambda x: x.name,
],
)
@pytest.mark.parametrize(
"box1",
[
Series,
Index,
np.array,
list,
iter,
lambda x: MultiIndex.from_arrays([x]),
lambda x: x.name,
],
)
@pytest.mark.parametrize(
"append, index_name", [(True, None), (True, "A"), (True, "test"), (False, None)]
)
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_pass_arrays_duplicate(
self, frame_of_index_cols, drop, append, index_name, box1, box2
):
df = frame_of_index_cols
df.index.name = index_name
keys = [box1(df["A"]), box2(df["A"])]
result = df.set_index(keys, drop=drop, append=append)
# if either box is iter, it has been consumed; re-read
keys = [box1(df["A"]), box2(df["A"])]
# need to adapt first drop for case that both keys are 'A' --
# cannot drop the same column twice;
# use "is" because == would give ambiguous Boolean error for containers
first_drop = (
False if (keys[0] is "A" and keys[1] is "A") else drop # noqa: F632
)
# to test against already-tested behaviour, we add sequentially,
# hence second append always True; must wrap keys in list, otherwise
# box = list would be interpreted as keys
expected = df.set_index([keys[0]], drop=first_drop, append=append)
expected = expected.set_index([keys[1]], drop=drop, append=True)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_pass_multiindex(self, frame_of_index_cols, drop, append):
df = frame_of_index_cols
keys = MultiIndex.from_arrays([df["A"], df["B"]], names=["A", "B"])
result = df.set_index(keys, drop=drop, append=append)
# setting with a MultiIndex will never drop columns
expected = df.set_index(["A", "B"], drop=False, append=append)
tm.assert_frame_equal(result, expected)
def test_set_index_verify_integrity(self, frame_of_index_cols):
df = frame_of_index_cols
with pytest.raises(ValueError, match="Index has duplicate keys"):
df.set_index("A", verify_integrity=True)
# with MultiIndex
with pytest.raises(ValueError, match="Index has duplicate keys"):
df.set_index([df["A"], df["A"]], verify_integrity=True)
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_raise_keys(self, frame_of_index_cols, drop, append):
df = frame_of_index_cols
with pytest.raises(KeyError, match="['foo', 'bar', 'baz']"):
# column names are A-E, as well as one tuple
df.set_index(["foo", "bar", "baz"], drop=drop, append=append)
# non-existent key in list with arrays
with pytest.raises(KeyError, match="X"):
df.set_index([df["A"], df["B"], "X"], drop=drop, append=append)
msg = "[('foo', 'foo', 'foo', 'bar', 'bar')]"
# tuples always raise KeyError
with pytest.raises(KeyError, match=msg):
df.set_index(tuple(df["A"]), drop=drop, append=append)
# also within a list
with pytest.raises(KeyError, match=msg):
df.set_index(["A", df["A"], tuple(df["A"])], drop=drop, append=append)
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("drop", [True, False])
@pytest.mark.parametrize("box", [set], ids=["set"])
def test_set_index_raise_on_type(self, frame_of_index_cols, box, drop, append):
df = frame_of_index_cols
msg = 'The parameter "keys" may be a column key, .*'
# forbidden type, e.g. set
with pytest.raises(TypeError, match=msg):
df.set_index(box(df["A"]), drop=drop, append=append)
# forbidden type in list, e.g. set
with pytest.raises(TypeError, match=msg):
df.set_index(["A", df["A"], box(df["A"])], drop=drop, append=append)
# MultiIndex constructor does not work directly on Series -> lambda
@pytest.mark.parametrize(
"box",
[Series, Index, np.array, iter, lambda x: MultiIndex.from_arrays([x])],
ids=["Series", "Index", "np.array", "iter", "MultiIndex"],
)
@pytest.mark.parametrize("length", [4, 6], ids=["too_short", "too_long"])
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_raise_on_len(
self, frame_of_index_cols, box, length, drop, append
):
# GH 24984
df = frame_of_index_cols # has length 5
values = np.random.randint(0, 10, (length,))
msg = "Length mismatch: Expected 5 rows, received array of length.*"
# wrong length directly
with pytest.raises(ValueError, match=msg):
df.set_index(box(values), drop=drop, append=append)
# wrong length in list
with pytest.raises(ValueError, match=msg):
df.set_index(["A", df.A, box(values)], drop=drop, append=append)
def test_set_index_custom_label_type(self):
# GH 24969
class Thing:
def __init__(self, name, color):
self.name = name
self.color = color
def __str__(self):
return "<Thing {self.name!r}>".format(self=self)
# necessary for pretty KeyError
__repr__ = __str__
thing1 = Thing("One", "red")
thing2 = Thing("Two", "blue")
df = DataFrame({thing1: [0, 1], thing2: [2, 3]})
expected = DataFrame({thing1: [0, 1]}, index=Index([2, 3], name=thing2))
# use custom label directly
result = df.set_index(thing2)
tm.assert_frame_equal(result, expected)
# custom label wrapped in list
result = df.set_index([thing2])
tm.assert_frame_equal(result, expected)
# missing key
thing3 = Thing("Three", "pink")
msg = "<Thing 'Three'>"
with pytest.raises(KeyError, match=msg):
# missing label directly
df.set_index(thing3)
with pytest.raises(KeyError, match=msg):
# missing label in list
df.set_index([thing3])
def test_set_index_custom_label_hashable_iterable(self):
# GH 24969
# actual example discussed in GH 24984 was e.g. for shapely.geometry
# objects (e.g. a collection of Points) that can be both hashable and
# iterable; using frozenset as a stand-in for testing here
class Thing(frozenset):
# need to stabilize repr for KeyError (due to random order in sets)
def __repr__(self):
tmp = sorted(list(self))
# double curly brace prints one brace in format string
return "frozenset({{{}}})".format(", ".join(map(repr, tmp)))
thing1 = Thing(["One", "red"])
thing2 = Thing(["Two", "blue"])
df = DataFrame({thing1: [0, 1], thing2: [2, 3]})
expected = DataFrame({thing1: [0, 1]}, index=Index([2, 3], name=thing2))
# use custom label directly
result = df.set_index(thing2)
tm.assert_frame_equal(result, expected)
# custom label wrapped in list
result = df.set_index([thing2])
tm.assert_frame_equal(result, expected)
# missing key
thing3 = Thing(["Three", "pink"])
msg = r"frozenset\(\{'Three', 'pink'\}\)"
with pytest.raises(KeyError, match=msg):
# missing label directly
df.set_index(thing3)
with pytest.raises(KeyError, match=msg):
# missing label in list
df.set_index([thing3])
def test_set_index_custom_label_type_raises(self):
# GH 24969
# purposefully inherit from something unhashable
class Thing(set):
def __init__(self, name, color):
self.name = name
self.color = color
def __str__(self):
return "<Thing {self.name!r}>".format(self=self)
thing1 = Thing("One", "red")
thing2 = Thing("Two", "blue")
df = DataFrame([[0, 2], [1, 3]], columns=[thing1, thing2])
msg = 'The parameter "keys" may be a column key, .*'
with pytest.raises(TypeError, match=msg):
# use custom label directly
df.set_index(thing2)
with pytest.raises(TypeError, match=msg):
# custom label wrapped in list
df.set_index([thing2])
def test_construction_with_categorical_index(self):
ci = tm.makeCategoricalIndex(10)
ci.name = "B"
# with Categorical
df = DataFrame({"A": np.random.randn(10), "B": ci.values})
idf = df.set_index("B")
tm.assert_index_equal(idf.index, ci)
# from a CategoricalIndex
df = DataFrame({"A": np.random.randn(10), "B": ci})
idf = df.set_index("B")
tm.assert_index_equal(idf.index, ci)
# round-trip
idf = idf.reset_index().set_index("B")
tm.assert_index_equal(idf.index, ci)
def test_set_index_cast_datetimeindex(self):
df = DataFrame(
{
"A": [datetime(2000, 1, 1) + timedelta(i) for i in range(1000)],
"B": np.random.randn(1000),
}
)
idf = df.set_index("A")
assert isinstance(idf.index, DatetimeIndex)
def test_convert_dti_to_series(self):
# don't cast a DatetimeIndex WITH a tz, leave as object
# GH 6032
idx = DatetimeIndex(
to_datetime(["2013-1-1 13:00", "2013-1-2 14:00"]), name="B"
).tz_localize("US/Pacific")
df = DataFrame(np.random.randn(2, 1), columns=["A"])
expected = Series(
np.array(
[
Timestamp("2013-01-01 13:00:00-0800", tz="US/Pacific"),
Timestamp("2013-01-02 14:00:00-0800", tz="US/Pacific"),
],
dtype="object",
),
name="B",
)
# convert index to series
result = Series(idx)
tm.assert_series_equal(result, expected)
# assign to frame
df["B"] = idx
result = df["B"]
tm.assert_series_equal(result, expected)
# convert to series while keeping the timezone
result = idx.to_series(keep_tz=True, index=[0, 1])
tm.assert_series_equal(result, expected)
# convert to utc
with tm.assert_produces_warning(FutureWarning):
df["B"] = idx.to_series(keep_tz=False, index=[0, 1])
result = df["B"]
comp = Series(DatetimeIndex(expected.values).tz_localize(None), name="B")
tm.assert_series_equal(result, comp)
with tm.assert_produces_warning(FutureWarning):
# -*- coding: utf-8 -*-
"""
Created on 2021/02/23 21:49
@Project -> File: markov-process-order-determination -> univar_encoding.py
@Author: luolei
@Email: <EMAIL>
@Describe: univariate series encoding
"""
from collections import defaultdict
import category_encoders as ce
import pandas as pd
import numpy as np
class UnsuperCategorEncoding(object):
"""将多元序列编码为一元序列"""
def __init__(self, x: np.ndarray):
self.x = x.astype(np.int).flatten()
self.N = len(x)
@staticmethod
def get_map(x: np.ndarray):
labels_discret = np.unique(x, axis=0).astype(int).astype(str)
map_ = defaultdict(int)
for i in range(len(labels_discret)):
map_[labels_discret[i]] = i
return map_
@staticmethod
def _convert2label(x, map_):
x = x.astype(int).astype(str)
key = ''.join(list(x))
return map_[key]
def encode(self, method: str):
map_ = self.get_map(self.x)
series_encoded = np.apply_along_axis(
lambda x: self._convert2label(x, map_), 1, self.x.reshape(-1, 1))
df = pd.DataFrame(series_encoded, columns=['label'])
class Pywedge_Charts():
'''
Makes 8 different types of interactive Charts with interactive axis selection widgets in a single line of code for the given dataset.
Different types of Charts viz,
1. Scatter Plot
2. Pie Chart
3. Bar Plot
4. Violin Plot
5. Box Plot
6. Distribution Plot
7. Histogram
8. Correlation Plot
Inputs:
1. Dataframe
2. c = any redundant column to be removed (like ID column etc., at present supports a single column removal, subsequent version will provision multiple column removal requirements)
3. y = target column name as a string
Returns:
Charts widget
'''
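# Example usage (a sketch; 'df' and 'target' are placeholder names for the caller's data):
#   charts = Pywedge_Charts(df, c=None, y='target')
#   charts.make_charts()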
def __init__(self, train, c, y, manual=True):
self.train = train
self.c = c
self.y = y
self.X = self.train.drop(self.y,1)
self.manual = manual
def make_charts(self):
import pandas as pd
import ipywidgets as widgets
import plotly.express as px
import plotly.figure_factory as ff
import plotly.offline as pyo
from ipywidgets import HBox, VBox, Button
from ipywidgets import interact, interact_manual, interactive
import plotly.graph_objects as go
from plotly.offline import iplot
header = widgets.HTML(value="<h2>Pywedge Make_Charts </h2>")
display(header)
if len(self.train) > 500:
from sklearn.model_selection import train_test_split
test_size = 500/len(self.train)
if self.c!=None:
data = self.X.drop(self.c,1)
else:
data = self.X
target = self.train[self.y]
X_train, X_test, y_train, y_test = train_test_split(data, target, test_size=test_size, random_state=1)
train_mc = pd.concat([X_test, y_test], axis=1)
else:
train_mc = self.train
train_numeric = train_mc.select_dtypes('number')
train_cat = train_mc.select_dtypes(exclude='number')
out1 = widgets.Output()
out2 = widgets.Output()
out3 = widgets.Output()
out4 = widgets.Output()
out5 = widgets.Output()
out6 = widgets.Output()
out7 = widgets.Output()
out8 = widgets.Output()
tab = widgets.Tab(children = [out1, out2, out3, out4, out5, out6, out7, out8])
tab.set_title(0, 'Scatter Plot')
tab.set_title(1, 'Pie Chart')
tab.set_title(2, 'Bar Plot')
tab.set_title(3, 'Violin Plot')
tab.set_title(4, 'Box Plot')
tab.set_title(5, 'Distribution Plot')
tab.set_title(6, 'Histogram')
tab.set_title(7, 'Correlation plot')
display(tab)
with out1:
header = widgets.HTML(value="<h1>Scatter Plots </h1>")
display(header)
x = widgets.Dropdown(options=list(train_mc.select_dtypes('number').columns))
def scatter_plot(X_Axis=list(train_mc.select_dtypes('number').columns),
Y_Axis=list(train_mc.select_dtypes('number').columns)[1:],
Color=list(train_mc.select_dtypes('number').columns)):
fig = go.FigureWidget(data=go.Scatter(x=train_mc[X_Axis],
y=train_mc[Y_Axis],
mode='markers',
text=list(train_cat),
marker_color=train_mc[Color]))
fig.update_layout(title=f'{Y_Axis.title()} vs {X_Axis.title()}',
xaxis_title=f'{X_Axis.title()}',
yaxis_title=f'{Y_Axis.title()}',
autosize=False,width=600,height=600)
fig.show()
widgets.interact_manual.opts['manual_name'] = 'Make_Chart'
one = interactive(scatter_plot, {'manual': self.manual, 'manual_name':'Make_Chart'})
two = interactive(scatter_plot, {'manual': self.manual, 'manual_name':'Make_Chart'})
three = interactive(scatter_plot, {'manual': self.manual, 'manual_name':'Make_Chart'})
four = interactive(scatter_plot, {'manual': self.manual, 'manual_name':'Make_Chart'})
g = widgets.HBox([one, two])
h = widgets.HBox([three, four])
i = widgets.VBox([g,h])
display(i)
with out2:
header = widgets.HTML(value="<h1>Pie Charts </h1>")
display(header)
def pie_chart(Labels=list(train_mc.select_dtypes(exclude='number').columns),
Values=list(train_mc.select_dtypes('number').columns)[0:]):
fig = go.FigureWidget(data=[go.Pie(labels=train_mc[Labels], values=train_mc[Values])])
fig.update_layout(title=f'{Values.title()} vs {Labels.title()}',
autosize=False,width=500,height=500)
fig.show()
one = interactive(pie_chart, {'manual': self.manual, 'manual_name':'Make_Chart'})
two = interactive(pie_chart, {'manual': self.manual, 'manual_name':'Make_Chart'})
three = interactive(pie_chart, {'manual': self.manual, 'manual_name':'Make_Chart'})
four = interactive(pie_chart, {'manual': self.manual, 'manual_name':'Make_Chart'})
g = widgets.HBox([one, two])
h = widgets.HBox([three, four])
i = widgets.VBox([g,h])
display(i)
with out3:
header = widgets.HTML(value="<h1>Bar Plots </h1>")
display(header)
def bar_plot(X_Axis=list(train_mc.select_dtypes(exclude='number').columns),
Y_Axis=list(train_mc.select_dtypes('number').columns)[1:],
Color=list(train_mc.select_dtypes(exclude='number').columns)):
fig1 = px.bar(train_mc, x=train_mc[X_Axis], y=train_mc[Y_Axis], color=train_mc[Color])
fig1.update_layout(barmode='group',
title=f'{X_Axis.title()} vs {Y_Axis.title()}',
xaxis_title=f'{X_Axis.title()}',
yaxis_title=f'{Y_Axis.title()}',
autosize=False,width=600,height=600)
fig1.show()
one = interactive(bar_plot, {'manual': self.manual, 'manual_name':'Make_Chart'})
two = interactive(bar_plot, {'manual': self.manual, 'manual_name':'Make_Chart'})
three = interactive(bar_plot, {'manual': self.manual, 'manual_name':'Make_Chart'})
four = interactive(bar_plot, {'manual': self.manual, 'manual_name':'Make_Chart'})
g = widgets.HBox([one, two])
h = widgets.HBox([three, four])
i = widgets.VBox([g,h])
display(i)
with out4:
header = widgets.HTML(value="<h1>Violin Plots </h1>")
display(header)
def viol_plot(X_Axis=list(train_mc.select_dtypes('number').columns),
Y_Axis=list(train_mc.select_dtypes('number').columns)[1:],
Color=list(train_mc.select_dtypes(exclude='number').columns)):
fig2 = px.violin(train_mc, X_Axis, Y_Axis, Color, box=True, hover_data=train_mc.columns)
fig2.update_layout(title=f'{X_Axis.title()} vs {Y_Axis.title()}',
xaxis_title=f'{X_Axis.title()}',
autosize=False,width=600,height=600)
fig2.show()
one = interactive(viol_plot, {'manual': self.manual, 'manual_name':'Make_Chart'})
two = interactive(viol_plot, {'manual': self.manual, 'manual_name':'Make_Chart'})
three = interactive(viol_plot, {'manual': self.manual, 'manual_name':'Make_Chart'})
four = interactive(viol_plot, {'manual': self.manual, 'manual_name':'Make_Chart'})
g = widgets.HBox([one, two])
h = widgets.HBox([three, four])
i = widgets.VBox([g,h])
display(i)
with out5:
header = widgets.HTML(value="<h1>Box Plots </h1>")
display(header)
def box_plot(X_Axis=list(train_mc.select_dtypes(exclude='number').columns),
Y_Axis=list(train_mc.select_dtypes('number').columns)[0:],
Color=list(train_mc.select_dtypes(exclude='number').columns)):
fig4 = px.box(train_mc, x=X_Axis, y=Y_Axis, color=Color, points="all")
fig4.update_layout(barmode='group',
title=f'{X_Axis.title()} vs {Y_Axis.title()}',
xaxis_title=f'{X_Axis.title()}',
yaxis_title=f'{Y_Axis.title()}',
autosize=False,width=600,height=600)
fig4.show()
one = interactive(box_plot, {'manual': self.manual, 'manual_name':'Make_Chart'})
two = interactive(box_plot, {'manual': self.manual, 'manual_name':'Make_Chart'})
three = interactive(box_plot, {'manual': self.manual, 'manual_name':'Make_Chart'})
four = interactive(box_plot, {'manual': self.manual, 'manual_name':'Make_Chart'})
g = widgets.HBox([one, two])
h = widgets.HBox([three, four])
i = widgets.VBox([g,h])
display(i)
with out6:
header = widgets.HTML(value="<h1>Distribution Plots </h1>")
display(header)
def dist_plot(X_Axis=list(train_mc.select_dtypes('number').columns),
Y_Axis=list(train_mc.select_dtypes('number').columns)[1:],
Color=list(train_mc.select_dtypes(exclude='number').columns)):
fig2 = px.histogram(train_mc, X_Axis, Y_Axis, Color, marginal='violin', hover_data=train_mc.columns)
fig2.update_layout(title=f'{X_Axis.title()} vs {Y_Axis.title()}',
xaxis_title=f'{X_Axis.title()}',
autosize=False,width=600,height=600)
fig2.show()
one = interactive(dist_plot, {'manual': self.manual, 'manual_name':'Make_Chart'})
two = interactive(dist_plot, {'manual': self.manual, 'manual_name':'Make_Chart'})
three = interactive(dist_plot, {'manual': self.manual, 'manual_name':'Make_Chart'})
four = interactive(dist_plot, {'manual': self.manual, 'manual_name':'Make_Chart'})
g = widgets.HBox([one, two])
h = widgets.HBox([three, four])
i = widgets.VBox([g,h])
display(i)
with out7:
header = widgets.HTML(value="<h1>Histogram </h1>")
display(header)
def hist_plot(X_Axis=list(train_mc.columns)):
fig2 = px.histogram(train_mc, X_Axis)
fig2.update_layout(title=f'{X_Axis.title()}',
xaxis_title=f'{X_Axis.title()}',
autosize=False,width=600,height=600)
fig2.show()
one = interactive(hist_plot, {'manual': self.manual, 'manual_name':'Make_Chart'})
two = interactive(hist_plot, {'manual': self.manual, 'manual_name':'Make_Chart'})
three = interactive(hist_plot, {'manual': self.manual, 'manual_name':'Make_Chart'})
four = interactive(hist_plot, {'manual': self.manual, 'manual_name':'Make_Chart'})
g = widgets.HBox([one, two])
h = widgets.HBox([three, four])
i = widgets.VBox([g,h])
display(i)
with out8:
header = widgets.HTML(value="<h1>Correlation Plots </h1>")
display(header)
import plotly.figure_factory as ff
corrs = train_mc.corr()
colorscale = ['Greys', 'Greens', 'Bluered', 'RdBu',
'Reds', 'Blues', 'Picnic', 'Rainbow', 'Portland', 'Jet',
'Hot', 'Blackbody', 'Earth', 'Electric', 'Viridis', 'Cividis']
@interact_manual
def plot_corrs(colorscale=colorscale):
figure = ff.create_annotated_heatmap(z = corrs.round(2).values,
x =list(corrs.columns),
y=list(corrs.index),
colorscale=colorscale,
annotation_text=corrs.round(2).values)
iplot(figure)
class baseline_model():
'''
Cleans the raw dataframe to be fed into ML models and runs various baseline models. Following data pre_processing will be carried out,
1) segregating numeric & categorical columns
2) missing values imputation for numeric & categorical columns
3) standardization
4) feature importance
5) SMOTE
6) baseline model
Inputs:
1) train = train dataframe
2) test = stand-out test dataframe (without target column)
3) c = any redundant column to be removed (like ID column etc., at present supports a single column removal, subsequent version will provision multiple column removal requirements)
4) y = target column name as a string
5) type = Classification / Regression
Returns:
1) Various classification/regressions models & model performances
2) new_X (cleaned feature columns in dataframe)
3) new_y (cleaned target column in dataframe)
4) new_test (cleaned stand-out test dataframe)
'''
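# Example usage (a sketch; the dataframes and the target column name are placeholders):
#   blm = baseline_model(train_df, test_df, c=None, y='target', type='Classification')
#   blm.classification_summary()        # or blm.Regression_summary() for regression tasks
#   # after predicting inside the widget: blm.predictions_baseline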
def __init__(self, train, test, c, y, type="Classification"):
self.train = train
self.test = test
self.c = c
self.y = y
self.type = type
self.X = train.drop(self.y,1)
def classification_summary(self):
import ipywidgets as widgets
from ipywidgets import HBox, VBox, Button
from IPython.display import display, Markdown, clear_output
header = widgets.HTML(value="<h2>Pywedge Baseline Models </h2>")
display(header)
out1 = widgets.Output()
out2 = widgets.Output()
tab = widgets.Tab(children = [out1, out2])
tab.set_title(0,'Baseline Models')
tab.set_title(1, 'Predict Baseline Model')
display(tab)
with out1:
import ipywidgets as widgets
from ipywidgets import HBox, VBox, Button
from IPython.display import display, Markdown, clear_output
header = widgets.HTML(value="<h2>Pre_processing </h2>")
display(header)
import pandas as pd
cat_info = widgets.Dropdown(
options = [('cat_codes', '1'), ('get_dummies', '2')],
value = '1',
description = 'Select categorical conversion',
style = {'description_width': 'initial'},
disabled=False)
std_scr = widgets.Dropdown(
options = [('StandardScaler', '1'), ('RobustScaler', '2'), ('MinMaxScaler', '3'), ('No Standardization', 'n')],
value = 'n',
description = 'Select Standardization methods',
style = {'description_width': 'initial'},
disabled=False)
apply_smote = widgets.Dropdown(
options = [('Yes', 'y'), ('No', 'n')],
value = 'y',
description = 'Do you want to apply SMOTE?',
style = {'description_width': 'initial'},
disabled=False)
pp_class = widgets.VBox([cat_info, std_scr, apply_smote])
pp_reg = widgets.VBox([cat_info, std_scr])
if self.type == 'Classification':
display(pp_class)
else:
display(pp_reg)
test_size = widgets.BoundedFloatText(
value=0.20,
min=0.05,
max=0.5,
step=0.05,
description='Test Size %',
disabled=False)
display(test_size)
button_1 = widgets.Button(description = 'Run Baseline models')
out = widgets.Output()
def on_button_clicked(_):
with out:
clear_output()
import pandas as pd
self.new_X = self.X.copy(deep=True)
self.new_y = self.y
self.new_test = self.test.copy(deep=True)
categorical_cols = self.new_X.select_dtypes('object').columns.to_list()
for col in categorical_cols:
self.new_X[col].fillna(self.new_X[col].mode()[0], inplace=True)
numeric_cols = self.new_X.select_dtypes(['float64', 'int64']).columns.to_list()
for col in numeric_cols:
self.new_X[col].fillna(self.new_X[col].mean(), inplace=True)
test_categorical_cols = self.new_test.select_dtypes('object').columns.to_list()
for col in test_categorical_cols:
self.new_test[col].fillna(self.new_test[col].mode()[0], inplace=True)
numeric_cols = self.new_test.select_dtypes(['float64', 'int64']).columns.to_list()
for col in numeric_cols:
self.new_test[col].fillna(self.new_test[col].mean(), inplace=True)
if cat_info.value == '1':
for col in categorical_cols:
self.new_X[col] = self.new_X[col].astype('category')
self.new_X[col] = self.new_X[col].cat.codes
self.new_test[col] = self.new_test[col].astype('category')
self.new_test[col] = self.new_test[col].cat.codes
print('> Categorical columns converted using Catcodes')
if cat_info.value == '2':
self.new_X = pd.get_dummies(self.new_X,drop_first=True)
self.new_test = pd.get_dummies(self.new_test,drop_first=True)
print('> Categorical columns converted using Get_Dummies')
self.new_y = pd.DataFrame(self.train[[self.y]])
self.new_y = pd.get_dummies(self.new_y,drop_first=True)
if std_scr.value == '1':
from sklearn.preprocessing import StandardScaler
scalar = StandardScaler()
self.new_X = pd.DataFrame(scalar.fit_transform(self.new_X), columns=self.new_X.columns, index=self.new_X.index)
self.new_test = pd.DataFrame(scalar.fit_transform(self.new_test), columns=self.new_test.columns, index=self.new_test.index)
print('> standardization using Standard Scaler completed')
elif std_scr.value == '2':
from sklearn.preprocessing import RobustScaler
scalar = RobustScaler()
self.new_X= pd.DataFrame(scalar.fit_transform(self.new_X), columns=self.new_X.columns, index=self.new_X.index)
self.new_test= pd.DataFrame(scalar.fit_transform(self.new_test), columns=self.new_test.columns, index=self.new_test.index)
print('> standardization using Robust Scaler completed')
elif std_scr.value == '3':
from sklearn.preprocessing import MinMaxScaler
scalar = MinMaxScaler()
self.new_X= pd.DataFrame(scalar.fit_transform(self.new_X), columns=self.new_X.columns, index=self.new_X.index)
self.new_test= pd.DataFrame(scalar.fit_transform(self.new_test), columns=self.new_test.columns, index=self.new_test.index)
print('> standardization using MinMax Scaler completed')
elif std_scr.value == 'n':
print('> No standardization done')
if self.type=="Classification":
if apply_smote.value == 'y':
from imblearn.over_sampling import SMOTE
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
from sklearn.exceptions import DataConversionWarning
warnings.filterwarnings(action='ignore', category=DataConversionWarning)
warnings.filterwarnings('ignore', 'FutureWarning')
sm = SMOTE(random_state=42, n_jobs=-1)
new_X_cols = self.new_X.columns
new_y_cols = self.new_y.columns
self.new_X, self.new_y= sm.fit_resample(self.new_X, self.new_y)
self.new_X = pd.DataFrame(self.new_X, columns=new_X_cols)
self.new_y= pd.DataFrame(self.new_y, columns=new_y_cols)
print('> Oversampling using SMOTE completed')
else:
print('> No oversampling done')
print('\nStarting classification_summary...')
print('TOP 10 FEATURE IMPORTANCE - USING ADABOOST CLASSIFIER')
from sklearn.ensemble import AdaBoostClassifier
import pandas as pd
import warnings
warnings.filterwarnings('ignore')
ab = AdaBoostClassifier().fit(self.new_X, self.new_y)
print(pd.Series(ab.feature_importances_, index=self.new_X.columns).sort_values(ascending=False).head(10))
from sklearn.model_selection import train_test_split
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(
self.new_X.values, self.new_y.values, test_size=test_size.value, random_state=1)
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.svm import LinearSVC
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.tree import DecisionTreeClassifier
from sklearn.experimental import enable_hist_gradient_boosting
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier, ExtraTreesClassifier, HistGradientBoostingClassifier
from catboost import CatBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.linear_model import LogisticRegression
import xgboost as xgb
from sklearn.metrics import accuracy_score, f1_score
from sklearn.metrics import roc_auc_score
import warnings
warnings.filterwarnings('ignore')
from tqdm.notebook import trange, tqdm
classifiers = {
"Logistic" : LogisticRegression(n_jobs=-1),
"KNN(3)" : KNeighborsClassifier(3, n_jobs=-1),
"Decision Tree": DecisionTreeClassifier(max_depth=7),
"Random Forest": RandomForestClassifier(max_depth=7, n_estimators=10, max_features=4, n_jobs=-1),
"AdaBoost" : AdaBoostClassifier(),
"GB Classifier": GradientBoostingClassifier(),
"ExtraTree Cls": ExtraTreesClassifier(n_jobs=-1),
"Hist GB Cls" : HistGradientBoostingClassifier(),
"MLP Cls." : MLPClassifier(alpha=1),
"XGBoost" : xgb.XGBClassifier(max_depth=4, n_estimators=10, learning_rate=0.1, n_jobs=-1),
"CatBoost" : CatBoostClassifier(silent=True),
"Naive Bayes" : GaussianNB(),
"QDA" : QuadraticDiscriminantAnalysis(),
"Linear SVC" : LinearSVC(),
}
from time import time
k = 14
head = list(classifiers.items())[:k]
for name, classifier in tqdm(head):
start = time()
classifier.fit(self.X_train, self.y_train)
train_time = time() - start
start = time()
predictions = classifier.predict(self.X_test)
predict_time = time()-start
acc_score= (accuracy_score(self.y_test,predictions))
roc_score= (roc_auc_score(self.y_test,predictions))
f1_macro= (f1_score(self.y_test, predictions, average='macro'))
print("{:<15}| acc_score = {:.3f} | roc_score = {:,.3f} | f1_score(macro) = {:,.3f} | Train time = {:,.3f}s | Pred. time = {:,.3f}s".format(name, acc_score, roc_score, f1_macro, train_time, predict_time))
button_1.on_click(on_button_clicked)
a = widgets.VBox([button_1, out])
display(a)
with out2:
base_model = widgets.Dropdown(
options=['Logistic Regression', 'KNN', 'Decision Tree', 'Random Forest', 'MLP Classifier', 'AdaBoost', 'CatBoost', 'GB Classifier', 'ExtraTree Cls', 'Hist GB Cls' ],
value='Logistic Regression',
description='Choose Base Model: ',
style = {'description_width': 'initial'},
disabled=False)
display(base_model)
button_2 = widgets.Button(description = 'Predict Baseline models')
out2 = widgets.Output()
def on_pred_button_clicked(_):
with out2:
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier, ExtraTreesClassifier, HistGradientBoostingClassifier
from catboost import CatBoostClassifier
from sklearn.linear_model import LogisticRegression
import xgboost as xgb
clear_output()
print(base_model.value)
if base_model.value == 'Logistic Regression':
classifier = LogisticRegression(max_iter=1000, n_jobs=-1)
classifier.fit(self.X_train, self.y_train)
self.predictions_baseline = classifier.predict(self.new_test)
print('> Prediction completed. \n> Use dot operator in below code cell to access predict, for eg., blm.predictions_baseline, where blm is pywedge_baseline_model class object')
if base_model.value == 'KNN':
classifier = KNeighborsClassifier(3, n_jobs=-1)
classifier.fit(self.X_train, self.y_train)
self.predictions_baseline = classifier.predict(self.new_test)
print('Prediction completed. \nUse dot operator in below code cell to access predict, for eg., blm.predictions_baseline, where blm is pywedge_baseline_model class object')
if base_model.value == 'Decision Tree':
classifier = DecisionTreeClassifier(max_depth=7)
classifier.fit(self.X_train, self.y_train)
self.predictions_baseline = classifier.predict(self.new_test)
print('Prediction completed. \nUse dot operator in below code cell to access predict, for eg., blm.predictions_baseline, where blm is pywedge_baseline_model class object')
if base_model.value == 'Random Forest':
classifier = RandomForestClassifier(max_depth=7, n_estimators=10, max_features=4, n_jobs=-1)
classifier.fit(self.X_train, self.y_train)
self.predictions_baseline = classifier.predict(self.new_test)
print('Prediction completed. \nUse dot operator in below code cell to access predict, for eg., blm.predictions_baseline, where blm is pywedge_baseline_model class object')
if base_model.value == 'MLP Classifier':
classifier = MLPClassifier(alpha=1)
classifier.fit(self.X_train, self.y_train)
self.predictions_baseline = classifier.predict(self.new_test)
print('Prediction completed. \nUse dot operator in below code cell to access predict, for eg., blm.predictions_baseline, where blm is pywedge_baseline_model class object')
if base_model.value == 'AdaBoost':
classifier = AdaBoostClassifier()
classifier.fit(self.X_train, self.y_train)
self.predictions_baseline = classifier.predict(self.new_test)
print('Prediction completed. \nUse dot operator in below code cell to access predict, for eg., blm.predictions_baseline, where blm is pywedge_baseline_model class object')
if base_model.value == 'CatBoost':
classifier = CatBoostClassifier(silent=True)
classifier.fit(self.X_train, self.y_train)
self.predictions_baseline = classifier.predict(self.new_test)
print('Prediction completed. \nUse dot operator in below code cell to access predict, for eg., blm.predictions_baseline, where blm is pywedge_baseline_model class object')
if base_model.value == 'GB Classifier':
classifier = GradientBoostingClassifier()
classifier.fit(self.X_train, self.y_train)
self.predictions_baseline = classifier.predict(self.new_test)
self.predict_proba_baseline = classifier.predict_proba(self.new_test)
print('Prediction completed. \nUse dot operator in below code cell to access predict, for eg., blm.predictions_baseline (for predictions) & blm.predict_proba_baseline (for predict_proba), where blm is pywedge_baseline_model class object')
if base_model.value == 'ExtraTree Cls':
classifier = ExtraTreesClassifier(n_jobs=-1)
classifier.fit(self.X_train, self.y_train)
self.predictions_baseline = classifier.predict(self.new_test)
self.predict_proba_baseline = classifier.predict_proba(self.new_test)
print('Prediction completed. \nUse dot operator in below code cell to access predict, for eg., blm.predictions_baseline (for predictions) & blm.predict_proba_baseline (for predict_proba), where blm is pywedge_baseline_model class object')
if base_model.value == 'Hist GB Cls':
classifier = HistGradientBoostingClassifier()
classifier.fit(self.X_train, self.y_train)
self.predictions_baseline = classifier.predict(self.new_test)
self.predict_proba_baseline = classifier.predict_proba(self.new_test)
print('Prediction completed. \nUse dot operator in below code cell to access predict, for eg., blm.predictions_baseline (for predictions) & blm.predict_proba_baseline (for predict_proba), where blm is pywedge_baseline_model class object')
button_2.on_click(on_pred_button_clicked)
b = widgets.VBox([button_2, out2])
display(b)
def Regression_summary(self):
import ipywidgets as widgets
from ipywidgets import HBox, VBox, Button
from IPython.display import display, Markdown, clear_output
header = widgets.HTML(value="<h2>Pywedge Baseline Models </h2>")
display(header)
out1 = widgets.Output()
out2 = widgets.Output()
tab = widgets.Tab(children = [out1, out2])
tab.set_title(0,'Baseline Models')
tab.set_title(1, 'Predict Baseline Model')
display(tab)
with out1:
import ipywidgets as widgets
from ipywidgets import HBox, VBox, Button
from IPython.display import display, Markdown, clear_output
header = widgets.HTML(value="<h2>Pre_processing </h2>")
display(header)
import pandas as pd
cat_info = widgets.Dropdown(
options = [('cat_codes', '1'), ('get_dummies', '2')],
value = '1',
description = 'Select categorical conversion',
style = {'description_width': 'initial'},
disabled=False)
std_scr = widgets.Dropdown(
options = [('StandardScaler', '1'), ('RobustScaler', '2'), ('MinMaxScaler', '3'), ('No Standardization', 'n')],
value = 'n',
description = 'Select Standardization methods',
style = {'description_width': 'initial'},
disabled=False)
apply_smote = widgets.Dropdown(
options = [('Yes', 'y'), ('No', 'n')],
value = 'y',
description = 'Do you want to apply SMOTE?',
style = {'description_width': 'initial'},
disabled=False)
pp_class = widgets.VBox([cat_info, std_scr, apply_smote])
pp_reg = widgets.VBox([cat_info, std_scr])
if self.type == 'Classification':
display(pp_class)
else:
display(pp_reg)
test_size = widgets.BoundedFloatText(
value=0.20,
min=0.05,
max=0.5,
step=0.05,
description='Test Size %',
disabled=False)
display(test_size)
button_1 = widgets.Button(description = 'Run Baseline models')
out = widgets.Output()
def on_button_clicked(_):
with out:
clear_output()
import pandas as pd
self.new_X = self.X.copy(deep=True)
self.new_y = self.y
self.new_test = self.test.copy(deep=True)
categorical_cols = self.new_X.select_dtypes('object').columns.to_list()
for col in categorical_cols:
self.new_X[col].fillna(self.new_X[col].mode()[0], inplace=True)
numeric_cols = self.new_X.select_dtypes(['float64', 'int64']).columns.to_list()
for col in numeric_cols:
self.new_X[col].fillna(self.new_X[col].mean(), inplace=True)
test_categorical_cols = self.new_test.select_dtypes('object').columns.to_list()
for col in test_categorical_cols:
self.new_test[col].fillna(self.new_test[col].mode()[0], inplace=True)
numeric_cols = self.new_test.select_dtypes(['float64', 'int64']).columns.to_list()
for col in numeric_cols:
self.new_test[col].fillna(self.new_test[col].mean(), inplace=True)
if cat_info.value == '1':
for col in categorical_cols:
self.new_X[col] = self.new_X[col].astype('category')
self.new_X[col] = self.new_X[col].cat.codes
self.new_test[col] = self.new_test[col].astype('category')
self.new_test[col] = self.new_test[col].cat.codes
print('> Categorical columns converted using Catcodes')
if cat_info.value == '2':
self.new_X = pd.get_dummies(self.new_X,drop_first=True)
self.new_test = pd.get_dummies(self.new_test,drop_first=True)
print('> Categorical columns converted using Get_Dummies')
self.new_y = pd.DataFrame(self.train[[self.y]])
self.new_y = pd.get_dummies(self.new_y,drop_first=True)
if std_scr.value == '1':
from sklearn.preprocessing import StandardScaler
scalar = StandardScaler()
self.new_X = pd.DataFrame(scalar.fit_transform(self.new_X), columns=self.new_X.columns, index=self.new_X.index)
self.new_test = pd.DataFrame(scalar.fit_transform(self.new_test), columns=self.new_test.columns, index=self.new_test.index)
print('> standardization using Standard Scaler completed')
elif std_scr.value == '2':
from sklearn.preprocessing import RobustScaler
scalar = RobustScaler()
self.new_X= pd.DataFrame(scalar.fit_transform(self.new_X), columns=self.new_X.columns, index=self.new_X.index)
self.new_test= pd.DataFrame(scalar.fit_transform(self.new_test), columns=self.new_test.columns, index=self.new_test.index)
print('> standardization using Robust Scaler completed')
elif std_scr.value == '3':
from sklearn.preprocessing import MinMaxScaler
scalar = MinMaxScaler()
self.new_X= pd.DataFrame(scalar.fit_transform(self.new_X), columns=self.new_X.columns, index=self.new_X.index)
self.new_test= pd.DataFrame(scalar.fit_transform(self.new_test), columns=self.new_test.columns, index=self.new_test.index)
print('> standardization using MinMax Scaler completed')
elif std_scr.value == 'n':
print('> No standardization done')
print('Starting regression summary...')
print('TOP 10 FEATURE IMPORTANCE TABLE')
from sklearn.ensemble import AdaBoostRegressor
import pandas as pd
import warnings
warnings.filterwarnings('ignore')
ab = AdaBoostRegressor().fit(self.new_X, self.new_y)
print(pd.Series(ab.feature_importances_, index=self.new_X.columns).sort_values(ascending=False).head(10))
from sklearn.model_selection import train_test_split
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(
self.new_X.values, self.new_y.values, test_size=test_size.value, random_state=1)
from time import time
from sklearn.neighbors import KNeighborsRegressor
from sklearn.linear_model import LinearRegression
from sklearn.svm import SVR
from sklearn.svm import LinearSVR
from sklearn.linear_model import Lasso, Ridge
from sklearn.metrics import explained_variance_score
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import r2_score
from sklearn.tree import DecisionTreeRegressor
from sklearn.experimental import enable_hist_gradient_boosting
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor, AdaBoostRegressor, ExtraTreesRegressor, HistGradientBoostingRegressor
from catboost import CatBoostRegressor
from sklearn.neural_network import MLPRegressor
import xgboost as xgb
from math import sqrt
from tqdm.notebook import trange, tqdm
import warnings
warnings.filterwarnings('ignore')
print('--------------------------LINEAR MODELS---------------------------------')
lin_regressors = {
'Linear Reg' : LinearRegression(n_jobs=-1),
'KNN' : KNeighborsRegressor(n_jobs=-1),
'LinearSVR' : LinearSVR(),
'Lasso' : Lasso(),
'Ridge' : Ridge(),
}
from time import time
k = 10
head = list(lin_regressors.items())[:k]
for name, lin_regressors in tqdm(head):
start = time()
lin_regressors.fit(self.X_train, self.y_train)
train_time = time() - start
start = time()
predictions = lin_regressors.predict(self.X_test)
predict_time = time()-start
exp_var = explained_variance_score(self.y_test, predictions)
mae = mean_absolute_error(self.y_test, predictions)
rmse = sqrt(mean_absolute_error(self.y_test, predictions))
r2 = r2_score(self.y_test, predictions)
print("{:<15}| exp_var = {:.3f} | mae = {:,.3f} | rmse = {:,.3f} | r2 = {:,.3f} | Train time = {:,.3f}s | Pred. time = {:,.3f}s".format(name, exp_var, mae, rmse, r2, train_time, predict_time))
print('------------------------NON LINEAR MODELS----------------------------------')
print('---------------------THIS MIGHT TAKE A WHILE-------------------------------')
non_lin_regressors = {
#'SVR' : SVR(),
'Decision Tree' : DecisionTreeRegressor(max_depth=5),
'Random Forest' : RandomForestRegressor(max_depth=10, n_jobs=-1),
'GB Regressor' : GradientBoostingRegressor(n_estimators=200),
'CB Regressor' : CatBoostRegressor(silent=True),
'ADAB Regressor': AdaBoostRegressor(),
'MLP Regressor' : MLPRegressor(),
'XGB Regressor' : xgb.XGBRegressor(n_jobs=-1),
'Extra tree Reg': ExtraTreesRegressor(n_jobs=-1),
'Hist GB Reg' : HistGradientBoostingRegressor()
}
from time import time
k = 10
head = list(non_lin_regressors.items())[:k]
for name, non_lin_regressors in tqdm(head):
start = time()
non_lin_regressors.fit(self.X_train, self.y_train)
train_time = time() - start
start = time()
predictions = non_lin_regressors.predict(self.X_test)
predict_time = time()-start
exp_var = explained_variance_score(self.y_test, predictions)
mae = mean_absolute_error(self.y_test, predictions)
rmse = sqrt(mean_absolute_error(self.y_test, predictions))
r2 = r2_score(self.y_test, predictions)
print("{:<15}| exp_var = {:.3f} | mae = {:,.3f} | rmse = {:,.3f} | r2 = {:,.3f} | Train time = {:,.3f}s | Pred. time = {:,.3f}s".format(name, exp_var, mae, rmse, r2, train_time, predict_time))
button_1.on_click(on_button_clicked)
a = widgets.VBox([button_1, out])
display(a)
with out2:
base_model = widgets.Dropdown(
options=['Linear Regression', 'KNN', 'Decision Tree', 'Random Forest', 'MLP Regressor', 'AdaBoost', 'Grad-Boost', 'CatBoost'],
value='Linear Regression',
description='Choose Base Model: ',
style = {'description_width': 'initial'},
disabled=False)
display(base_model)
button_2 = widgets.Button(description = 'Predict Baseline models')
out2 = widgets.Output()
def on_pred_button_clicked(_):
with out2:
from sklearn.neighbors import KNeighborsRegressor
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor, AdaBoostRegressor
from catboost import CatBoostRegressor
from sklearn.neural_network import MLPRegressor
import xgboost as xgb
clear_output()
print(base_model.value)
if base_model.value == 'Linear Regression':
regressor = LinearRegression()
regressor.fit(self.X_train, self.y_train)
self.predictions_baseline = regressor.predict(self.new_test)
print('Prediction completed. \nUse dot operator in below code cell to access predict, for eg., blm.predictions_baseline, where blm is pywedge_baseline_model class object')
if base_model.value == 'KNN':
regressor = KNeighborsRegressor()
regressor.fit(self.X_train, self.y_train)
self.predictions_baseline = regressor.predict(self.new_test)
print('Prediction completed. \nUse the dot operator in the code cell below to access the predictions, e.g., blm.predictions_baseline, where blm is the pywedge_baseline_model class object')
if base_model.value == 'Decision Tree':
regressor = DecisionTreeRegressor(max_depth=5)
regressor.fit(self.X_train, self.y_train)
self.predictions_baseline = regressor.predict(self.new_test)
print('Prediction completed. \nUse the dot operator in the code cell below to access the predictions, e.g., blm.predictions_baseline, where blm is the pywedge_baseline_model class object')
if base_model.value == 'Random Forest':
regressor = RandomForestRegressor(max_depth=10)
regressor.fit(self.X_train, self.y_train)
self.predictions_baseline = regressor.predict(self.new_test)
print('Prediction completed. \nUse the dot operator in the code cell below to access the predictions, e.g., blm.predictions_baseline, where blm is the pywedge_baseline_model class object')
if base_model.value == 'MLP Regressor':
regressor = MLPRegressor()
regressor.fit(self.X_train, self.y_train)
self.predictions_baseline = regressor.predict(self.new_test)
print('Prediction completed. \nUse the dot operator in the code cell below to access the predictions, e.g., blm.predictions_baseline, where blm is the pywedge_baseline_model class object')
if base_model.value == 'AdaBoost':
regressor = AdaBoostRegressor()
regressor.fit(self.X_train, self.y_train)
self.predictions_baseline = regressor.predict(self.new_test)
print('Prediction completed. \nUse the dot operator in the code cell below to access the predictions, e.g., blm.predictions_baseline, where blm is the pywedge_baseline_model class object')
if base_model.value == 'Grad-Boost':
regressor = GradientBoostingRegressor(n_estimators=200)
regressor.fit(self.X_train, self.y_train)
self.predictions_baseline = regressor.predict(self.new_test)
print('Prediction completed. \nUse the dot operator in the code cell below to access the predictions, e.g., blm.predictions_baseline, where blm is the pywedge_baseline_model class object')
if base_model.value == 'CatBoost':
regressor = CatBoostRegressor(silent=True)
regressor.fit(self.X_train, self.y_train)
self.predictions_baseline = regressor.predict(self.new_test)
print('Prediction completed. \nUse the dot operator in the code cell below to access the predictions, e.g., blm.predictions_baseline, where blm is the pywedge_baseline_model class object')
button_2.on_click(on_pred_button_clicked)
b = widgets.VBox([button_2, out2])
display(b)
class Pywedge_HP():
'''
Creates an interactive widget-based hyperparameter selection tool for both Classification & Regression.
For Classification, the following baseline estimators are covered in the Gridsearch & Randomized search options
1) Logistic Regression
2) Decision Tree
3) Random Forest
4) KNN Classifier
For Regression, the following baseline estimators are covered in the Gridsearch & Randomized search options
1) Linear Regression
2) Decision Tree Regressor
3) Random Forest Regressor
4) KNN Regressor
Inputs:
1) train = train dataframe
2) test = stand-out test dataframe (without target column)
3) c = any redundant column to be removed (like an ID column; at present a single column removal is supported,
a subsequent version will provision for multiple column removal)
4) y = target column name as a string
Outputs:
1) Hyperparameter tuning results
2) Prediction on the stand-out test dataset
'''
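# A minimal usage sketch (illustrative only: `train_df`, `test_df`, the 'Id' column and the
# 'target' column are assumed names, not part of the library):
#   pph = Pywedge_HP(train_df, test_df, c='Id', y='target', tracking=False)
#   pph.HP_Tune_Classification()   # interactive widget for classification HP tuning
#   pph.predict_HP                 # predictions on the stand-out test data after tuning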
def __init__(self, train, test, c, y, tracking=False):
self.train = train
self.test = test
self.c = c
self.y = y
self.X = train.drop(self.y, axis=1)
self.tracking = tracking
def HP_Tune_Classification(self):
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, ExtraTreesClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
import ipywidgets as widgets
from ipywidgets import HBox, VBox, Button, Label
from ipywidgets import interact_manual, interactive, interact
import logging
from IPython.display import display, Markdown, clear_output
import warnings
warnings.filterwarnings('ignore')
header_1 = widgets.HTML(value="<h2>Pywedge HP_Tune</h2>")
display(header_1)
out1 = widgets.Output()
out2 = widgets.Output()
out3 = widgets.Output()
tab = widgets.Tab(children = [out1, out2, out3])
tab.set_title(0, 'Input')
tab.set_title(1, 'Output')
tab.set_title(2, 'Helper Page')
display(tab)
with out1:
header = widgets.HTML(value="<h3>Base Estimator</h3>")
display(header)
import pandas as pd
cat_info = widgets.Dropdown(
options = [('cat_codes', '1'), ('get_dummies', '2')],
value = '1',
description = 'Select categorical conversion',
style = {'description_width': 'initial'},
disabled=False)
std_scr = widgets.Dropdown(
options = [('StandardScalar', '1'), ('RobustScalar', '2'), ('MinMaxScalar', '3'), ('No Standardization', 'n')],
value = 'n',
description = 'Select Standardization methods',
style = {'description_width': 'initial'},
disabled=False)
apply_smote = widgets.Dropdown(
options = [('Yes', 'y'), ('No', 'n')],
value = 'y',
description = 'Do you want to apply SMOTE?',
style = {'description_width': 'initial'},
disabled=False)
pp_class = widgets.HBox([cat_info, std_scr, apply_smote])
header_2 = widgets.HTML(value="<h3>Pre_processing </h3>")
base_estimator = widgets.Dropdown(
options=['Logistic Regression', 'Decision Tree', 'Random Forest','AdaBoost', 'ExtraTree Classifier', 'KNN Classifier'],
value='Logistic Regression',
description='Choose Base Estimator: ',
style = {'description_width': 'initial'},
disabled=False)
display(base_estimator)
button = widgets.Button(description='Select Base Estimator')
out = widgets.Output()
# Logistic Regression Hyperparameters _Start
penalty_L = widgets.SelectMultiple(
options = ['l1', 'l2', 'elasticnet', 'none'],
value = ['none'],
rows = 4,
description = 'Penalty',
disabled = False)
dual_L = widgets.SelectMultiple(
options = [True, False],
value = [False],
rows = 2,
description = 'Dual',
disabled = False)
tol_L = widgets.Text(
value='0.0001',
placeholder='enter any float value',
description='Tolerence (tol)',
style = {'description_width': 'initial'},
disabled=False)
g = widgets.HBox([penalty_L, dual_L, tol_L])
C_L = widgets.Text(
value='1.0',
placeholder='enter any float value',
description='C',
disabled=False)
fit_intercept_L = widgets.SelectMultiple(
options = [True, False],
value = [False],
rows = 2,
description = 'Fit_intercept',
disabled = False)
intercept_scaling_L = widgets.Text(
value='1.0',
placeholder='enter any float value',
description='Intercept_scaling',
style = {'description_width': 'initial'},
disabled=False)
h = widgets.HBox([C_L, fit_intercept_L, intercept_scaling_L])
class_weight_L = widgets.SelectMultiple(
options = ['balanced', 'None'],
value = ['None'],
rows = 2,
description = 'Class_weight',
disabled = False)
random_state_L = widgets.Text(
value='0',
placeholder='enter any integer value',
description='Random_state',
style = {'description_width': 'initial'},
disabled=False)
solver_L = widgets.SelectMultiple(
options = ['newton-cg', 'lbfgs', 'sag', 'saga'],
value = ['lbfgs'],
rows = 4,
description = 'Solver',
disabled = False)
i= widgets.HBox([class_weight_L, random_state_L, solver_L])
max_iter_L = widgets.Text(
value='100',
placeholder='enter any integer value',
description='Max_Iterations',
style = {'description_width': 'initial'},
disabled=False)
verbose_L = widgets.Text(
value='0',
placeholder='enter any integer value',
description='Verbose',
disabled=False)
warm_state_L = widgets.SelectMultiple(
options = [True, False],
value = [False],
rows = 2,
description = 'Warm_State',
disabled = False)
j= widgets.HBox([max_iter_L, verbose_L, warm_state_L])
L1_Ratio_L = widgets.Text(
value='None',
placeholder='enter any integer value',
description='L1_Ratio',
style = {'description_width': 'initial'},
disabled=False)
k = widgets.HBox([L1_Ratio_L])
h5 = widgets.HTML('<h4>Select Grid/Random search Hyperparameters</h4>')
search_param_L = widgets.Dropdown(
options=['GridSearch CV', 'Random Search CV'],
value='GridSearch CV',
description='Choose Search Option: ',
style = {'description_width': 'initial'},
disabled=False)
cv_L = widgets.Text(
value='5',
placeholder='enter any integer value',
description='CV',
style = {'description_width': 'initial'},
disabled=False)
scoring_L = widgets.Dropdown(
options = ['accuracy', 'f1', 'roc_auc', 'balanced_accuracy'],
value = 'accuracy',
rows = 4,
description = 'Scoring',
disabled = False)
l = widgets.HBox([search_param_L, cv_L, scoring_L])
n_iter_L = widgets.Text(
value='10',
placeholder='enter any integer value',
description='n_iter',
style = {'description_width': 'initial'},
disabled=False)
n_jobs_L = widgets.Text(
value='1',
placeholder='enter any integer value',
description='n_jobs',
style = {'description_width': 'initial'},
disabled=False)
n_iter_text = widgets.HTML(value='<p><em>For Random Search</em></p>')
m = widgets.HBox([n_jobs_L, n_iter_L, n_iter_text])
null = widgets.HTML('<br></br>')
button_2 = widgets.Button(description='Submit HP_Tune')
out_res = widgets.Output()
def on_out_res_clicked(_):
with out_res:
clear_output()
import pandas as pd
self.new_X = self.X.copy(deep=True)
self.new_y = self.y
self.new_test = self.test.copy(deep=True)
categorical_cols = self.new_X.select_dtypes('object').columns.to_list()
for col in categorical_cols:
self.new_X[col].fillna(self.new_X[col].mode()[0], inplace=True)
numeric_cols = self.new_X.select_dtypes(['float64', 'int64']).columns.to_list()
for col in numeric_cols:
self.new_X[col].fillna(self.new_X[col].mean(), inplace=True)
test_categorical_cols = self.new_test.select_dtypes('object').columns.to_list()
for col in test_categorical_cols:
self.new_test[col].fillna(self.new_test[col].mode()[0], inplace=True)
numeric_cols = self.new_test.select_dtypes(['float64', 'int64']).columns.to_list()
for col in numeric_cols:
self.new_test[col].fillna(self.new_test[col].mean(), inplace=True)
if cat_info.value == '1':
for col in categorical_cols:
self.new_X[col] = self.new_X[col].astype('category')
self.new_X[col] = self.new_X[col].cat.codes
self.new_test[col] = self.new_test[col].astype('category')
self.new_test[col] = self.new_test[col].cat.codes
print('> Categorical columns converted using Catcodes')
if cat_info.value == '2':
self.new_X = pd.get_dummies(self.new_X,drop_first=True)
self.new_test = pd.get_dummies(self.new_test,drop_first=True)
print('> Categorical columns converted using Get_Dummies')
self.new_y = pd.DataFrame(self.train[[self.y]])
self.new_y = pd.get_dummies(self.new_y,drop_first=True)
if std_scr.value == '1':
from sklearn.preprocessing import StandardScaler
scalar = StandardScaler()
self.new_X = pd.DataFrame(scalar.fit_transform(self.new_X), columns=self.new_X.columns, index=self.new_X.index)
self.new_test = pd.DataFrame(scalar.fit_transform(self.new_test), columns=self.new_test.columns, index=self.new_test.index)
print('> standardization using Standard Scaler completed')
elif std_scr.value == '2':
from sklearn.preprocessing import RobustScaler
scalar = RobustScaler()
self.new_X= pd.DataFrame(scalar.fit_transform(self.new_X), columns=self.new_X.columns, index=self.new_X.index)
self.new_test= pd.DataFrame(scalar.fit_transform(self.new_test), columns=self.new_test.columns, index=self.new_test.index)
print('> standardization using Robust Scaler completed')
elif std_scr.value == '3':
from sklearn.preprocessing import MinMaxScaler
scalar = MinMaxScaler()
self.new_X= pd.DataFrame(scalar.fit_transform(self.new_X), columns=self.new_X.columns, index=self.new_X.index)
self.new_test= pd.DataFrame(scalar.fit_transform(self.new_test), columns=self.new_test.columns, index=self.new_test.index)
print('> standardization using MinMax Scaler completed')
elif std_scr.value == 'n':
print('> No standardization done')
if apply_smote.value == 'y':
from imblearn.over_sampling import SMOTE
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
from sklearn.exceptions import DataConversionWarning
warnings.filterwarnings(action='ignore', category=DataConversionWarning)
warnings.filterwarnings('ignore', 'FutureWarning')
sm = SMOTE(random_state=42, n_jobs=-1)
new_X_cols = self.new_X.columns
new_y_cols = self.new_y.columns
self.new_X, self.new_y= sm.fit_resample(self.new_X, self.new_y)
self.new_X = pd.DataFrame(self.new_X, columns=new_X_cols)
self.new_y= pd.DataFrame(self.new_y, columns=new_y_cols)
print('> Oversampling using SMOTE completed')
else:
print('> No oversampling done')
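# By this point the features are NA-imputed, categorically encoded (cat codes or dummies),
# optionally standardized and optionally SMOTE-oversampled; the widget selections below are
# collected into the hyperparameter grid for the search.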
param_grid = {'penalty': list(penalty_L.value),
'dual': list(dual_L.value),
'tol': [float(item) for item in tol_L.value.split(',')],
'C' : [float(item) for item in C_L.value.split(',')],
'fit_intercept' : list(fit_intercept_L.value),
'intercept_scaling' : [float(item) for item in intercept_scaling_L.value.split(',')],
'class_weight' : [None if item == 'None' else item for item in class_weight_L.value],
'random_state' : [int(item) for item in random_state_L.value.split(',')],
'solver' : list(solver_L.value),
'max_iter' : [int(item) for item in max_iter_L.value.split(',')],
# 'multi_class' : list(multiclass.value),
'verbose' : [int(item) for item in verbose_L.value.split(',')],
# 'n_jobs' : [float(item) for item in n_jobs.value.split(',')]
}
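# Illustrative note (not tied to the widget defaults): entering "0.1,1,10" in the C text box and
# selecting the 'l1' and 'l2' penalties would expand to {'C': [0.1, 1.0, 10.0], 'penalty': ['l1', 'l2'], ...},
# i.e. comma-separated text fields are split and cast, and multi-select widgets become lists.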
if self.tracking == True:
import mlflow
from mlflow import log_metric, log_param, log_artifacts
mlflow.sklearn.autolog()
warnings.filterwarnings("ignore")
estimator = LogisticRegression()
if search_param_L.value == 'GridSearch CV':
print('> GridSearch CV in progress...')
grid_lr = GridSearchCV(estimator=estimator,
param_grid = param_grid,
cv = int(cv_L.value),
n_jobs = int(n_jobs_L.value),
scoring = scoring_L.value)
if search_param_L.value == 'Random Search CV':
print('> RandomizedSearch CV in progress...')
grid_lr = RandomizedSearchCV(estimator=estimator,
param_distributions = param_grid,
cv = int(cv_L.value),
n_iter = int(n_iter_L.value),
n_jobs = int(n_jobs_L.value),
scoring = scoring_L.value)
with mlflow.start_run() as run:
warnings.filterwarnings("ignore")
self.classifier = grid_lr.fit(self.new_X.values, self.new_y.values)
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, f1_score, roc_auc_score
X_train, X_test, y_train, y_test = train_test_split(
self.new_X.values, self.new_y.values, test_size=0.2, random_state=1)
predictions = self.classifier.predict(X_test)
acc_score= (accuracy_score(y_test,predictions))
roc_score= (roc_auc_score(y_test,predictions))
f1_macro= (f1_score(y_test, predictions, average='macro'))
mlflow.log_param("acc_score", acc_score)
mlflow.log_param("roc_score", roc_score)
mlflow.log_param("f1_macro", f1_macro)
mlflow.log_param("Best Estimator", self.classifier.best_estimator_)
if self.tracking == False:
estimator = LogisticRegression()
if search_param_L.value == 'GridSearch CV':
print('> GridSearch CV in progress...')
grid_lr = GridSearchCV(estimator=estimator,
param_grid = param_grid,
cv = int(cv_L.value),
scoring = scoring_L.value)
if search_param_L.value == 'Random Search CV':
print('> RandomizedSearch CV in progress...')
grid_lr = RandomizedSearchCV(estimator=estimator,
param_distributions = param_grid,
cv = int(cv_L.value),
n_iter = int(n_iter_L.value),
scoring = scoring_L.value)
self.classifier = grid_lr.fit(self.new_X.values, self.new_y.values)
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, f1_score, roc_auc_score
from ipywidgets import interact, interactive
X_train, X_test, y_train, y_test = train_test_split(
self.new_X.values, self.new_y.values, test_size=0.2, random_state=1)
predictions = self.classifier.predict(X_test)
acc_score= (accuracy_score(y_test,predictions))
roc_score= (roc_auc_score(y_test,predictions))
f1_macro= (f1_score(y_test, predictions, average='macro'))
with out2:
clear_output()
print('\033[1m'+'\033[4m'+'Get_Params \n***************************************'+'\033[0m')
print(self.classifier.get_params)
print('\033[1m'+'\033[4m'+'Best_Estimator \n***********************************'+'\033[0m')
print(self.classifier.best_estimator_)
print('\033[1m'+'\033[4m'+'Metrics on Train data \n******************************'+'\033[0m')
print("acc_score = {:.3f} | roc_score = {:,.3f} | f1_score(macro) = {:,.3f}".format(acc_score, roc_score, f1_macro))
Pred = widgets.HTML(value='<h3><em>Predictions on stand_out test data</em></h3>')
print('\033[1m'+'\033[4m'+'Predictions on stand_out test data \n******************************'+'\033[0m')
self.predict_HP = self.classifier.predict(self.new_test)
print('Prediction completed. \nUse the dot operator in the code cell below to access the predictions, e.g., pph.predict_HP, where pph is the Pywedge_HP class object')
msg = widgets.HTML('<h4>Please switch to output tab for results...</h4>')
msg_1 = widgets.HTML('<h4>Please run mlflow ui in a command prompt to monitor HP tuning results</h4>')
display(msg)
if self.tracking==True:
display(msg_1)
button_2.on_click(on_out_res_clicked)
b = widgets.VBox([button_2, out_res])
h1 = widgets.HTML('<h3>Select Logistic Regression Hyperparameters</h3>')
aa = widgets.VBox([header_2, pp_class, h1, g,h,i,j,k, h5, l, m, null, b])
# Logistic Regression Hyperpameter - Ends
# Decision Tree Hyperparameter - Starts
criterion_D = widgets.SelectMultiple(
options = ['gini', 'entropy'],
value = ['gini'],
description = 'Criterion',
rows = 2,
disabled = False)
splitter_D = widgets.SelectMultiple(
options = ['best', 'random'],
value = ['best'],
rows = 2,
description = 'Splitter',
disabled = False)
max_depth_D = widgets.Text(
value='5',
placeholder='enter any integer value',
description='Max_Depth',
disabled=False)
min_samples_split_D = widgets.Text(
value='2',
placeholder='enter any integer value',
description='min_samples_split',
style = {'description_width': 'initial'},
disabled=False)
min_samples_leaf_D = widgets.Text(
value='1',
placeholder='enter any integer value',
description='min_samples_leaf',
style = {'description_width': 'initial'},
disabled=False)
min_weight_fraction_D = widgets.Text(
value='0.0',
placeholder='enter any float value',
description='min_weight_fraction',
style = {'description_width': 'initial'},
disabled=False)
max_features_D = widgets.SelectMultiple(
options = ['auto', 'sqrt', 'log2'],
value = ['auto'],
description = 'Max_Features',
style = {'description_width': 'initial'},
rows = 3,
disabled = False)
random_state_D = widgets.Text(
value='0',
placeholder='enter any integer value',
description='Random_state',
disabled=False)
max_leaf_nodes_D = widgets.Text(
value='2',
placeholder='enter any integer value',
description='Max_leaf_nodes',
style = {'description_width': 'initial'},
disabled=False)
min_impurity_decrease_D = widgets.Text(
value='0.0',
placeholder='enter any float value',
description='Min_impurity_decrease',
style = {'description_width': 'initial'},
disabled=False)
class_weight_D = widgets.SelectMultiple(
options = ['balanced', 'None'],
value = ['balanced'],
rows = 2,
description = 'Class_weight',
style = {'description_width': 'initial'},
disabled = False)
ccp_alpha_D = widgets.Text(
value='0.0',
placeholder='enter any non-negative float value',
description='ccp_alpha',
disabled=False)
first_row = widgets.HBox([criterion_D, splitter_D, max_features_D])
second_row = widgets.HBox([min_samples_split_D, min_weight_fraction_D, max_depth_D])
third_row = widgets.HBox([random_state_D, max_leaf_nodes_D, min_impurity_decrease_D])
fourth_row = widgets.HBox([ccp_alpha_D, class_weight_D, min_samples_leaf_D])
h5 = widgets.HTML('<h4>Select Grid/Random search Hyperparameters</h4>')
search_param_D = widgets.Dropdown(
options=['GridSearch CV', 'Random Search CV'],
value='GridSearch CV',
description='Choose Search Option: ',
style = {'description_width': 'initial'},
disabled=False)
cv_D = widgets.Text(
value='5',
placeholder='enter any integer value',
description='CV',
style = {'description_width': 'initial'},
disabled=False)
scoring_D = widgets.Dropdown(
options = ['accuracy', 'f1', 'roc_auc', 'balanced_accuracy'],
value = 'accuracy',
rows = 4,
description = 'Scoring',
disabled = False)
l = widgets.HBox([search_param_D, cv_D, scoring_D])
n_iter_D = widgets.Text(
value='10',
placeholder='enter any integer value',
description='n_iter',
style = {'description_width': 'initial'},
disabled=False)
n_jobs_D = widgets.Text(
value='1',
placeholder='enter any integer value',
description='n_jobs',
style = {'description_width': 'initial'},
disabled=False)
n_iter_text = widgets.HTML(value='<p><em>For Random Search</em></p>')
m = widgets.HBox([n_jobs_D, n_iter_D, n_iter_text])
button_3 = widgets.Button(description='Submit HP_Tune')
out_res_DT = widgets.Output()
def on_out_res_clicked_DT(_):
with out_res_DT:
clear_output()
import pandas as pd
self.new_X = self.X.copy(deep=True)
self.new_y = self.y
self.new_test = self.test.copy(deep=True)
categorical_cols = self.new_X.select_dtypes('object').columns.to_list()
for col in categorical_cols:
self.new_X[col].fillna(self.new_X[col].mode()[0], inplace=True)
numeric_cols = self.new_X.select_dtypes(['float64', 'int64']).columns.to_list()
for col in numeric_cols:
self.new_X[col].fillna(self.new_X[col].mean(), inplace=True)
test_categorical_cols = self.new_test.select_dtypes('object').columns.to_list()
for col in test_categorical_cols:
self.new_test[col].fillna(self.new_test[col].mode()[0], inplace=True)
numeric_cols = self.new_test.select_dtypes(['float64', 'int64']).columns.to_list()
for col in numeric_cols:
self.new_test[col].fillna(self.new_test[col].mean(), inplace=True)
if cat_info.value == '1':
for col in categorical_cols:
self.new_X[col] = self.new_X[col].astype('category')
self.new_X[col] = self.new_X[col].cat.codes
self.new_test[col] = self.new_test[col].astype('category')
self.new_test[col] = self.new_test[col].cat.codes
print('> Categorical columns converted using Catcodes')
if cat_info.value == '2':
self.new_X = pd.get_dummies(self.new_X,drop_first=True)
self.new_test = pd.get_dummies(self.new_test,drop_first=True)
print('> Categorical columns converted using Get_Dummies')
self.new_y = pd.DataFrame(self.train[[self.y]])
self.new_y = pd.get_dummies(self.new_y,drop_first=True)
if std_scr.value == '1':
from sklearn.preprocessing import StandardScaler
scalar = StandardScaler()
self.new_X = pd.DataFrame(scalar.fit_transform(self.new_X), columns=self.new_X.columns, index=self.new_X.index)
self.new_test = pd.DataFrame(scalar.fit_transform(self.new_test), columns=self.new_test.columns, index=self.new_test.index)
print('> standardization using Standard Scaler completed')
elif std_scr.value == '2':
from sklearn.preprocessing import RobustScaler
scalar = RobustScaler()
self.new_X= pd.DataFrame(scalar.fit_transform(self.new_X), columns=self.new_X.columns, index=self.new_X.index)
self.new_test= pd.DataFrame(scalar.fit_transform(self.new_test), columns=self.new_test.columns, index=self.new_test.index)
print('> standardization using Robust Scaler completed')
elif std_scr.value == '3':
from sklearn.preprocessing import MinMaxScaler
scalar = MinMaxScaler()
self.new_X= pd.DataFrame(scalar.fit_transform(self.new_X), columns=self.new_X.columns, index=self.new_X.index)
self.new_test= pd.DataFrame(scalar.fit_transform(self.new_test), columns=self.new_test.columns, index=self.new_test.index)
print('> standardization using MinMax Scaler completed')
elif std_scr.value == 'n':
print('> No standardization done')
if apply_smote.value == 'y':
from imblearn.over_sampling import SMOTE
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
from sklearn.exceptions import DataConversionWarning
warnings.filterwarnings(action='ignore', category=DataConversionWarning)
warnings.filterwarnings('ignore', 'FutureWarning')
sm = SMOTE(random_state=42, n_jobs=-1)
new_X_cols = self.new_X.columns
new_y_cols = self.new_y.columns
self.new_X, self.new_y= sm.fit_resample(self.new_X, self.new_y)
self.new_X = pd.DataFrame(self.new_X, columns=new_X_cols)
self.new_y= pd.DataFrame(self.new_y, columns=new_y_cols)
print('> Oversampling using SMOTE completed')
else:
print('> No oversampling done')
# print(criterion_D.value)
param_grid = {'criterion': list(criterion_D.value),
'splitter': list(splitter_D.value),
'max_depth': [int(item) for item in max_depth_D.value.split(',')],
'min_samples_split' : [int(item) for item in min_samples_split_D.value.split(',')],
'min_samples_leaf' : [int(item) for item in min_samples_leaf_D.value.split(',')],
# 'min_weight_fraction' : [float(item) for item in min_weight_fraction.value.split(',')],
'max_features' : list(max_features_D.value),
'random_state' : [int(item) for item in random_state_D.value.split(',')],
'max_leaf_nodes' : [int(item) for item in max_leaf_nodes_D.value.split(',')],
'min_impurity_decrease' : [float(item) for item in min_impurity_decrease_D.value.split(',')],
'ccp_alpha' : [float(item) for item in ccp_alpha_D.value.split(',')],
'class_weight' : [None if item == 'None' else item for item in class_weight_D.value]
}
if self.tracking == True:
import mlflow
from mlflow import log_metric, log_param, log_artifacts
mlflow.sklearn.autolog()
warnings.filterwarnings("ignore")
estimator = DecisionTreeClassifier()
if search_param_D.value == 'GridSearch CV':
print('> GridSearch CV in progress...')
grid_lr = GridSearchCV(estimator=estimator,
param_grid = param_grid,
cv = int(cv_D.value),
n_jobs = int(n_jobs_D.value),
scoring = scoring_D.value)
if search_param_D.value == 'Random Search CV':
print('> RandomizedSearch CV in progress...')
grid_lr = RandomizedSearchCV(estimator=estimator,
param_distributions = param_grid,
cv = int(cv_D.value),
n_jobs = int(n_jobs_D.value),
n_iter = int(n_iter_D.value),
scoring = scoring_D.value)
with mlflow.start_run() as run:
warnings.filterwarnings("ignore", category=Warning)
self.classifier = grid_lr.fit(self.new_X.values, self.new_y.values)
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, f1_score, roc_auc_score
X_train, X_test, y_train, y_test = train_test_split(
self.new_X.values, self.new_y.values, test_size=0.2, random_state=1)
predictions = self.classifier.predict(X_test)
acc_score= (accuracy_score(y_test,predictions))
roc_score= (roc_auc_score(y_test,predictions))
f1_macro= (f1_score(y_test, predictions, average='macro'))
mlflow.log_param("acc_score", acc_score)
mlflow.log_param("roc_score", roc_score)
mlflow.log_param("f1_macro", f1_macro)
mlflow.log_param("Best Estimator", self.classifier.best_estimator_)
if self.tracking == False:
estimator = DecisionTreeClassifier()
if search_param_D.value == 'GridSearch CV':
print('> GridSearch CV in progress...')
grid_lr = GridSearchCV(estimator=estimator,
param_grid = param_grid,
n_jobs = int(n_jobs_D.value),
cv = int(cv_D.value),
scoring = scoring_D.value)
if search_param_D.value == 'Random Search CV':
print('> RandomizedSearch CV in progress...')
grid_lr = RandomizedSearchCV(estimator=estimator,
param_distributions = param_grid,
cv = int(cv_D.value),
n_jobs = int(n_jobs_D.value),
n_iter = int(n_iter_D.value),
scoring = scoring_D.value)
self.classifier = grid_lr.fit(self.new_X.values, self.new_y.values)
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, f1_score, roc_auc_score
X_train, X_test, y_train, y_test = train_test_split(
self.new_X.values, self.new_y.values, test_size=0.2, random_state=1)
predictions = self.classifier.predict(X_test)
acc_score= (accuracy_score(y_test,predictions))
roc_score= (roc_auc_score(y_test,predictions))
f1_macro= (f1_score(y_test, predictions, average='macro'))
with out2:
clear_output()
print('\033[1m'+'\033[4m'+'Get_Params \n***************************************'+'\033[0m')
print(self.classifier.get_params)
print('\033[1m'+'\033[4m'+'Best_Estimator \n***********************************'+'\033[0m')
print(self.classifier.best_estimator_)
print('\033[1m'+'\033[4m'+'Metrics on Train data \n******************************'+'\033[0m')
print("acc_score = {:.3f} | roc_score = {:,.3f} | f1_score(macro) = {:,.3f}".format(acc_score, roc_score, f1_macro))
print('\033[1m'+'\033[4m'+'Predictions on stand_out test data \n******************************'+'\033[0m')
self.predict_HP = self.classifier.predict(self.new_test)
print('Prediction completed. \nUse the dot operator in the code cell below to access the predictions, e.g., pph.predict_HP, where pph is the Pywedge_HP class object')
msg = widgets.HTML('<h4>Please switch to output tab for results...</h4>')
msg_1 = widgets.HTML('<h4>Please run mlflow ui in a command prompt to monitor HP tuning results</h4>')
display(msg)
if self.tracking==True:
display(msg_1)
button_3.on_click(on_out_res_clicked_DT)
b = widgets.VBox([button_3, out_res_DT])
h1 = widgets.HTML('<h3>Select Decision Tree Hyperparameters</h3>')
frame = widgets.VBox([header_2, pp_class, h1, first_row, second_row, third_row, fourth_row, h5, l, m, b])
# Decision Tree Hyperparameter Ends
# Random Forest Hyperparameter Starts
n_estimators_R = widgets.Text(
value='100',
placeholder='enter any integer value',
description='n_estimators',
disabled=False)
criterion_R = widgets.SelectMultiple(
options = ['gini', 'entropy'],
value = ['gini'],
rows = 2,
description = 'Criterion',
disabled = False)
max_depth_R = widgets.Text(
value='5',
placeholder='enter any integer value',
description='Max_Depth',
disabled=False)
min_samples_split_R = widgets.Text(
value='2',
placeholder='enter any integer value',
description='min_samples_split',
style = {'description_width': 'initial'},
disabled=False)
min_samples_leaf_R = widgets.Text(
value='1',
placeholder='enter any integer value',
description='min_samples_leaf',
style = {'description_width': 'initial'},
disabled=False)
min_weight_fraction_leaf_R = widgets.Text(
value='0.0',
placeholder='enter any float value',
description='min_weight_fraction',
style = {'description_width': 'initial'},
disabled=False)
max_features_R = widgets.SelectMultiple(
options = ['auto', 'sqrt', 'log2'],
value = ['auto'],
description = 'Max_Features',
style = {'description_width': 'initial'},
rows = 3,
disabled = False)
random_state_R = widgets.Text(
value='0',
placeholder='enter any integer value',
description='Random_state',
style = {'description_width': 'initial'},
disabled=False)
max_leaf_nodes_R = widgets.Text(
value='2',
placeholder='enter any integer value',
description='Max_leaf_nodes',
style = {'description_width': 'initial'},
disabled=False)
min_impurity_decrease_R = widgets.Text(
value='0.0',
placeholder='enter any float value',
description='Min_impurity_decrease',
style = {'description_width': 'initial'},
disabled=False)
bootstrap_R = widgets.SelectMultiple(
options = [True, False],
value = [False],
description = 'Bootstrap',
rows = 2,
disabled = False)
oob_score_R = widgets.SelectMultiple(
options = [True, False],
value = [False],
description = 'oob_score',
rows = 2,
disabled = False)
verbose_R = widgets.Text(
value='0',
placeholder='enter any integer value',
description='Verbose',
disabled=False)
warm_state_R = widgets.SelectMultiple(
options = [True, False],
value = [False],
description = 'Warm_State',
style = {'description_width': 'initial'},
rows = 2,
disabled = False)
class_weight_R = widgets.SelectMultiple(
options = ['balanced', 'balanced_subsample', 'None'],
value = ['balanced'],
description = 'Class_weight',
rows = 3,
style = {'description_width': 'initial'},
disabled = False)
ccp_alpha_R = widgets.Text(
value='0.0',
placeholder='enter any non-negative float value',
description='ccp_alpha',
disabled=False)
max_samples_R = widgets.Text(
value='2',
placeholder='enter any float value',
description='max_samples',
style = {'description_width': 'initial'},
disabled=False)
h5 = widgets.HTML('<h4>Select Grid/Random search Hyperparameters</h4>')
search_param_R = widgets.Dropdown(
options=['GridSearch CV', 'Random Search CV'],
value='GridSearch CV',
description='Choose Search Option: ',
style = {'description_width': 'initial'},
disabled=False)
cv_R = widgets.Text(
value='5',
placeholder='enter any integer value',
description='CV',
style = {'description_width': 'initial'},
disabled=False)
scoring_R = widgets.Dropdown(
options = ['accuracy', 'f1', 'roc_auc', 'balanced_accuracy'],
value = 'accuracy',
rows = 4,
description = 'Scoring',
disabled = False)
l = widgets.HBox([search_param_R, cv_R, scoring_R])
n_jobs_R = widgets.Text(
value='1',
placeholder='enter any integer value',
description='n_jobs',
style = {'description_width': 'initial'},
disabled=False)
n_iter_R = widgets.Text(
value='10',
placeholder='enter any integer value',
description='n_iter',
style = {'description_width': 'initial'},
disabled=False)
n_iter_text = widgets.HTML(value='<p><em>For Random Search</em></p>')
m = widgets.HBox([n_jobs_R, n_iter_R, n_iter_text])
first_row = widgets.HBox([n_estimators_R, criterion_R, max_depth_R])
second_row = widgets.HBox([min_samples_split_R, min_samples_leaf_R, min_weight_fraction_leaf_R])
third_row = widgets.HBox([max_features_R, max_leaf_nodes_R, min_impurity_decrease_R])
fourth_row = widgets.HBox([max_samples_R, bootstrap_R, oob_score_R])
fifth_row = widgets.HBox([warm_state_R, random_state_R, verbose_R])
sixth_row = widgets.HBox([class_weight_R, ccp_alpha_R])
button_4 = widgets.Button(description='Submit RF GridSearchCV')
out_res_RF = widgets.Output()
def on_out_res_clicked_RF(_):
with out_res_RF:
clear_output()
import pandas as pd
self.new_X = self.X.copy(deep=True)
self.new_y = self.y
self.new_test = self.test.copy(deep=True)
categorical_cols = self.new_X.select_dtypes('object').columns.to_list()
for col in categorical_cols:
self.new_X[col].fillna(self.new_X[col].mode()[0], inplace=True)
numeric_cols = self.new_X.select_dtypes(['float64', 'int64']).columns.to_list()
for col in numeric_cols:
self.new_X[col].fillna(self.new_X[col].mean(), inplace=True)
test_categorical_cols = self.new_test.select_dtypes('object').columns.to_list()
for col in test_categorical_cols:
self.new_test[col].fillna(self.new_test[col].mode()[0], inplace=True)
numeric_cols = self.new_test.select_dtypes(['float64', 'int64']).columns.to_list()
for col in numeric_cols:
self.new_test[col].fillna(self.new_test[col].mean(), inplace=True)
if cat_info.value == '1':
for col in categorical_cols:
self.new_X[col] = self.new_X[col].astype('category')
self.new_X[col] = self.new_X[col].cat.codes
self.new_test[col] = self.new_test[col].astype('category')
self.new_test[col] = self.new_test[col].cat.codes
print('> Categorical columns converted using Catcodes')
if cat_info.value == '2':
self.new_X = pd.get_dummies(self.new_X,drop_first=True)
self.new_test = pd.get_dummies(self.new_test,drop_first=True)
print('> Categorical columns converted using Get_Dummies')
self.new_y = pd.DataFrame(self.train[[self.y]])
self.new_y = pd.get_dummies(self.new_y,drop_first=True)
if std_scr.value == '1':
from sklearn.preprocessing import StandardScaler
scalar = StandardScaler()
self.new_X = pd.DataFrame(scalar.fit_transform(self.new_X), columns=self.new_X.columns, index=self.new_X.index)
self.new_test = pd.DataFrame(scalar.fit_transform(self.new_test), columns=self.new_test.columns, index=self.new_test.index)
print('> standardization using Standard Scaler completed')
elif std_scr.value == '2':
from sklearn.preprocessing import RobustScaler
scalar = RobustScaler()
self.new_X= pd.DataFrame(scalar.fit_transform(self.new_X), columns=self.new_X.columns, index=self.new_X.index)
self.new_test= pd.DataFrame(scalar.fit_transform(self.new_test), columns=self.new_test.columns, index=self.new_test.index)
print('> standardization using Robust Scaler completed')
elif std_scr.value == '3':
from sklearn.preprocessing import MinMaxScaler
scalar = MinMaxScaler()
self.new_X= pd.DataFrame(scalar.fit_transform(self.new_X), columns=self.new_X.columns, index=self.new_X.index)
self.new_test= pd.DataFrame(scalar.fit_transform(self.new_test), columns=self.new_test.columns, index=self.new_test.index)
print('> standardization using MinMax Scaler completed')
elif std_scr.value == 'n':
print('> No standardization done')
if apply_smote.value == 'y':
from imblearn.over_sampling import SMOTE
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
from sklearn.exceptions import DataConversionWarning
warnings.filterwarnings(action='ignore', category=DataConversionWarning)
warnings.filterwarnings('ignore', 'FutureWarning')
sm = SMOTE(random_state=42, n_jobs=-1)
new_X_cols = self.new_X.columns
new_y_cols = self.new_y.columns
self.new_X, self.new_y= sm.fit_resample(self.new_X, self.new_y)
self.new_X = pd.DataFrame(self.new_X, columns=new_X_cols)
self.new_y= pd.DataFrame(self.new_y, columns=new_y_cols)
print('> Oversampling using SMOTE completed')
else:
print('> No oversampling done')
param_grid = {'n_estimators' : [int(item) for item in n_estimators_R.value.split(',')],
'criterion': list(criterion_R.value),
'max_depth': [int(item) for item in max_depth_R.value.split(',')],
'min_samples_split' : [int(item) for item in min_samples_split_R.value.split(',')],
'min_samples_leaf' : [int(item) for item in min_samples_leaf_R.value.split(',')],
'min_weight_fraction_leaf' : [float(item) for item in min_weight_fraction_leaf_R.value.split(',')],
'max_features' : list(max_features_R.value),
'random_state' : [int(item) for item in random_state_R.value.split(',')],
'max_leaf_nodes' : [int(item) for item in max_leaf_nodes_R.value.split(',')],
'min_impurity_decrease' : [float(item) for item in min_impurity_decrease_R.value.split(',')],
'bootstrap' : list(bootstrap_R.value),
'oob_score' : list(oob_score_R.value),
'verbose' : [int(item) for item in verbose_R.value.split(',')],
'class_weight' : [None if item == 'None' else item for item in class_weight_R.value],
'ccp_alpha' : [float(item) for item in ccp_alpha_R.value.split(',')],
'max_samples' : [int(item) for item in max_samples_R.value.split(',')]
}
if self.tracking == True:
import mlflow
from mlflow import log_metric, log_param, log_artifacts
mlflow.sklearn.autolog()
estimator = RandomForestClassifier()
if search_param_R.value == 'GridSearch CV':
print('> GridSearch CV in progress...')
grid_lr = GridSearchCV(estimator=estimator,
param_grid = param_grid,
cv = int(cv_R.value),
n_jobs = int(n_jobs_R.value),
scoring = scoring_R.value)
if search_param_R.value == 'Random Search CV':
print('> RandomizedSearch CV in progress...')
grid_lr = RandomizedSearchCV(estimator=estimator,
param_distributions = param_grid,
cv = int(cv_R.value),
n_jobs = int(n_jobs_R.value),
n_iter = int(n_iter_R.value),
scoring = scoring_R.value)
with mlflow.start_run() as run:
self.classifier = grid_lr.fit(self.new_X.values, self.new_y.values)
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, f1_score, roc_auc_score
X_train, X_test, y_train, y_test = train_test_split(
self.new_X.values, self.new_y.values, test_size=0.2, random_state=1)
predictions = self.classifier.predict(X_test)
acc_score= (accuracy_score(y_test,predictions))
roc_score= (roc_auc_score(y_test,predictions))
f1_macro= (f1_score(y_test, predictions, average='macro'))
mlflow.log_param("acc_score", acc_score)
mlflow.log_param("roc_score", roc_score)
mlflow.log_param("f1_macro", f1_macro)
mlflow.log_param("Best Estimator", self.classifier.best_estimator_)
if self.tracking == False:
estimator = RandomForestClassifier()
if search_param_R.value == 'GridSearch CV':
print('> GridSearch CV in progress...')
grid_lr = GridSearchCV(estimator=estimator,
param_grid = param_grid,
n_jobs = int(n_jobs_R.value),
cv = int(cv_R.value),
scoring = scoring_R.value)
if search_param_R.value == 'Random Search CV':
print('> RandomizedSearch CV in progress...')
grid_lr = RandomizedSearchCV(estimator=estimator,
param_distributions = param_grid,
cv = int(cv_R.value),
n_jobs = int(n_jobs_R.value),
n_iter = int(n_iter_R.value),
scoring = scoring_R.value)
self.classifier = grid_lr.fit(self.new_X.values, self.new_y.values)
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, f1_score, roc_auc_score
X_train, X_test, y_train, y_test = train_test_split(
self.new_X.values, self.new_y.values, test_size=0.2, random_state=1)
predictions = self.classifier.predict(X_test)
acc_score= (accuracy_score(y_test,predictions))
roc_score= (roc_auc_score(y_test,predictions))
f1_macro= (f1_score(y_test, predictions, average='macro'))
with out2:
clear_output()
print('\033[1m'+'\033[4m'+'Get_Params \n***************************************'+'\033[0m')
print(self.classifier.get_params)
print('\033[1m'+'\033[4m'+'Best_Estimator \n***********************************'+'\033[0m')
print(self.classifier.best_estimator_)
print('\033[1m'+'\033[4m'+'Metrics on Train data \n******************************'+'\033[0m')
print("acc_score = {:.3f} | roc_score = {:,.3f} | f1_score(macro) = {:,.3f}".format(acc_score, roc_score, f1_macro))
print('\033[1m'+'\033[4m'+'Predictions on stand_out test data \n******************************'+'\033[0m')
self.predict_HP = self.classifier.predict(self.new_test)
print('Prediction completed. \nUse the dot operator in the code cell below to access the predictions, e.g., pph.predict_HP, where pph is the Pywedge_HP class object')
msg = widgets.HTML('<h4>Please switch to output tab for results...</h4>')
msg_1 = widgets.HTML('<h4>Please run mlflow ui in a command prompt to monitor HP tuning results</h4>')
display(msg)
if self.tracking==True:
display(msg_1)
button_4.on_click(on_out_res_clicked_RF)
b = widgets.VBox([button_4, out_res_RF])
h1 = widgets.HTML('<h3>Select Random Forest Hyperparameters</h3>')
frame_RF = widgets.VBox([header_2, pp_class, h1, first_row, second_row, third_row, fourth_row, fifth_row, sixth_row, h5, l, m, b])
# Random Forest Hyperparameter ends
# KNN Classifier Hyperparameter Starts
n_neighbors_k = widgets.Text(
value='5',
placeholder='enter any integer value',
description='n_neighbors',
disabled=False)
weights_k = widgets.SelectMultiple(
options = ['uniform', 'distance'],
value = ['uniform'],
rows = 2,
description = 'Weights',
disabled = False)
algorithm_k = widgets.SelectMultiple(
options = ['auto', 'ball_tree', 'kd_tree', 'brute'],
value = ['auto'],
rows = 4,
description = 'Algorithm',
disabled = False)
leaf_size_k = widgets.Text(
value='30',
placeholder='enter any integer value',
description='Leaf_Size',
disabled=False)
p_k = widgets.Text(
value='2',
placeholder='enter any integer value',
description='p (Power param)',
disabled=False)
metric_k = widgets.SelectMultiple(
options = ['euclidean', 'manhattan', 'chebyshev', 'minkowski'],
value = ['minkowski'],
rows = 4,
description = 'Metric',
disabled = False)
h5 = widgets.HTML('<h4>Select Grid/Random search Hyperparameters</h4>')
search_param_K = widgets.Dropdown(
options=['GridSearch CV', 'Random Search CV'],
value='GridSearch CV',
description='Choose Search Option: ',
style = {'description_width': 'initial'},
disabled=False)
cv_K = widgets.Text(
value='5',
placeholder='enter any integer value',
description='CV',
style = {'description_width': 'initial'},
disabled=False)
scoring_K = widgets.Dropdown(
options = ['accuracy', 'f1', 'roc_auc', 'balanced_accuracy'],
value = 'accuracy',
rows = 4,
description = 'Scoring',
disabled = False)
l = widgets.HBox([search_param_K, cv_K, scoring_K])
n_iter_K = widgets.Text(
value='10',
placeholder='enter any integer value',
description='n_iter',
style = {'description_width': 'initial'},
disabled=False)
n_jobs_K = widgets.Text(
value='1',
placeholder='enter any integer value',
description='n_jobs',
style = {'description_width': 'initial'},
disabled=False)
n_iter_text = widgets.HTML(value='<p><em>For Random Search</em></p>')
m = widgets.HBox([n_jobs_K, n_iter_K, n_iter_text])
first_row = widgets.HBox([n_neighbors_k, weights_k, algorithm_k])
second_row = widgets.HBox([leaf_size_k, p_k, metric_k])
button_5 = widgets.Button(description='Submit KNN GridSearchCV')
out_res_K = widgets.Output()
def on_out_res_clicked_K(_):
with out_res_K:
clear_output()
import pandas as pd
self.new_X = self.X.copy(deep=True)
self.new_y = self.y
self.new_test = self.test.copy(deep=True)
categorical_cols = self.new_X.select_dtypes('object').columns.to_list()
for col in categorical_cols:
self.new_X[col].fillna(self.new_X[col].mode()[0], inplace=True)
numeric_cols = self.new_X.select_dtypes(['float64', 'int64']).columns.to_list()
for col in numeric_cols:
self.new_X[col].fillna(self.new_X[col].mean(), inplace=True)
test_categorical_cols = self.new_test.select_dtypes('object').columns.to_list()
for col in test_categorical_cols:
self.new_test[col].fillna(self.new_test[col].mode()[0], inplace=True)
numeric_cols = self.new_test.select_dtypes(['float64', 'int64']).columns.to_list()
for col in numeric_cols:
self.new_test[col].fillna(self.new_test[col].mean(), inplace=True)
if cat_info.value == '1':
for col in categorical_cols:
self.new_X[col] = self.new_X[col].astype('category')
self.new_X[col] = self.new_X[col].cat.codes
self.new_test[col] = self.new_test[col].astype('category')
self.new_test[col] = self.new_test[col].cat.codes
print('> Categorical columns converted using Catcodes')
if cat_info.value == '2':
self.new_X = pd.get_dummies(self.new_X,drop_first=True)
self.new_test = pd.get_dummies(self.new_test,drop_first=True)
print('> Categorical columns converted using Get_Dummies')
self.new_y = pd.DataFrame(self.train[[self.y]])
self.new_y = pd.get_dummies(self.new_y,drop_first=True)
if std_scr.value == '1':
from sklearn.preprocessing import StandardScaler
scalar = StandardScaler()
self.new_X = pd.DataFrame(scalar.fit_transform(self.new_X), columns=self.new_X.columns, index=self.new_X.index)
self.new_test = pd.DataFrame(scalar.fit_transform(self.new_test), columns=self.new_test.columns, index=self.new_test.index)
print('> standardization using Standard Scaler completed')
elif std_scr.value == '2':
from sklearn.preprocessing import RobustScaler
scalar = RobustScaler()
self.new_X= pd.DataFrame(scalar.fit_transform(self.new_X), columns=self.new_X.columns, index=self.new_X.index)
self.new_test= pd.DataFrame(scalar.fit_transform(self.new_test), columns=self.new_test.columns, index=self.new_test.index)
print('> standardization using Robust Scaler completed')
elif std_scr.value == '3':
from sklearn.preprocessing import MinMaxScaler
scalar = MinMaxScaler()
self.new_X= pd.DataFrame(scalar.fit_transform(self.new_X), columns=self.new_X.columns, index=self.new_X.index)
self.new_test= pd.DataFrame(scalar.fit_transform(self.new_test), columns=self.new_test.columns, index=self.new_test.index)
print('> standardization using MinMax Scaler completed')
elif std_scr.value == 'n':
print('> No standardization done')
if apply_smote.value == 'y':
from imblearn.over_sampling import SMOTE
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
from sklearn.exceptions import DataConversionWarning
warnings.filterwarnings(action='ignore', category=DataConversionWarning)
warnings.filterwarnings('ignore', 'FutureWarning')
sm = SMOTE(random_state=42, n_jobs=-1)
new_X_cols = self.new_X.columns
new_y_cols = self.new_y.columns
self.new_X, self.new_y= sm.fit_resample(self.new_X, self.new_y)
self.new_X = pd.DataFrame(self.new_X, columns=new_X_cols)
self.new_y= pd.DataFrame(self.new_y, columns=new_y_cols)
print('> Oversampling using SMOTE completed')
else:
print('> No oversampling done')
# print(n_neighbors_k.value)
param_grid = {'n_neighbors' : [int(item) for item in n_neighbors_k.value.split(',')],
'weights': list(weights_k.value),
'algorithm': list(algorithm_k.value),
'leaf_size' : [int(item) for item in leaf_size_k.value.split(',')],
'p' : [int(item) for item in p_k.value.split(',')],
'metric' : list(metric_k.value),
}
if self.tracking == True:
import mlflow
from mlflow import log_metric, log_param, log_artifacts
mlflow.sklearn.autolog()
estimator = KNeighborsClassifier()
if search_param_K.value == 'GridSearch CV':
print('> GridSearch CV in progress...')
grid_lr = GridSearchCV(estimator=estimator,
param_grid = param_grid,
cv = int(cv_K.value),
n_jobs = int(n_jobs_K.value),
scoring = scoring_K.value)
if search_param_K.value == 'Random Search CV':
print('> RandomizedSearch CV in progress...')
grid_lr = RandomizedSearchCV(estimator=estimator,
param_distributions = param_grid,
cv = int(cv_K.value),
n_iter = int(n_iter_K.value),
n_jobs = int(n_jobs_K.value),
scoring = scoring_K.value)
with mlflow.start_run() as run:
self.classifier = grid_lr.fit(self.new_X.values, self.new_y.values)
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, f1_score, roc_auc_score
X_train, X_test, y_train, y_test = train_test_split(
self.new_X.values, self.new_y.values, test_size=0.2, random_state=1)
predictions = self.classifier.predict(X_test)
acc_score= (accuracy_score(y_test,predictions))
roc_score= (roc_auc_score(y_test,predictions))
f1_macro= (f1_score(y_test, predictions, average='macro'))
mlflow.log_param("acc_score", acc_score)
mlflow.log_param("roc_score", roc_score)
mlflow.log_param("f1_macro", f1_macro)
mlflow.log_param("Best Estimator", self.classifier.best_estimator_)
if self.tracking == False:
estimator = KNeighborsClassifier()
if search_param_K.value == 'GridSearch CV':
print('> GridSearch CV in progress...')
grid_lr = GridSearchCV(estimator=estimator,
param_grid = param_grid,
n_jobs = int(n_jobs_K.value),
cv = int(cv_K.value),
scoring = scoring_K.value)
if search_param_K.value == 'Random Search CV':
print('> RandomizedSearch CV in progress...')
grid_lr = RandomizedSearchCV(estimator=estimator,
param_distributions = param_grid,
cv = int(cv_K.value),
n_jobs = int(n_jobs_K.value),
n_iter = int(n_iter_K.value),
scoring = scoring_K.value)
self.classifier = grid_lr.fit(self.new_X.values, self.new_y.values)
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, f1_score, roc_auc_score
X_train, X_test, y_train, y_test = train_test_split(
self.new_X.values, self.new_y.values, test_size=0.2, random_state=1)
predictions = self.classifier.predict(X_test)
acc_score= (accuracy_score(y_test,predictions))
roc_score= (roc_auc_score(y_test,predictions))
f1_macro= (f1_score(y_test, predictions, average='macro'))
with out2:
clear_output()
print('\033[1m'+'\033[4m'+'Get_Params \n***************************************'+'\033[0m')
print(self.classifier.get_params)
print('\033[1m'+'\033[4m'+'Best_Estimator \n***********************************'+'\033[0m')
print(self.classifier.best_estimator_)
print('\033[1m'+'\033[4m'+'Metrics on Train data \n******************************'+'\033[0m')
print("acc_score = {:.3f} | roc_score = {:,.3f} | f1_score(macro) = {:,.3f}".format(acc_score, roc_score, f1_macro))
print('\033[1m'+'\033[4m'+'Predictions on stand_out test data \n******************************'+'\033[0m')
self.predict_HP = self.classifier.predict(self.new_test)
print('Prediction completed. \nUse the dot operator in the code cell below to access the predictions, e.g., pph.predict_HP, where pph is the Pywedge_HP class object')
msg = widgets.HTML('<h4>Please switch to output tab for results...</h4>')
msg_1 = widgets.HTML('<h4>Please run mlflow ui in a command prompt to monitor HP tuning results</h4>')
display(msg)
if self.tracking==True:
display(msg_1)
button_5.on_click(on_out_res_clicked_K)
b = widgets.VBox([button_5, out_res_K])
h1 = widgets.HTML('<h3>Select KNN Classifier Hyperparameters</h3>')
frame_K = widgets.VBox([header_2, pp_class, h1, first_row, second_row, h5, l, m, b])
#KNN Classifier Hyperparameter ends
# Adaboost Classifier Hyperparameter Starts
n_estimators_A = widgets.Text(
value='50',
placeholder='enter any integer value',
description='n_estimators',
disabled=False)
learning_rate_A = widgets.Text(
value='1',
placeholder='enter any float value',
description='learning_rate',
disabled=False)
algorithm_A = widgets.SelectMultiple(
options = ['SAMME', 'SAMME.R'],
value = ['SAMME.R'],
rows = 2,
description = 'Algorithm',
disabled = False)
random_state_A = widgets.Text(
value='0',
placeholder='enter any integer value',
description='Random_state',
style = {'description_width': 'initial'},
disabled=False)
h5 = widgets.HTML('<h4>Select Grid/Random search Hyperparameters</h4>')
search_param_A = widgets.Dropdown(
options=['GridSearch CV', 'Random Search CV'],
value='GridSearch CV',
description='Choose Search Option: ',
style = {'description_width': 'initial'},
disabled=False)
cv_A = widgets.Text(
value='5',
placeholder='enter any integer value',
description='CV',
style = {'description_width': 'initial'},
disabled=False)
scoring_A = widgets.Dropdown(
options = ['accuracy', 'f1', 'roc_auc', 'balanced_accuracy'],
value = 'accuracy',
rows = 4,
description = 'Scoring',
disabled = False)
l = widgets.HBox([search_param_A, cv_A, scoring_A])
n_jobs_A = widgets.Text(
value='1',
placeholder='enter any integer value',
description='n_jobs',
style = {'description_width': 'initial'},
disabled=False)
n_iter_A = widgets.Text(
value='10',
placeholder='enter any integer value',
description='n_iter',
style = {'description_width': 'initial'},
disabled=False)
n_iter_text = widgets.HTML(value='<p><em>For Random Search</em></p>')
m = widgets.HBox([n_jobs_A, n_iter_A, n_iter_text])
first_row = widgets.HBox([n_estimators_A, learning_rate_A, algorithm_A])
second_row = widgets.HBox([random_state_A])
button_6 = widgets.Button(description='Submit Adaboost HPTune')
out_res_ADA = widgets.Output()
def on_out_res_clicked_ADA(_):
with out_res_ADA:
clear_output()
import pandas as pd
self.new_X = self.X.copy(deep=True)
self.new_y = self.y
self.new_test = self.test.copy(deep=True)
categorical_cols = self.new_X.select_dtypes('object').columns.to_list()
for col in categorical_cols:
self.new_X[col].fillna(self.new_X[col].mode()[0], inplace=True)
numeric_cols = self.new_X.select_dtypes(['float64', 'int64']).columns.to_list()
for col in numeric_cols:
self.new_X[col].fillna(self.new_X[col].mean(), inplace=True)
test_categorical_cols = self.new_test.select_dtypes('object').columns.to_list()
for col in test_categorical_cols:
self.new_test[col].fillna(self.new_test[col].mode()[0], inplace=True)
numeric_cols = self.new_test.select_dtypes(['float64', 'int64']).columns.to_list()
for col in numeric_cols:
self.new_test[col].fillna(self.new_test[col].mean(), inplace=True)
if cat_info.value == '1':
for col in categorical_cols:
self.new_X[col] = self.new_X[col].astype('category')
self.new_X[col] = self.new_X[col].cat.codes
self.new_test[col] = self.new_test[col].astype('category')
self.new_test[col] = self.new_test[col].cat.codes
print('> Categorical columns converted using Catcodes')
if cat_info.value == '2':
self.new_X = pd.get_dummies(self.new_X,drop_first=True)
self.new_test = pd.get_dummies(self.new_test,drop_first=True)
print('> Categorical columns converted using Get_Dummies')
self.new_y = pd.DataFrame(self.train[[self.y]])
self.new_y = pd.get_dummies(self.new_y,drop_first=True)
if std_scr.value == '1':
from sklearn.preprocessing import StandardScaler
scalar = StandardScaler()
self.new_X = pd.DataFrame(scalar.fit_transform(self.new_X), columns=self.new_X.columns, index=self.new_X.index)
self.new_test = pd.DataFrame(scalar.fit_transform(self.new_test), columns=self.new_test.columns, index=self.new_test.index)
print('> standardization using Standard Scaler completed')
elif std_scr.value == '2':
from sklearn.preprocessing import RobustScaler
scalar = RobustScaler()
self.new_X= pd.DataFrame(scalar.fit_transform(self.new_X), columns=self.new_X.columns, index=self.new_X.index)
self.new_test= pd.DataFrame(scalar.fit_transform(self.new_test), columns=self.new_test.columns, index=self.new_test.index)
print('> standardization using Robust Scaler completed')
elif std_scr.value == '3':
from sklearn.preprocessing import MinMaxScaler
scalar = MinMaxScaler()
self.new_X= pd.DataFrame(scalar.fit_transform(self.new_X), columns=self.new_X.columns, index=self.new_X.index)
self.new_test= pd.DataFrame(scalar.fit_transform(self.new_test), columns=self.new_test.columns, index=self.new_test.index)
print('> standardization using MinMax Scaler completed')
elif std_scr.value == 'n':
print('> No standardization done')
if apply_smote.value == 'y':
from imblearn.over_sampling import SMOTE
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
from sklearn.exceptions import DataConversionWarning
warnings.filterwarnings(action='ignore', category=DataConversionWarning)
warnings.filterwarnings('ignore', 'FutureWarning')
sm = SMOTE(random_state=42, n_jobs=-1)
new_X_cols = self.new_X.columns
new_y_cols = self.new_y.columns
self.new_X, self.new_y= sm.fit_resample(self.new_X, self.new_y)
self.new_X = pd.DataFrame(self.new_X, columns=new_X_cols)
self.new_y= pd.DataFrame(self.new_y, columns=new_y_cols)
print('> Oversampling using SMOTE completed')
else:
print('> No oversampling done')
param_grid = {'n_estimators' : [int(item) for item in n_estimators_A.value.split(',')],
'learning_rate': [float(item) for item in learning_rate_A.value.split(',')],
'algorithm': list(algorithm_A.value),
'random_state' : [int(item) for item in random_state_A.value.split(',')],
}
if self.tracking == True:
import mlflow
from mlflow import log_metric, log_param, log_artifacts
mlflow.sklearn.autolog()
estimator = AdaBoostClassifier()
if search_param_A.value == 'GridSearch CV':
print('> GridSearch CV in progress...')
grid_lr = GridSearchCV(estimator=estimator,
param_grid = param_grid,
cv = int(cv_A.value),
n_jobs = int(n_jobs_A.value),
scoring = scoring_A.value)
if search_param_A.value == 'Random Search CV':
print('> RandomizedSearch CV in progress...')
grid_lr = RandomizedSearchCV(estimator=estimator,
param_distributions = param_grid,
cv = int(cv_A.value),
n_jobs = int(n_jobs_A.value),
n_iter = int(n_iter_A.value),
scoring = scoring_A.value)
with mlflow.start_run() as run:
self.classifier = grid_lr.fit(self.new_X.values, self.new_y.values)
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, f1_score, roc_auc_score
X_train, X_test, y_train, y_test = train_test_split(
self.new_X.values, self.new_y.values, test_size=0.2, random_state=1)
predictions = self.classifier.predict(X_test)
acc_score= (accuracy_score(y_test,predictions))
roc_score= (roc_auc_score(y_test,predictions))
f1_macro= (f1_score(y_test, predictions, average='macro'))
mlflow.log_param("acc_score", acc_score)
mlflow.log_param("roc_score", roc_score)
mlflow.log_param("f1_macro", f1_macro)
mlflow.log_param("Best Estimator", self.classifier.best_estimator_)
if self.tracking == False:
estimator = AdaBoostClassifier()
if search_param_A.value == 'GridSearch CV':
print('> GridSearch CV in progress...')
grid_lr = GridSearchCV(estimator=estimator,
param_grid = param_grid,
cv = int(cv_A.value),
n_jobs = int(n_jobs_A.value),
scoring = scoring_A.value)
if search_param_A.value == 'Random Search CV':
print('> RandomizedSearch CV in progress...')
grid_lr = RandomizedSearchCV(estimator=estimator,
param_distributions = param_grid,
cv = int(cv_A.value),
n_iter = int(n_iter_A.value),
n_jobs = int(n_jobs_A.value),
scoring = scoring_A.value)
self.classifier = grid_lr.fit(self.new_X.values, self.new_y.values)
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, f1_score, roc_auc_score
X_train, X_test, y_train, y_test = train_test_split(
self.new_X.values, self.new_y.values, test_size=0.2, random_state=1)
predictions = self.classifier.predict(X_test)
acc_score= (accuracy_score(y_test,predictions))
roc_score= (roc_auc_score(y_test,predictions))
f1_macro= (f1_score(y_test, predictions, average='macro'))
with out2:
clear_output()
print('\033[1m'+'\033[4m'+'Get_Params \n***************************************'+'\033[0m')
print(self.classifier.get_params)
print('\033[1m'+'\033[4m'+'Best_Estimator \n***********************************'+'\033[0m')
print(self.classifier.best_estimator_)
print('\033[1m'+'\033[4m'+'Metrics on Train data \n******************************'+'\033[0m')
print("acc_score = {:.3f} | roc_score = {:,.3f} | f1_score(macro) = {:,.3f}".format(acc_score, roc_score, f1_macro))
print('\033[1m'+'\033[4m'+'Predictions on stand_out test data \n******************************'+'\033[0m')
self.predict_HP = self.classifier.predict(self.new_test)
print('Prediction completed. \nUse the dot operator in the code cell below to access the predictions, e.g., pph.predict_HP, where pph is the pywedge_HP class object')
msg = widgets.HTML('<h4>Please switch to output tab for results...</h4>')
msg_1 = widgets.HTML('<h4>Please run mlflow ui in the command prompt to monitor HP tuning results</h4>')
display(msg)
if self.tracking==True:
display(msg_1)
button_6.on_click(on_out_res_clicked_ADA)
b = widgets.VBox([button_6, out_res_ADA])
h1 = widgets.HTML('<h3>Select Adaboost Hyperparameters</h3>')
frame_A = widgets.VBox([header_2, pp_class, h1, first_row, second_row, h5, l, m, b])
# Adaboost Cls Hyperparameter ends
# Extra Trees Hyperparameter Starts
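# The widgets below collect ExtraTreesClassifier hyperparameters: text boxes
# accept comma-separated candidate values, SelectMultiple widgets let several
# options be searched at once.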
n_estimators_E = widgets.Text(
value='100',
placeholder='enter any integer value',
description='n_estimators',
disabled=False)
criterion_E = widgets.SelectMultiple(
options = ['gini', 'entropy'],
value = ['gini'],
rows = 2,
description = 'Criterion',
disabled = False)
max_depth_E = widgets.Text(
value='5',
placeholder='enter any integer value',
description='Max_Depth',
disabled=False)
min_samples_split_E = widgets.Text(
value='2',
placeholder='enter any integer value',
description='min_samples_split',
style = {'description_width': 'initial'},
disabled=False)
min_samples_leaf_E = widgets.Text(
value='1',
placeholder='enter any integer value',
description='min_samples_leaf',
style = {'description_width': 'initial'},
disabled=False)
min_weight_fraction_leaf_E = widgets.Text(
value='0.0',
placeholder='enter any float value',
description='min_weight_fraction',
style = {'description_width': 'initial'},
disabled=False)
max_features_E = widgets.SelectMultiple(
options = ['auto', 'sqrt', 'log2'],
value = ['auto'],
description = 'Max_Features',
style = {'description_width': 'initial'},
rows = 3,
disabled = False)
random_state_E = widgets.Text(
value='0',
placeholder='enter any integer value',
description='Random_state',
style = {'description_width': 'initial'},
disabled=False)
max_leaf_nodes_E = widgets.Text(
value='2',
placeholder='enter any integer value',
description='Max_leaf_nodes',
style = {'description_width': 'initial'},
disabled=False)
min_impurity_decrease_E = widgets.Text(
value='0.0',
placeholder='enter any float value',
description='Min_impurity_decrease',
style = {'description_width': 'initial'},
disabled=False)
bootstrap_E = widgets.SelectMultiple(
options = [True, False],
value = [False],
description = 'Bootstrap',
rows = 2,
disabled = False)
oob_score_E = widgets.SelectMultiple(
options = [True, False],
value = [False],
description = 'oob_score',
rows = 2,
disabled = False)
verbose_E = widgets.Text(
value='0',
placeholder='enter any integer value',
description='Verbose',
disabled=False)
warm_state_E = widgets.SelectMultiple(
options = [True, False],
value = [False],
description = 'Warm_State',
style = {'description_width': 'initial'},
rows = 2,
disabled = False)
class_weight_E = widgets.SelectMultiple(
options = ['balanced', 'balanced_subsample', 'None'],
value = ['balanced'],
description = 'Class_weight',
rows = 3,
style = {'description_width': 'initial'},
disabled = False)
ccp_alpha_E = widgets.Text(
value='0.0',
placeholder='enter any non-negative float value',
description='ccp_alpha',
disabled=False)
max_samples_E = widgets.Text(
value='2',
placeholder='enter any float value',
description='max_samples',
style = {'description_width': 'initial'},
disabled=False)
h5 = widgets.HTML('<h4>Select Grid/Random search Hyperparameters</h4>')
search_param_E = widgets.Dropdown(
options=['GridSearch CV', 'Random Search CV'],
value='GridSearch CV',
description='Choose Search Option: ',
style = {'description_width': 'initial'},
disabled=False)
cv_E = widgets.Text(
value='5',
placeholder='enter any integer value',
description='CV',
style = {'description_width': 'initial'},
disabled=False)
scoring_E = widgets.Dropdown(
options = ['accuracy', 'f1', 'roc_auc', 'balanced_accuracy'],
value = 'accuracy',
rows = 4,
description = 'Scoring',
disabled = False)
l = widgets.HBox([search_param_E, cv_E, scoring_E])
n_jobs_E = widgets.Text(
value='1',
placeholder='enter any integer value',
description='n_jobs',
style = {'description_width': 'initial'},
disabled=False)
n_iter_E = widgets.Text(
value='10',
placeholder='enter any integer value',
description='n_iter',
style = {'description_width': 'initial'},
disabled=False)
n_iter_text = widgets.HTML(value='<p><em>For Random Search</em></p>')
m = widgets.HBox([n_jobs_E, n_iter_E, n_iter_text])
first_row = widgets.HBox([n_estimators_E, criterion_E, max_depth_E])
second_row = widgets.HBox([min_samples_split_E, min_samples_leaf_E, min_weight_fraction_leaf_E])
third_row = widgets.HBox([max_features_E, max_leaf_nodes_E, min_impurity_decrease_E])
fourth_row = widgets.HBox([max_samples_E, bootstrap_E, oob_score_E])
fifth_row = widgets.HBox([warm_state_E, random_state_E, verbose_E])
sixth_row = widgets.HBox([class_weight_E, ccp_alpha_E])
button_7 = widgets.Button(description='Submit ET GridSearchCV')
out_res_ET = widgets.Output()
def on_out_res_clicked_ET(_):
with out_res_ET:
clear_output()
import pandas as pd
self.new_X = self.X.copy(deep=True)
self.new_y = self.y
self.new_test = self.test.copy(deep=True)
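# Preprocessing: impute missing categoricals with the column mode and missing
# numerics with the column mean, separately for X and the test set.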
categorical_cols = self.new_X.select_dtypes('object').columns.to_list()
for col in categorical_cols:
self.new_X[col].fillna(self.new_X[col].mode()[0], inplace=True)
numeric_cols = self.new_X.select_dtypes(['float64', 'int64']).columns.to_list()
for col in numeric_cols:
self.new_X[col].fillna(self.new_X[col].mean(), inplace=True)
test_categorical_cols = self.new_test.select_dtypes('object').columns.to_list()
for col in test_categorical_cols:
self.new_test[col].fillna(self.new_test[col].mode()[0], inplace=True)
numeric_cols = self.new_test.select_dtypes(['float64', 'int64']).columns.to_list()
for col in numeric_cols:
self.new_test[col].fillna(self.new_test[col].mean(), inplace=True)
if cat_info.value == '1':
for col in categorical_cols:
self.new_X[col] = self.new_X[col].astype('category')
self.new_X[col] = self.new_X[col].cat.codes
self.new_test[col] = self.new_test[col].astype('category')
self.new_test[col] = self.new_test[col].cat.codes
print('> Categorical columns converted using Catcodes')
if cat_info.value == '2':
self.new_X = pd.get_dummies(self.new_X,drop_first=True)
self.new_test = pd.get_dummies(self.new_test,drop_first=True)
print('> Categorical columns converted using Get_Dummies')
self.new_y = pd.DataFrame(self.train[[self.y]])
self.new_y = pd.get_dummies(self.new_y,drop_first=True)
if std_scr.value == '1':
from sklearn.preprocessing import StandardScaler
scalar = StandardScaler()
self.new_X = pd.DataFrame(scalar.fit_transform(self.new_X), columns=self.new_X.columns, index=self.new_X.index)
self.new_test = pd.DataFrame(scalar.fit_transform(self.new_test), columns=self.new_test.columns, index=self.new_test.index)
print('> standardization using Standard Scaler completed')
elif std_scr.value == '2':
from sklearn.preprocessing import RobustScaler
scalar = RobustScaler()
self.new_X= pd.DataFrame(scalar.fit_transform(self.new_X), columns=self.new_X.columns, index=self.new_X.index)
self.new_test= pd.DataFrame(scalar.fit_transform(self.new_test), columns=self.new_test.columns, index=self.new_test.index)
print('> standardization using Robust Scaler completed')
elif std_scr.value == '3':
from sklearn.preprocessing import MinMaxScaler
scalar = MinMaxScaler()
self.new_X= pd.DataFrame(scalar.fit_transform(self.new_X), columns=self.new_X.columns, index=self.new_X.index)
self.new_test= pd.DataFrame(scalar.fit_transform(self.new_test), columns=self.new_test.columns, index=self.new_test.index)
print('> standardization using MinMax Scaler completed')
elif std_scr.value == 'n':
print('> No standardization done')
if apply_smote.value == 'y':
from imblearn.over_sampling import SMOTE
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
from sklearn.exceptions import DataConversionWarning
warnings.filterwarnings(action='ignore', category=DataConversionWarning)
warnings.filterwarnings('ignore', category=FutureWarning)
sm = SMOTE(random_state=42, n_jobs=-1)
new_X_cols = self.new_X.columns
new_y_cols = self.new_y.columns
self.new_X, self.new_y= sm.fit_resample(self.new_X, self.new_y)
self.new_X = pd.DataFrame(self.new_X, columns=new_X_cols)
self.new_y= pd.DataFrame(self.new_y, columns=new_y_cols)
print('> Oversampling using SMOTE completed')
else:
print('> No oversampling done')
param_grid = {'n_estimators' : [int(item) for item in n_estimators_E.value.split(',')],
'criterion': list(criterion_E.value),
'max_depth': [int(item) for item in max_depth_E.value.split(',')],
'min_samples_split' : [int(item) for item in min_samples_split_E.value.split(',')],
'min_samples_leaf' : [int(item) for item in min_samples_leaf_E.value.split(',')],
'min_weight_fraction_leaf' : [float(item) for item in min_weight_fraction_leaf_E.value.split(',')],
'max_features' : list(max_features_E.value),
'random_state' : [int(item) for item in random_state_E.value.split(',')],
'max_leaf_nodes' : [int(item) for item in max_leaf_nodes_E.value.split(',')],
'min_impurity_decrease' : [float(item) for item in min_impurity_decrease_E.value.split(',')],
'bootstrap' : list(bootstrap_E.value),
'oob_score' : list(oob_score_E.value),
'verbose' : [int(item) for item in verbose_E.value.split(',')],
'class_weight' : list(class_weight_E.value),
'ccp_alpha' : [float(item) for item in ccp_alpha_E.value.split(',')],
'max_samples' : [int(item) for item in max_samples_E.value.split(',')]
}
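# With experiment tracking enabled, mlflow autologs the sklearn run and the
# summary metrics computed on an 80/20 hold-out split are logged explicitly;
# otherwise the same grid/randomized search runs without logging.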
if self.tracking == True:
import mlflow
from mlflow import log_metric, log_param, log_artifacts
mlflow.sklearn.autolog()
estimator = ExtraTreesClassifier()
if search_param_E.value == 'GridSearch CV':
print('> GridSearch CV in progress...')
grid_lr = GridSearchCV(estimator=estimator,
param_grid = param_grid,
cv = int(cv_E.value),
n_jobs = int(n_jobs_E.value),
scoring = scoring_E.value)
if search_param_E.value == 'Random Search CV':
print('> RandomizedSearch CV in progress...')
grid_lr = RandomizedSearchCV(estimator=estimator,
param_distributions = param_grid,
cv = int(cv_E.value),
n_jobs = int(n_jobs_E.value),
n_iter = int(n_iter_E.value),
scoring = scoring_E.value)
with mlflow.start_run() as run:
self.classifier = grid_lr.fit(self.new_X.values, self.new_y.values)
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, f1_score, roc_auc_score
X_train, X_test, y_train, y_test = train_test_split(
self.new_X.values, self.new_y.values, test_size=0.2, random_state=1)
predictions = self.classifier.predict(X_test)
acc_score= (accuracy_score(y_test,predictions))
roc_score= (roc_auc_score(y_test,predictions))
f1_macro= (f1_score(y_test, predictions, average='macro'))
mlflow.log_param("acc_score", acc_score)
mlflow.log_param("roc_score", roc_score)
mlflow.log_param("f1_macro", f1_macro)
mlflow.log_param("Best Estimator", self.classifier.best_estimator_)
if self.tracking == False:
estimator = ExtraTreesClassifier()
if search_param_E.value == 'GridSearch CV':
print('> GridSearch CV in progress...')
grid_lr = GridSearchCV(estimator=estimator,
param_grid = param_grid,
n_jobs = int(n_jobs_E.value),
cv = int(cv_E.value),
scoring = scoring_E.value)
if search_param_E.value == 'Random Search CV':
print('> RandomizedSearch CV in progress...')
grid_lr = RandomizedSearchCV(estimator=estimator,
param_distributions = param_grid,
cv = int(cv_E.value),
n_jobs = int(n_jobs_E.value),
n_iter = int(n_iter_E.value),
scoring = scoring_E.value)
self.classifier = grid_lr.fit(self.new_X.values, self.new_y.values)
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, f1_score, roc_auc_score
X_train, X_test, y_train, y_test = train_test_split(
self.new_X.values, self.new_y.values, test_size=0.2, random_state=1)
predictions = self.classifier.predict(X_test)
acc_score= (accuracy_score(y_test,predictions))
roc_score= (roc_auc_score(y_test,predictions))
f1_macro= (f1_score(y_test, predictions, average='macro'))
with out2:
clear_output()
print('\033[1m'+'\033[4m'+'Get_Params \n***************************************'+'\033[0m')
print(self.classifier.get_params)
print('\033[1m'+'\033[4m'+'Best_Estimator \n***********************************'+'\033[0m')
print(self.classifier.best_estimator_)
print('\033[1m'+'\033[4m'+'Metrics on Train data \n******************************'+'\033[0m')
print("acc_score = {:.3f} | roc_score = {:,.3f} | f1_score(macro) = {:,.3f}".format(acc_score, roc_score, f1_macro))
print('\033[1m'+'\033[4m'+'Predictions on stand_out test data \n******************************'+'\033[0m')
self.predict_HP = self.classifier.predict(self.new_test)
print('Prediction completed. \nUse the dot operator in the code cell below to access the predictions, e.g., pph.predict_HP, where pph is the pywedge_HP class object')
msg = widgets.HTML('<h4>Please switch to output tab for results...</h4>')
msg_1 = widgets.HTML('<h4>Please run mlflow ui in the command prompt to monitor HP tuning results</h4>')
display(msg)
if self.tracking==True:
display(msg_1)
button_7.on_click(on_out_res_clicked_ET)
b = widgets.VBox([button_7, out_res_ET])
h1 = widgets.HTML('<h3>Select ExtraTrees Hyperparameters</h3>')
frame_ET = widgets.VBox([header_2, pp_class, h1, first_row, second_row, third_row, fourth_row, fifth_row, sixth_row, h5, l, m, b])
# Extra Trees Hyperparameter ends
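# Dispatcher: shows the widget frame for the chosen base estimator and loads
# the matching scikit-learn documentation page in the Helper Page tab.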
def on_button_clicked(_):
with out:
clear_output()
selection = base_estimator.value
if selection == 'Logistic Regression':
display(aa)
with out3:
clear_output()
hp = widgets.HTML('<h4> Logistic Regression - Scikit Learn web page</h4>')
hp_1 = widgets.HTML('<iframe src="https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html" width="100%" height="600" title="SKLearn Helper page"> </iframe>')
display(hp, hp_1)
elif selection =='Decision Tree':
display(frame)
with out3:
clear_output()
hp = widgets.HTML('<h4> Decision Tree Classifier - Scikit Learn web page</h4>')
hp_1 = widgets.HTML('<iframe src="https://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeClassifier.html" width="100%" height="600" title="SKLearn Helper page"> </iframe>')
display(hp, hp_1)
elif selection == 'Random Forest':
display(frame_RF)
with out3:
clear_output()
hp = widgets.HTML('<h4> Random Forest Classifier - Scikit Learn web page</h4>')
hp_1 = widgets.HTML('<iframe src="https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html" width="100%" height="600" title="SKLearn Helper page"> </iframe>')
display(hp, hp_1)
elif selection == 'AdaBoost':
display(frame_A)
with out3:
clear_output()
hp = widgets.HTML('<h4> Adaboost Classifier - Scikit Learn web page</h4>')
hp_1 = widgets.HTML('<iframe src="https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.AdaBoostClassifier.html" width="100%" height="600" title="SKLearn Helper page"> </iframe>')
display(hp, hp_1)
elif selection == 'ExtraTree Classifier':
display(frame_ET)
with out3:
clear_output()
hp = widgets.HTML('<h4> ExtraTree Classifier - Scikit Learn web page</h4>')
hp_1 = widgets.HTML('<iframe src="https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.ExtraTreesClassifier.html" width="100%" height="600" title="SKLearn Helper page"> </iframe>')
display(hp, hp_1)
elif selection == 'KNN Classifier':
display(frame_K)
with out3:
clear_output()
hp = widgets.HTML('<h4> KNN Classifier - Scikit Learn web page</h4>')
hp_1 = widgets.HTML('<iframe src="https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsClassifier.html" width="100%" height="600" title="SKLearn Helper page"> </iframe>')
display(hp, hp_1)
button.on_click(on_button_clicked)
a = widgets.VBox([button, out])
display(a)
def HP_Tune_Regression(self):
from sklearn.neighbors import KNeighborsRegressor
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor, AdaBoostRegressor, ExtraTreesRegressor
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from math import sqrt
import ipywidgets as widgets
from ipywidgets import HBox, VBox, Button, Label
from ipywidgets import interact_manual, interactive, interact
import logging
from IPython.display import display, Markdown, clear_output
import warnings
warnings.filterwarnings('ignore')
out1 = widgets.Output()
out2 = widgets.Output()
out3 = widgets.Output()
tab = widgets.Tab(children = [out1, out2, out3])
tab.set_title(0, 'Input')
tab.set_title(1, 'Output')
tab.set_title(2, 'Helper Page')
display(tab)
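# Three-tab layout: 'Input' collects preprocessing and hyperparameter choices,
# 'Output' shows tuning results, 'Helper Page' embeds the sklearn docs.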
with out1:
header_1 = widgets.HTML(value="<h2>Pywedge HP_Tune</h2>")
header = widgets.HTML(value="<h3>Base Estimator</h3>")
display(header_1, header)
import pandas as pd
cat_info = widgets.Dropdown(
options = [('cat_codes', '1'), ('get_dummies', '2')],
value = '1',
description = 'Select categorical conversion',
style = {'description_width': 'initial'},
disabled=False)
std_scr = widgets.Dropdown(
options = [('StandardScalar', '1'), ('RobustScalar', '2'), ('MinMaxScalar', '3'), ('No Standardization', 'n')],
value = 'n',
description = 'Select Standardization methods',
style = {'description_width': 'initial'},
disabled=False)
pp_class = widgets.HBox([cat_info, std_scr])
header_2 = widgets.HTML(value="<h3>Pre_processing </h3>")
base_estimator = widgets.Dropdown(
options=['Linear Regression', 'Decision Tree Regressor', 'Random Forest Regressor', 'AdaBoost Regressor', 'ExtraTree Regressor', 'KNN Regressor'],
value='Linear Regression',
description='Choose Base Estimator: ',
style = {'description_width': 'initial'},
disabled=False)
display(base_estimator)
button = widgets.Button(description='Select Base Estimator')
out = widgets.Output()
# Linear Regression Hyperparameters _Start
fit_intercept_L = widgets.SelectMultiple(
options = [True, False],
value = [True],
rows = 2,
description = 'Fit_Intercept',
disabled = False)
normalize_L = widgets.SelectMultiple(
options = [True, False],
value = [False],
rows = 2,
description = 'Normalize',
disabled = False)
copy_X_L = widgets.SelectMultiple(
options = [True, False],
value = [True],
rows = 2,
description = 'Copy_X',
disabled = False)
g = widgets.HBox([fit_intercept_L, normalize_L, copy_X_L])
positive_L = widgets.SelectMultiple(
options = [True, False],
value = [False],
rows = 2,
description = 'Positive',
disabled = False)
h = widgets.HBox([positive_L])
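# Note: positive_L is collected here but currently excluded from the search
# grid (see the commented-out 'positive' entry in param_grid below),
# presumably because it requires a newer scikit-learn release.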
h5 = widgets.HTML('<h4>Select Grid/Random search Hyperparameters</h4>')
search_param_L = widgets.Dropdown(
options=['GridSearch CV', 'Random Search CV'],
value='GridSearch CV',
description='Choose Search Option: ',
style = {'description_width': 'initial'},
disabled=False)
cv_L = widgets.Text(
value='5',
placeholder='enter any integer value',
description='CV',
style = {'description_width': 'initial'},
disabled=False)
l = widgets.HBox([search_param_L, cv_L])
n_iter_L = widgets.Text(
value='10',
placeholder='enter any integer value',
description='n_iter',
style = {'description_width': 'initial'},
disabled=False)
n_iter_text = widgets.HTML(value='<p><em>For Random Search</em></p>')
m = widgets.HBox([n_iter_L, n_iter_text])
null = widgets.HTML('<br></br>')
button_2 = widgets.Button(description='Submit HP_Tune')
out_res = widgets.Output()
def on_out_res_clicked(_):
with out_res:
clear_output()
import pandas as pd
self.new_X = self.X.copy(deep=True)
self.new_y = self.y
self.new_test = self.test.copy(deep=True)
categorical_cols = self.new_X.select_dtypes('object').columns.to_list()
for col in categorical_cols:
self.new_X[col].fillna(self.new_X[col].mode()[0], inplace=True)
numeric_cols = self.new_X.select_dtypes(['float64', 'int64']).columns.to_list()
for col in numeric_cols:
self.new_X[col].fillna(self.new_X[col].mean(), inplace=True)
test_categorical_cols = self.new_test.select_dtypes('object').columns.to_list()
for col in test_categorical_cols:
self.new_test[col].fillna(self.new_test[col].mode()[0], inplace=True)
numeric_cols = self.new_test.select_dtypes(['float64', 'int64']).columns.to_list()
for col in numeric_cols:
self.new_test[col].fillna(self.new_test[col].mean(), inplace=True)
if cat_info.value == '1':
for col in categorical_cols:
self.new_X[col] = self.new_X[col].astype('category')
self.new_X[col] = self.new_X[col].cat.codes
self.new_test[col] = self.new_test[col].astype('category')
self.new_test[col] = self.new_test[col].cat.codes
print('> Categorical columns converted using Catcodes')
if cat_info.value == '2':
self.new_X = pd.get_dummies(self.new_X,drop_first=True)
self.new_test = pd.get_dummies(self.new_test,drop_first=True)
print('> Categorical columns converted using Get_Dummies')
self.new_y = pd.DataFrame(self.train[[self.y]])
self.new_y = pd.get_dummies(self.new_y,drop_first=True)
if std_scr.value == '1':
from sklearn.preprocessing import StandardScaler
scalar = StandardScaler()
self.new_X = pd.DataFrame(scalar.fit_transform(self.new_X), columns=self.new_X.columns, index=self.new_X.index)
self.new_test = pd.DataFrame(scalar.fit_transform(self.new_test), columns=self.new_test.columns, index=self.new_test.index)
print('> standardization using Standard Scaler completed')
elif std_scr.value == '2':
from sklearn.preprocessing import RobustScaler
scalar = RobustScaler()
self.new_X= pd.DataFrame(scalar.fit_transform(self.new_X), columns=self.new_X.columns, index=self.new_X.index)
self.new_test= pd.DataFrame(scalar.fit_transform(self.new_test), columns=self.new_test.columns, index=self.new_test.index)
print('> standardization using Robust Scaler completed')
elif std_scr.value == '3':
from sklearn.preprocessing import MinMaxScaler
scalar = MinMaxScaler()
self.new_X= pd.DataFrame(scalar.fit_transform(self.new_X), columns=self.new_X.columns, index=self.new_X.index)
self.new_test= pd.DataFrame(scalar.fit_transform(self.new_test), columns=self.new_test.columns, index=self.new_test.index)
print('> standardization using MinMax Scaler completed')
elif std_scr.value == 'n':
print('> No standardization done')
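# Linear Regression search space: combinations of fit_intercept, normalize
# and copy_X taken from the multi-select widgets above.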
param_grid = {'fit_intercept': list(fit_intercept_L.value),
'normalize': list(normalize_L.value),
'copy_X': list(copy_X_L.value)
#'positive' : list(positive_L.value)
}
if self.tracking == True:
import mlflow
from mlflow import log_metric, log_param, log_artifacts
mlflow.sklearn.autolog()
warnings.filterwarnings("ignore")
estimator = LinearRegression()
if search_param_L.value == 'GridSearch CV':
print('> GridSearch CV in progress...')
grid_lr = GridSearchCV(estimator=estimator,
param_grid = param_grid,
cv = int(cv_L.value))
if search_param_L.value == 'Random Search CV':
print('> Randomized Search CV in progress...')
grid_lr = RandomizedSearchCV(estimator=estimator,
param_distributions = param_grid,
cv = int(cv_L.value),
n_iter = int(n_iter_L.value))
with mlflow.start_run() as run:
warnings.filterwarnings("ignore")
self.regressor = grid_lr.fit(self.new_X.values, self.new_y.values)
from sklearn.model_selection import train_test_split
from sklearn.metrics import explained_variance_score, mean_absolute_error, mean_squared_error, r2_score
X_train, X_test, y_train, y_test = train_test_split(
self.new_X.values, self.new_y.values, test_size=0.2, random_state=1)
predictions = self.regressor.predict(X_test)
exp_var = explained_variance_score(y_test, predictions)
mae = mean_absolute_error(y_test, predictions)
rmse = sqrt(mean_squared_error(y_test, predictions))
r2 = r2_score(y_test, predictions)
mlflow.log_param("Exp_Var", exp_var)
mlflow.log_param("MAE", mae)
mlflow.log_param("RMSE", rmse)
mlflow.log_param('R2', r2)
mlflow.log_param("Best Estimator", self.regressor.best_estimator_)
if self.tracking == False:
estimator = LinearRegression()
if search_param_L.value == 'GridSearch CV':
print('> GridSearch CV in progress...')
grid_lr = GridSearchCV(estimator=estimator,
param_grid = param_grid,
cv = int(cv_L.value))
if search_param_L.value == 'Random Search CV':
print('> Randomized Search CV in progress...')
grid_lr = RandomizedSearchCV(estimator=estimator,
param_distributions = param_grid,
cv = int(cv_L.value),
n_iter = int(n_iter_L.value))
warnings.filterwarnings("ignore")
self.regressor = grid_lr.fit(self.new_X.values, self.new_y.values)
from sklearn.model_selection import train_test_split
from sklearn.metrics import explained_variance_score, mean_absolute_error, mean_squared_error, r2_score
X_train, X_test, y_train, y_test = train_test_split(
self.new_X.values, self.new_y.values, test_size=0.2, random_state=1)
predictions = self.regressor.predict(X_test)
exp_var = explained_variance_score(y_test, predictions)
mae = mean_absolute_error(y_test, predictions)
rmse = sqrt(mean_squared_error(y_test, predictions))
r2 = r2_score(y_test, predictions)
with out2:
clear_output()
print('\033[1m'+'\033[4m'+'Get_Params \n***************************************'+'\033[0m')
print(self.regressor.get_params)
print('\033[1m'+'\033[4m'+'Best_Estimator \n***********************************'+'\033[0m')
print(self.regressor.best_estimator_)
print('\033[1m'+'\033[4m'+'Metrics on Train data \n******************************'+'\033[0m')
print("exp_var = {:.3f} | mae = {:,.3f} | rmse = {:,.3f} | r2 = {:,.3f} ".format(exp_var, mae, rmse, r2,))
print('\033[1m'+'\033[4m'+'Predictions on stand_out test data \n******************************'+'\033[0m')
self.predict_HP = self.regressor.predict(self.new_test)
print('Prediction completed. \nUse the dot operator in the code cell below to access the predictions, e.g., pph.predict_HP, where pph is the pywedge_HP class object')
msg = widgets.HTML('<h4>Please switch to output tab for results...</h4>')
msg_1 = widgets.HTML('<h4>Please run mlflow ui in the command prompt to monitor HP tuning results</h4>')
display(msg)
if self.tracking == True:
display(msg_1)
button_2.on_click(on_out_res_clicked)
b = widgets.VBox([button_2, out_res])
h1 = widgets.HTML('<h3>Select Linear Regression Hyperparameters</h3>')
aa = widgets.VBox([header_2, pp_class, h1, g,h,h5, l, m, null, b])
# Linear Regression Hyperparameter - Ends
# Decision Tree Regressor Hyperparameter - Starts
criterion_D = widgets.SelectMultiple(
options = ['mse', 'friedman_mse', 'mae', 'poisson'],
value = ['mse'],
description = 'Criterion',
rows = 4,
disabled = False)
splitter_D = widgets.SelectMultiple(
options = ['best', 'random'],
value = ['best'],
rows = 2,
description = 'Splitter',
disabled = False)
max_depth_D = widgets.Text(
value='5',
placeholder='enter any integer value',
description='Max_Depth',
disabled=False)
min_samples_split_D = widgets.Text(
value='2',
placeholder='enter any integer value',
description='min_samples_split',
style = {'description_width': 'initial'},
disabled=False)
min_samples_leaf_D = widgets.Text(
value='1',
placeholder='enter any integer value',
description='min_samples_leaf',
style = {'description_width': 'initial'},
disabled=False)
min_weight_fraction_D = widgets.Text(
value='0.0',
placeholder='enter any float value',
description='min_weight_fraction',
style = {'description_width': 'initial'},
disabled=False)
max_features_D = widgets.SelectMultiple(
options = ['auto', 'sqrt', 'log2'],
value = ['auto'],
description = 'Max_Features',
style = {'description_width': 'initial'},
rows = 3,
disabled = False)
random_state_D = widgets.Text(
value='0',
placeholder='enter any integer value',
description='Random_state',
disabled=False)
max_leaf_nodes_D = widgets.Text(
value='2',
placeholder='enter any integer value',
description='Max_leaf_nodes',
style = {'description_width': 'initial'},
disabled=False)
min_impurity_decrease_D = widgets.Text(
value='0.0',
placeholder='enter any float value',
description='Min_impurity_decrease',
style = {'description_width': 'initial'},
disabled=False)
ccp_alpha_D = widgets.Text(
value='0.0',
placeholder='enter any non-negative float value',
description='ccp_alpha',
disabled=False)
first_row = widgets.HBox([criterion_D, splitter_D, max_features_D])
second_row = widgets.HBox([min_samples_split_D, min_weight_fraction_D, max_depth_D])
third_row = widgets.HBox([random_state_D, max_leaf_nodes_D, min_impurity_decrease_D])
fourth_row = widgets.HBox([ccp_alpha_D, min_samples_leaf_D])
h5 = widgets.HTML('<h4>Select Grid/Random search Hyperparameters</h4>')
search_param_D = widgets.Dropdown(
options=['GridSearch CV', 'Random Search CV'],
value='GridSearch CV',
description='Choose Search Option: ',
style = {'description_width': 'initial'},
disabled=False)
cv_D = widgets.Text(
value='5',
placeholder='enter any integer value',
description='CV',
style = {'description_width': 'initial'},
disabled=False)
n_jobs_D = widgets.Text(
value='1',
placeholder='enter any integer value',
description='n_jobs',
style = {'description_width': 'initial'},
disabled=False)
l = widgets.HBox([search_param_D, cv_D, n_jobs_D])
n_iter_D = widgets.Text(
value='10',
placeholder='enter any integer value',
description='n_iter',
style = {'description_width': 'initial'},
disabled=False)
n_iter_text = widgets.HTML(value='<p><em>For Random Search</em></p>')
m = widgets.HBox([n_iter_D, n_iter_text])
button_3 = widgets.Button(description='Submit HP_Tune')
out_res_DT = widgets.Output()
def on_out_res_clicked_DT(_):
with out_res_DT:
clear_output()
import pandas as pd
self.new_X = self.X.copy(deep=True)
self.new_y = self.y
self.new_test = self.test.copy(deep=True)
categorical_cols = self.new_X.select_dtypes('object').columns.to_list()
for col in categorical_cols:
self.new_X[col].fillna(self.new_X[col].mode()[0], inplace=True)
numeric_cols = self.new_X.select_dtypes(['float64', 'int64']).columns.to_list()
for col in numeric_cols:
self.new_X[col].fillna(self.new_X[col].mean(), inplace=True)
test_categorical_cols = self.new_test.select_dtypes('object').columns.to_list()
for col in test_categorical_cols:
self.new_test[col].fillna(self.new_test[col].mode()[0], inplace=True)
numeric_cols = self.new_test.select_dtypes(['float64', 'int64']).columns.to_list()
for col in numeric_cols:
self.new_test[col].fillna(self.new_test[col].mean(), inplace=True)
if cat_info.value == '1':
for col in categorical_cols:
self.new_X[col] = self.new_X[col].astype('category')
self.new_X[col] = self.new_X[col].cat.codes
self.new_test[col] = self.new_test[col].astype('category')
self.new_test[col] = self.new_test[col].cat.codes
print('> Categorical columns converted using Catcodes')
if cat_info.value == '2':
self.new_X = pd.get_dummies(self.new_X,drop_first=True)
self.new_test = pd.get_dummies(self.new_test,drop_first=True)
print('> Categorical columns converted using Get_Dummies')
self.new_y = pd.DataFrame(self.train[[self.y]])
self.new_y = pd.get_dummies(self.new_y,drop_first=True)
if std_scr.value == '1':
from sklearn.preprocessing import StandardScaler
scalar = StandardScaler()
self.new_X = pd.DataFrame(scalar.fit_transform(self.new_X), columns=self.new_X.columns, index=self.new_X.index)
self.new_test = pd.DataFrame(scalar.fit_transform(self.new_test), columns=self.new_test.columns, index=self.new_test.index)
print('> standardization using Standard Scaler completed')
elif std_scr.value == '2':
from sklearn.preprocessing import RobustScaler
scalar = RobustScaler()
self.new_X= pd.DataFrame(scalar.fit_transform(self.new_X), columns=self.new_X.columns, index=self.new_X.index)
self.new_test= pd.DataFrame(scalar.fit_transform(self.new_test), columns=self.new_test.columns, index=self.new_test.index)
print('> standardization using Robust Scaler completed')
elif std_scr.value == '3':
from sklearn.preprocessing import MinMaxScaler
scalar = MinMaxScaler()
self.new_X= pd.DataFrame(scalar.fit_transform(self.new_X), columns=self.new_X.columns, index=self.new_X.index)
self.new_test= pd.DataFrame(scalar.fit_transform(self.new_test), columns=self.new_test.columns, index=self.new_test.index)
print('> standardization using MinMax Scaler completed')
elif std_scr.value == 'n':
print('> No standardization done')
param_grid = {'criterion': list(criterion_D.value),
'splitter': list(splitter_D.value),
'max_depth': [int(item) for item in max_depth_D.value.split(',')],
'min_samples_split' : [int(item) for item in min_samples_split_D.value.split(',')],
'min_samples_leaf' : [int(item) for item in min_samples_leaf_D.value.split(',')],
# 'min_weight_fraction' : [float(item) for item in min_weight_fraction.value.split(',')],
'max_features' : list(max_features_D.value),
'random_state' : [int(item) for item in random_state_D.value.split(',')],
'max_leaf_nodes' : [int(item) for item in max_leaf_nodes_D.value.split(',')],
'min_impurity_decrease' : [float(item) for item in min_impurity_decrease_D.value.split(',')],
'ccp_alpha' : [float(item) for item in ccp_alpha_D.value.split(',')]
}
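# min_weight_fraction_D is gathered above but left out of this grid (its
# entry is commented out); the remaining keys map directly to
# DecisionTreeRegressor arguments.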
if self.tracking == True:
import mlflow
from mlflow import log_metric, log_param, log_artifacts
mlflow.sklearn.autolog()
estimator = DecisionTreeRegressor()
if search_param_D.value == 'GridSearch CV':
print('> GridSearch CV in progress...')
grid_lr = GridSearchCV(estimator=estimator,
param_grid = param_grid,
cv = int(cv_D.value),
n_jobs = int(n_jobs_D.value))
if search_param_D.value == 'Random Search CV':
print('> Randomized Search CV in progress...')
grid_lr = RandomizedSearchCV(estimator=estimator,
param_distributions = param_grid,
cv = int(cv_D.value),
n_iter = int(n_iter_D.value),
n_jobs = int(n_jobs_D.value))
with mlflow.start_run() as run:
warnings.filterwarnings("ignore")
self.regressor = grid_lr.fit(self.new_X.values, self.new_y.values)
from sklearn.model_selection import train_test_split
from sklearn.metrics import explained_variance_score, mean_absolute_error, mean_squared_error, r2_score
X_train, X_test, y_train, y_test = train_test_split(
self.new_X.values, self.new_y.values, test_size=0.2, random_state=1)
predictions = self.regressor.predict(X_test)
exp_var = explained_variance_score(y_test, predictions)
mae = mean_absolute_error(y_test, predictions)
rmse = sqrt(mean_squared_error(y_test, predictions))
r2 = r2_score(y_test, predictions)
mlflow.log_param("Exp_Var", exp_var)
mlflow.log_param("MAE", mae)
mlflow.log_param("RMSE", rmse)
mlflow.log_param('R2', r2)
mlflow.log_param("Best Estimator", self.regressor.best_estimator_)
if self.tracking == False:
estimator = DecisionTreeRegressor()
if search_param_D.value == 'GridSearch CV':
print('> GridSearch CV in progress...')
grid_lr = GridSearchCV(estimator=estimator,
param_grid = param_grid,
cv = int(cv_D.value),
n_jobs = int(n_jobs_D.value))
if search_param_D.value == 'Random Search CV':
print('> Randomized Search CV in progress...')
grid_lr = RandomizedSearchCV(estimator=estimator,
param_distributions = param_grid,
cv = int(cv_D.value),
n_iter = int(n_iter_D.value),
n_jobs = int(n_jobs_D.value))
warnings.filterwarnings("ignore")
self.regressor = grid_lr.fit(self.new_X.values, self.new_y.values)
from sklearn.model_selection import train_test_split
from sklearn.metrics import explained_variance_score, mean_absolute_error, mean_squared_error, r2_score
X_train, X_test, y_train, y_test = train_test_split(
self.new_X.values, self.new_y.values, test_size=0.2, random_state=1)
predictions = self.regressor.predict(X_test)
exp_var = explained_variance_score(y_test, predictions)
mae = mean_absolute_error(y_test, predictions)
rmse = sqrt(mean_squared_error(y_test, predictions))
r2 = r2_score(y_test, predictions)
with out2:
clear_output()
print('\033[1m'+'\033[4m'+'Get_Params \n***************************************'+'\033[0m')
print(self.regressor.get_params)
print('\033[1m'+'\033[4m'+'Best_Estimator \n***********************************'+'\033[0m')
print(self.regressor.best_estimator_)
print('\033[1m'+'\033[4m'+'Metrics on Train data \n******************************'+'\033[0m')
print("exp_var = {:.3f} | mae = {:,.3f} | rmse = {:,.3f} | r2 = {:,.3f} ".format(exp_var, mae, rmse, r2,))
print('\033[1m'+'\033[4m'+'Predictions on stand_out test data \n******************************'+'\033[0m')
self.predict_HP = self.regressor.predict(self.new_test)
print('Prediction completed. \nUse the dot operator in the code cell below to access the predictions, e.g., pph.predict_HP, where pph is the pywedge_HP class object')
msg = widgets.HTML('<h4>Please switch to output tab for results...</h4>')
msg_1 = widgets.HTML('<h4>Please run mlflow ui in the command prompt to monitor HP tuning results</h4>')
display(msg)
if self.tracking==True:
display(msg_1)
button_3.on_click(on_out_res_clicked_DT)
b = widgets.VBox([button_3, out_res_DT])
h1 = widgets.HTML('<h3>Select Decision Tree Regressor Hyperparameters</h3>')
frame = widgets.VBox([header_2, pp_class, h1, first_row, second_row, third_row, fourth_row, h5, l, m, b])
# Decision Tree Hyperparameter Ends
# Random Forest Regressor Hyperparameter Starts
n_estimators_R = widgets.Text(
value='100',
placeholder='enter any integer value',
description='n_estimators',
disabled=False)
criterion_R = widgets.SelectMultiple(
options = ['mse', 'mae'],
value = ['mse'],
rows = 2,
description = 'Criterion',
disabled = False)
max_depth_R = widgets.Text(
value='5',
placeholder='enter any integer value',
description='Max_Depth',
disabled=False)
min_samples_split_R = widgets.Text(
value='2',
placeholder='enter any integer value',
description='min_samples_split',
style = {'description_width': 'initial'},
disabled=False)
min_samples_leaf_R = widgets.Text(
value='1',
placeholder='enter any integer value',
description='min_samples_leaf',
style = {'description_width': 'initial'},
disabled=False)
min_weight_fraction_leaf_R = widgets.Text(
value='0.0',
placeholder='enter any float value',
description='min_weight_fraction',
style = {'description_width': 'initial'},
disabled=False)
max_features_R = widgets.SelectMultiple(
options = ['auto', 'sqrt', 'log2'],
value = ['auto'],
description = 'Max_Features',
style = {'description_width': 'initial'},
rows = 3,
disabled = False)
random_state_R = widgets.Text(
value='0',
placeholder='enter any integer value',
description='Random_state',
style = {'description_width': 'initial'},
disabled=False)
max_leaf_nodes_R = widgets.Text(
value='2',
placeholder='enter any integer value',
description='Max_leaf_nodes',
style = {'description_width': 'initial'},
disabled=False)
min_impurity_decrease_R = widgets.Text(
value='0.0',
placeholder='enter any float value',
description='Min_impurity_decrease',
style = {'description_width': 'initial'},
disabled=False)
bootstrap_R = widgets.SelectMultiple(
options = [True, False],
value = [False],
description = 'Bootstrap',
rows = 2,
disabled = False)
oob_score_R = widgets.SelectMultiple(
options = [True, False],
value = [False],
description = 'oob_score',
rows = 2,
disabled = False)
verbose_R = widgets.Text(
value='0',
placeholder='enter any integer value',
description='Verbose',
disabled=False)
warm_state_R = widgets.SelectMultiple(
options = [True, False],
value = [False],
description = 'Warm_State',
rows = 2,
style = {'description_width': 'initial'},
disabled = False)
class_weight_R = widgets.SelectMultiple(
options = ['balanced', 'balanced_subsample', 'None'],
value = ['balanced'],
description = 'Class_weight',
rows = 3,
style = {'description_width': 'initial'},
disabled = False)
ccp_alpha_R = widgets.Text(
value='0.0',
placeholder='enter any non-negative float value',
description='ccp_alpha',
disabled=False)
max_samples_R = widgets.Text(
value='2',
placeholder='enter any float value',
description='max_samples',
style = {'description_width': 'initial'},
disabled=False)
h5 = widgets.HTML('<h4>Select Grid/Random search Hyperparameters</h4>')
search_param_R = widgets.Dropdown(
options=['GridSearch CV', 'Random Search CV'],
value='GridSearch CV',
description='Choose Search Option: ',
style = {'description_width': 'initial'},
disabled=False)
cv_R = widgets.Text(
value='5',
placeholder='enter any integer value',
description='CV',
style = {'description_width': 'initial'},
disabled=False)
n_jobs_R = widgets.Text(
value='1',
placeholder='enter any integer value',
description='n_jobs',
style = {'description_width': 'initial'},
disabled=False)
l = widgets.HBox([search_param_R, cv_R, n_jobs_R])
n_iter_R = widgets.Text(
value='10',
placeholder='enter any integer value',
description='n_iter',
style = {'description_width': 'initial'},
disabled=False)
n_iter_text = widgets.HTML(value='<p><em>For Random Search</em></p>')
m = widgets.HBox([n_iter_R, n_iter_text])
first_row = widgets.HBox([n_estimators_R, criterion_R, max_depth_R])
second_row = widgets.HBox([min_samples_split_R, min_samples_leaf_R, min_weight_fraction_leaf_R])
third_row = widgets.HBox([max_features_R, max_leaf_nodes_R, min_impurity_decrease_R])
fourth_row = widgets.HBox([max_samples_R, bootstrap_R, oob_score_R])
fifth_row = widgets.HBox([random_state_R, verbose_R])
sixth_row = widgets.HBox([warm_state_R, ccp_alpha_R])
button_4 = widgets.Button(description='Submit HP_Tune')
out_res_RF = widgets.Output()
def on_out_res_clicked_RF(_):
with out_res_RF:
clear_output()
import pandas as pd
self.new_X = self.X.copy(deep=True)
self.new_y = self.y
self.new_test = self.test.copy(deep=True)
categorical_cols = self.new_X.select_dtypes('object').columns.to_list()
for col in categorical_cols:
self.new_X[col].fillna(self.new_X[col].mode()[0], inplace=True)
numeric_cols = self.new_X.select_dtypes(['float64', 'int64']).columns.to_list()
for col in numeric_cols:
self.new_X[col].fillna(self.new_X[col].mean(), inplace=True)
test_categorical_cols = self.new_test.select_dtypes('object').columns.to_list()
for col in test_categorical_cols:
self.new_test[col].fillna(self.new_test[col].mode()[0], inplace=True)
numeric_cols = self.new_test.select_dtypes(['float64', 'int64']).columns.to_list()
for col in numeric_cols:
self.new_test[col].fillna(self.new_test[col].mean(), inplace=True)
if cat_info.value == '1':
for col in categorical_cols:
self.new_X[col] = self.new_X[col].astype('category')
self.new_X[col] = self.new_X[col].cat.codes
self.new_test[col] = self.new_test[col].astype('category')
self.new_test[col] = self.new_test[col].cat.codes
print('> Categorical columns converted using Catcodes')
if cat_info.value == '2':
self.new_X = pd.get_dummies(self.new_X,drop_first=True)
self.new_test = pd.get_dummies(self.new_test,drop_first=True)
print('> Categorical columns converted using Get_Dummies')
self.new_y = pd.DataFrame(self.train[[self.y]])
self.new_y = pd.get_dummies(self.new_y,drop_first=True)
if std_scr.value == '1':
from sklearn.preprocessing import StandardScaler
scalar = StandardScaler()
self.new_X = pd.DataFrame(scalar.fit_transform(self.new_X), columns=self.new_X.columns, index=self.new_X.index)
self.new_test = pd.DataFrame(scalar.fit_transform(self.new_test), columns=self.new_test.columns, index=self.new_test.index)
print('> standardization using Standard Scaler completed')
elif std_scr.value == '2':
from sklearn.preprocessing import RobustScaler
scalar = RobustScaler()
self.new_X= pd.DataFrame(scalar.fit_transform(self.new_X), columns=self.new_X.columns, index=self.new_X.index)
self.new_test= pd.DataFrame(scalar.fit_transform(self.new_test), columns=self.new_test.columns, index=self.new_test.index)
print('> standardization using Robust Scaler completed')
elif std_scr.value == '3':
from sklearn.preprocessing import MinMaxScaler
scalar = MinMaxScaler()
self.new_X= pd.DataFrame(scalar.fit_transform(self.new_X), columns=self.new_X.columns, index=self.new_X.index)
self.new_test= pd.DataFrame(scalar.fit_transform(self.new_test), columns=self.new_test.columns, index=self.new_test.index)
print('> standardization using MinMax Scaler completed')
elif std_scr.value == 'n':
print('> No standardization done')
param_grid = {'n_estimators' : [int(item) for item in n_estimators_R.value.split(',')],
'criterion': list(criterion_R.value),
'max_depth': [int(item) for item in max_depth_R.value.split(',')],
'min_samples_split' : [int(item) for item in min_samples_split_R.value.split(',')],
'min_samples_leaf' : [int(item) for item in min_samples_leaf_R.value.split(',')],
'min_weight_fraction_leaf' : [float(item) for item in min_weight_fraction_leaf_R.value.split(',')],
'max_features' : list(max_features_R.value),
'random_state' : [int(item) for item in random_state_R.value.split(',')],
'max_leaf_nodes' : [int(item) for item in max_leaf_nodes_R.value.split(',')],
'min_impurity_decrease' : [float(item) for item in min_impurity_decrease_R.value.split(',')],
'bootstrap' : list(bootstrap_R.value),
'oob_score' : list(oob_score_R.value),
'verbose' : [int(item) for item in verbose_R.value.split(',')],
'ccp_alpha' : [float(item) for item in ccp_alpha_R.value.split(',')],
'max_samples' : [int(item) for item in max_samples_R.value.split(',')]
}
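# Grid/randomized search over RandomForestRegressor follows; results are
# evaluated on an 80/20 hold-out split and, when tracking is on, logged to
# mlflow via autolog plus explicit log_param calls.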
if self.tracking == True:
import mlflow
from mlflow import log_metric, log_param, log_artifacts
mlflow.sklearn.autolog()
estimator = RandomForestRegressor()
if search_param_R.value == 'GridSearch CV':
print('> GridSearch CV in progress...')
grid_lr = GridSearchCV(estimator=estimator,
param_grid = param_grid,
cv = int(cv_R.value),
n_jobs = int(n_jobs_R.value))
if search_param_R.value == 'Random Search CV':
print('> Randomized Search CV in progress...')
grid_lr = RandomizedSearchCV(estimator=estimator,
param_distributions = param_grid,
cv = int(cv_R.value),
n_iter = int(n_iter_R.value),
n_jobs = int(n_jobs_R.value))
with mlflow.start_run() as run:
warnings.filterwarnings("ignore")
self.regressor = grid_lr.fit(self.new_X.values, self.new_y.values)
from sklearn.model_selection import train_test_split
from sklearn.metrics import explained_variance_score, mean_absolute_error, mean_squared_error, r2_score
X_train, X_test, y_train, y_test = train_test_split(
self.new_X.values, self.new_y.values, test_size=0.2, random_state=1)
predictions = self.regressor.predict(X_test)
exp_var = explained_variance_score(y_test, predictions)
mae = mean_absolute_error(y_test, predictions)
rmse = sqrt(mean_squared_error(y_test, predictions))
r2 = r2_score(y_test, predictions)
mlflow.log_param("Exp_Var", exp_var)
mlflow.log_param("MAE", mae)
mlflow.log_param("RMSE", rmse)
mlflow.log_param('R2', r2)
mlflow.log_param("Best Estimator", self.regressor.best_estimator_)
if self.tracking == False:
estimator = RandomForestRegressor()
if search_param_R.value == 'GridSearch CV':
print('> GridSearch CV in progress...')
grid_lr = GridSearchCV(estimator=estimator,
param_grid = param_grid,
cv = int(cv_R.value),
n_jobs = int(n_jobs_R.value))
if search_param_R.value == 'Random Search CV':
print('> Randomized Search CV in progress...')
grid_lr = RandomizedSearchCV(estimator=estimator,
param_distributions = param_grid,
cv = int(cv_R.value),
n_iter = int(n_iter_R.value),
n_jobs = int(n_jobs_R.value))
warnings.filterwarnings("ignore")
self.regressor = grid_lr.fit(self.new_X.values, self.new_y.values)
from sklearn.model_selection import train_test_split
from sklearn.metrics import explained_variance_score, mean_absolute_error, mean_squared_error, r2_score
X_train, X_test, y_train, y_test = train_test_split(
self.new_X.values, self.new_y.values, test_size=0.2, random_state=1)
predictions = self.regressor.predict(X_test)
exp_var = explained_variance_score(y_test, predictions)
mae = mean_absolute_error(y_test, predictions)
rmse = sqrt(mean_squared_error(y_test, predictions))
r2 = r2_score(y_test, predictions)
with out2:
clear_output()
print('\033[1m'+'\033[4m'+'Get_Params \n***************************************'+'\033[0m')
print(self.regressor.get_params)
print('\033[1m'+'\033[4m'+'Best_Estimator \n***********************************'+'\033[0m')
print(self.regressor.best_estimator_)
print('\033[1m'+'\033[4m'+'Metrics on Train data \n******************************'+'\033[0m')
print("exp_var = {:.3f} | mae = {:,.3f} | rmse = {:,.3f} | r2 = {:,.3f} ".format(exp_var, mae, rmse, r2,))
print('\033[1m'+'\033[4m'+'Predictions on stand_out test data \n******************************'+'\033[0m')
self.predict_HP = self.regressor.predict(self.new_test)
print('Prediction completed. \nUse the dot operator in the code cell below to access the predictions, e.g., pph.predict_HP, where pph is the pywedge_HP class object')
msg = widgets.HTML('<h4>Please switch to output tab for results...</h4>')
msg_1 = widgets.HTML('<h4>Please run mlflow ui in the command prompt to monitor HP tuning results</h4>')
display(msg)
if self.tracking==True:
display(msg_1)
button_4.on_click(on_out_res_clicked_RF)
b = widgets.VBox([button_4, out_res_RF])
h1 = widgets.HTML('<h3>Select Random Forest Regressor Hyperparameters</h3>')
frame_RF = widgets.VBox([header_2, pp_class, h1, first_row, second_row, third_row, fourth_row, fifth_row, sixth_row, h5, l, m, b])
# Random Forest Regressor Hyperparameter ends
# KNN Regressor Hyperparameter Starts
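# The widgets below mirror KNeighborsRegressor arguments (n_neighbors,
# weights, algorithm, leaf_size, p, metric).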
n_neighbors_k = widgets.Text(
value='5',
placeholder='enter any integer value',
description='n_neighbors',
disabled=False)
weights_k = widgets.SelectMultiple(
options = ['uniform', 'distance'],
value = ['uniform'],
rows = 2,
description = 'Weights',
disabled = False)
algorithm_k = widgets.SelectMultiple(
options = ['auto', 'ball_tree', 'kd_tree', 'brute'],
value = ['auto'],
rows = 4,
description = 'Algorithm',
disabled = False)
leaf_size_k = widgets.Text(
value='30',
placeholder='enter any integer value',
description='Leaf_Size',
disabled=False)
p_k = widgets.Text(
value='2',
placeholder='enter any integer value',
description='p (Power param)',
disabled=False)
metric_k = widgets.SelectMultiple(
options = ['euclidean', 'manhattan', 'chebyshev', 'minkowski'],
value = ['minkowski'],
rows = 4,
description = 'Metric',
disabled = False)
h5 = widgets.HTML('<h4>Select Grid/Random search Hyperparameters</h4>')
search_param_K = widgets.Dropdown(
options=['GridSearch CV', 'Random Search CV'],
value='GridSearch CV',
description='Choose Search Option: ',
style = {'description_width': 'initial'},
disabled=False)
cv_K = widgets.Text(
value='5',
placeholder='enter any integer value',
description='CV',
style = {'description_width': 'initial'},
disabled=False)
n_jobs_K = widgets.Text(
value='1',
placeholder='enter any integer value',
description='n_jobs',
style = {'description_width': 'initial'},
disabled=False)
l = widgets.HBox([search_param_K, cv_K, n_jobs_K])
n_iter_K = widgets.Text(
value='10',
placeholder='enter any integer value',
description='n_iter',
style = {'description_width': 'initial'},
disabled=False)
n_iter_text = widgets.HTML(value='<p><em>For Random Search</em></p>')
m = widgets.HBox([n_iter_K, n_iter_text])
first_row = widgets.HBox([n_neighbors_k, weights_k, algorithm_k])
second_row = widgets.HBox([leaf_size_k, p_k, metric_k])
button_5 = widgets.Button(description='Submit HP_Tune')
out_res_K = widgets.Output()
def on_out_res_clicked_K(_):
with out_res_K:
clear_output()
import pandas as pd
self.new_X = self.X.copy(deep=True)
self.new_y = self.y
self.new_test = self.test.copy(deep=True)
categorical_cols = self.new_X.select_dtypes('object').columns.to_list()
for col in categorical_cols:
self.new_X[col].fillna(self.new_X[col].mode()[0], inplace=True)
numeric_cols = self.new_X.select_dtypes(['float64', 'int64']).columns.to_list()
for col in numeric_cols:
self.new_X[col].fillna(self.new_X[col].mean(), inplace=True)
test_categorical_cols = self.new_test.select_dtypes('object').columns.to_list()
for col in test_categorical_cols:
self.new_test[col].fillna(self.new_test[col].mode()[0], inplace=True)
numeric_cols = self.new_test.select_dtypes(['float64', 'int64']).columns.to_list()
for col in numeric_cols:
self.new_test[col].fillna(self.new_test[col].mean(), inplace=True)
if cat_info.value == '1':
for col in categorical_cols:
self.new_X[col] = self.new_X[col].astype('category')
self.new_X[col] = self.new_X[col].cat.codes
self.new_test[col] = self.new_test[col].astype('category')
self.new_test[col] = self.new_test[col].cat.codes
print('> Categorical columns converted using Catcodes')
if cat_info.value == '2':
self.new_X = pd.get_dummies(self.new_X,drop_first=True)
self.new_test = pd.get_dummies(self.new_test,drop_first=True)
print('> Categorical columns converted using Get_Dummies')
self.new_y = pd.DataFrame(self.train[[self.y]])
self.new_y = pd.get_dummies(self.new_y,drop_first=True)
if std_scr.value == '1':
from sklearn.preprocessing import StandardScaler
scalar = StandardScaler()
self.new_X = pd.DataFrame(scalar.fit_transform(self.new_X), columns=self.new_X.columns, index=self.new_X.index)
self.new_test = pd.DataFrame(scalar.fit_transform(self.new_test), columns=self.new_test.columns, index=self.new_test.index)
print('> standardization using Standard Scaler completed')
elif std_scr.value == '2':
from sklearn.preprocessing import RobustScaler
scalar = RobustScaler()
self.new_X= pd.DataFrame(scalar.fit_transform(self.new_X), columns=self.new_X.columns, index=self.new_X.index)
self.new_test= pd.DataFrame(scalar.fit_transform(self.new_test), columns=self.new_test.columns, index=self.new_test.index)
print('> standardization using Robust Scaler completed')
elif std_scr.value == '3':
from sklearn.preprocessing import MinMaxScaler
scalar = MinMaxScaler()
self.new_X= pd.DataFrame(scalar.fit_transform(self.new_X), columns=self.new_X.columns, index=self.new_X.index)
self.new_test= pd.DataFrame(scalar.fit_transform(self.new_test), columns=self.new_test.columns, index=self.new_test.index)
print('> standardization using MinMax Scaler completed')
elif std_scr.value == 'n':
print('> No standardization done')
param_grid = {'n_neighbors' : [int(item) for item in n_neighbors_k.value.split(',')],
'weights': list(weights_k.value),
'algorithm': list(algorithm_k.value),
'leaf_size' : [int(item) for item in leaf_size_k.value.split(',')],
'p' : [int(item) for item in p_k.value.split(',')],
'metric' : list(metric_k.value),
}
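# Same search-and-evaluate pattern as the other regressors: fit the chosen
# CV search, score an 80/20 hold-out split, optionally log to mlflow.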
if self.tracking == True:
import mlflow
from mlflow import log_metric, log_param, log_artifacts
mlflow.sklearn.autolog()
estimator = KNeighborsRegressor()
if search_param_K.value == 'GridSearch CV':
grid_lr = GridSearchCV(estimator=estimator,
param_grid = param_grid,
cv = int(cv_K.value),
n_jobs = int(n_jobs_K.value))
if search_param_K.value == 'Random Search CV':
grid_lr = RandomizedSearchCV(estimator=estimator,
param_distributions = param_grid,
cv = int(cv_K.value),
n_iter = int(n_iter_K.value),
n_jobs = int(n_jobs_K.value))
with mlflow.start_run() as run:
warnings.filterwarnings("ignore")
self.regressor = grid_lr.fit(self.new_X.values, self.new_y.values)
from sklearn.model_selection import train_test_split
                        from sklearn.metrics import explained_variance_score, mean_absolute_error, mean_squared_error, r2_score
X_train, X_test, y_train, y_test = train_test_split(
self.new_X.values, self.new_y.values, test_size=0.2, random_state=1)
predictions = self.regressor.predict(X_test)
exp_var = explained_variance_score(y_test, predictions)
mae = mean_absolute_error(y_test, predictions)
                        rmse = sqrt(mean_squared_error(y_test, predictions))
r2 = r2_score(y_test, predictions)
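                        # Record the hold-out metrics and the tuned estimator in the active MLflow run.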
mlflow.log_param("Exp_Var", exp_var)
mlflow.log_param("MAE", mae)
mlflow.log_param("RMSE", rmse)
mlflow.log_param('R2', r2)
mlflow.log_param("Best Estimator", self.regressor.best_estimator_)
if self.tracking == False:
estimator = KNeighborsRegressor()
if search_param_K.value == 'GridSearch CV':
print('> GridSearch CV in progress...')
grid_lr = GridSearchCV(estimator=estimator,
param_grid = param_grid,
cv = int(cv_L.value),
n_jobs = int(n_jobs_R.value))
if search_param_K.value == 'Random Search CV':
print('> Randomized Search CV in progress...')
grid_lr = RandomizedSearchCV(estimator=estimator,
param_distributions = param_grid,
cv = int(cv_L.value),
n_iter = int(n_iter_L.value),
n_jobs = int(n_jobs_R.value))
warnings.filterwarnings("ignore")
self.regressor = grid_lr.fit(self.new_X.values, self.new_y.values)
from sklearn.model_selection import train_test_split
                    from sklearn.metrics import explained_variance_score, mean_absolute_error, mean_squared_error, r2_score
X_train, X_test, y_train, y_test = train_test_split(
self.new_X.values, self.new_y.values, test_size=0.2, random_state=1)
predictions = self.regressor.predict(X_test)
exp_var = explained_variance_score(y_test, predictions)
mae = mean_absolute_error(y_test, predictions)
                    rmse = sqrt(mean_squared_error(y_test, predictions))
r2 = r2_score(y_test, predictions)
with out2:
clear_output()
print('\033[1m'+'\033[4m'+'Get_Params \n***************************************'+'\033[0m')
print(self.regressor.get_params)
print('\033[1m'+'\033[4m'+'Best_Estimator \n***********************************'+'\033[0m')
print(self.regressor.best_estimator_)
print('\033[1m'+'\033[4m'+'Metrics on Train data \n******************************'+'\033[0m')
print("exp_var = {:.3f} | mae = {:,.3f} | rmse = {:,.3f} | r2 = {:,.3f} ".format(exp_var, mae, rmse, r2,))
print('\033[1m'+'\033[4m'+'Predictions on stand_out test data \n******************************'+'\033[0m')
self.predict_HP = self.regressor.predict(self.new_test)
                    print('Prediction completed. \nUse the dot operator in the code cell below to access the predictions, e.g., pph.predict_HP, where pph is the pywedge_HP class object')
msg = widgets.HTML('<h4>Please switch to output tab for results...</h4>')
msg_1 = widgets.HTML('<h4>Please run mlfow ui in command prompt to monitor HP tuning results</h4>')
display(msg)
if self.tracking==True:
display(msg_1)
button_5.on_click(on_out_res_clicked_K)
b = widgets.VBox([button_5, out_res_K])
h1 = widgets.HTML('<h3>Select KNN Regressor Hyperparameters</h3>')
frame_K = widgets.VBox([header_2, pp_class, h1, first_row, second_row, h5, l, m, b])
# KNN Regressor Hyperparameter ends
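        # For reference, a minimal non-widget sketch of the same KNN grid search (assuming only
        # scikit-learn; X and y stand for any preprocessed feature matrix and target):
        #   from sklearn.model_selection import GridSearchCV
        #   from sklearn.neighbors import KNeighborsRegressor
        #   grid = GridSearchCV(KNeighborsRegressor(),
        #                       {'n_neighbors': [3, 5, 7], 'weights': ['uniform', 'distance']},
        #                       cv=5, n_jobs=-1)
        #   grid.fit(X, y); print(grid.best_estimator_)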
        # Adaboost Regressor Hyperparameter Starts
n_estimators_A = widgets.Text(
value='50',
placeholder='enter any integer value',
description='n_estimators',
disabled=False)
learning_rate_A = widgets.Text(
value='1',
placeholder='enter any float value',
description='learning_rate',
disabled=False)
loss_A = widgets.SelectMultiple(
            options = ['linear', 'square', 'exponential'],
value = ['linear'],
rows = 3,
description = 'Loss',
disabled = False)
random_state_A = widgets.Text(
value='0',
placeholder='enter any integer value',
description='Random_state',
style = {'description_width': 'initial'},
disabled=False)
h5 = widgets.HTML('<h4>Select Grid/Random search Hyperparameters</h4>')
search_param_A = widgets.Dropdown(
options=['GridSearch CV', 'Random Search CV'],
value='GridSearch CV',
description='Choose Search Option: ',
style = {'description_width': 'initial'},
disabled=False)
cv_A = widgets.Text(
value='5',
placeholder='enter any integer value',
description='CV',
style = {'description_width': 'initial'},
disabled=False)
l = widgets.HBox([search_param_A, cv_A])
n_jobs_A = widgets.Text(
value='1',
placeholder='enter any integer value',
description='n_jobs',
style = {'description_width': 'initial'},
disabled=False)
n_iter_A = widgets.Text(
value='10',
placeholder='enter any integer value',
description='n_iter',
style = {'description_width': 'initial'},
disabled=False)
n_iter_text = widgets.HTML(value='<p><em>For Random Search</em></p>')
m = widgets.HBox([n_jobs_A, n_iter_A, n_iter_text])
first_row = widgets.HBox([n_estimators_A, learning_rate_A, loss_A])
second_row = widgets.HBox([random_state_A])
button_6 = widgets.Button(description='Submit Adaboost HPTune')
out_res_ADA = widgets.Output()
def on_out_res_clicked_ADA(_):
with out_res_ADA:
clear_output()
import pandas as pd
self.new_X = self.X.copy(deep=True)
self.new_y = self.y
self.new_test = self.test.copy(deep=True)
categorical_cols = self.new_X.select_dtypes('object').columns.to_list()
for col in categorical_cols:
self.new_X[col].fillna(self.new_X[col].mode()[0], inplace=True)
numeric_cols = self.new_X.select_dtypes(['float64', 'int64']).columns.to_list()
for col in numeric_cols:
self.new_X[col].fillna(self.new_X[col].mean(), inplace=True)
test_categorical_cols = self.new_test.select_dtypes('object').columns.to_list()
for col in test_categorical_cols:
self.new_test[col].fillna(self.new_test[col].mode()[0], inplace=True)
numeric_cols = self.new_test.select_dtypes(['float64', 'int64']).columns.to_list()
for col in numeric_cols:
self.new_test[col].fillna(self.new_test[col].mean(), inplace=True)
if cat_info.value == '1':
for col in categorical_cols:
self.new_X[col] = self.new_X[col].astype('category')
self.new_X[col] = self.new_X[col].cat.codes
self.new_test[col] = self.new_test[col].astype('category')
self.new_test[col] = self.new_test[col].cat.codes
print('> Categorical columns converted using Catcodes')
if cat_info.value == '2':
self.new_X = pd.get_dummies(self.new_X,drop_first=True)
self.new_test = pd.get_dummies(self.new_test,drop_first=True)
print('> Categorical columns converted using Get_Dummies')
self.new_y = | pd.DataFrame(self.train[[self.y]]) | pandas.DataFrame |
# coding: utf-8
# # Weather Prediction Using Recurrent Neural Networks
#
# ## Adrian, Ben, and Sai
# In[1]:
get_ipython().run_line_magic('matplotlib', 'inline')
from functools import reduce
import datetime
import pandas as pd
from pandas import Series, DataFrame
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.contrib import rnn
from sklearn.preprocessing import MinMaxScaler
import timeit
import random
import sys
import os
# # Preprocessing
###########################################################################################################################
############# Preprocessing ##############################################################################################
###########################################################################################################################
# ### Read in the files
# In[2]:
# Filenames
city_file = 'city_attributes.csv'
temp_file = 'temperature.csv'
humid_file = 'humidity.csv'
press_file = 'pressure.csv'
desc_file = 'weather_description.csv'
wdir_file = 'wind_direction.csv'
wspeed_file = 'wind_speed.csv'
# Load the files
city_df = pd.read_csv(city_file)
city_df.rename(str.lower, axis = 'columns', inplace = True)
city_df.drop(['country'], axis = 1, inplace = True)
city_df.set_index(['city'], inplace = True)
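# Each measurement CSV is assumed to hold one column per city plus a shared datetime column.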
temp_df = | pd.read_csv(temp_file) | pandas.read_csv |
#!/usr/bin/python
import warnings
warnings.filterwarnings("ignore")
import os,numpy,pandas,sys,scipy.io,scipy.sparse,time,numba
from optparse import OptionParser
#
#
opts = OptionParser()
usage = "Evaluate gene score by TSS peaks\nusage: %prog -s project --gtf hg19.gtf --distal 20000"
opts = OptionParser(usage=usage, version="%prog 1.0")
opts.add_option("-s", help="The project folder.")
opts.add_option("--gtf", default='../reference/hg19_RefSeq_genes.gtf',
help="gtf file for genome, default=../reference/hg19_RefSeq_genes.gtf")
opts.add_option("--distal", default=20000,
help="distal region around TSS for peak searching, default=20000")
options, arguments = opts.parse_args()
#
#
def get_tss_region(options):
mm10_df = pandas.read_csv(options.gtf, sep='\t', index_col=0)
genes = list(set(mm10_df['name2']))
genes.sort()
mm10_df.index = mm10_df['name']
names, tss = [], []
for symbol in genes:
sub_df = mm10_df.loc[mm10_df['name2']==symbol]
if len(sub_df.index.values)>=1:
chrom = list(set(sub_df['chrom'].values))
strand = list(set(sub_df['strand'].values))
if len(chrom)==1:
if strand[0]=='+':
starts = list(set(map(str, sub_df['txStart'].values)))
start = ','.join(starts)
elif strand[0]=='-':
starts = list(set(map(str, sub_df['txEnd'].values)))
start = ','.join(starts)
names.append(symbol)
tss.append([chrom[0], start])
tss = numpy.array(tss)
tss_df = pandas.DataFrame(tss, index=names, columns=['chrom', 'tss'])
tss_df.to_csv(options.s+'/peak/genes_tss_region.csv', sep='\t')
return
#
#
def get_tss_peaks(options):
peaks = [[x.split()[0], (int(x.split()[1])+int(x.split()[2]))/2]
for x in open(options.s+'/peak/top_filtered_peaks.bed').readlines()]
peaks_df = pandas.DataFrame(peaks, index=[str(x) for x in numpy.arange(0,len(peaks))],
columns=['chrom', 'center'])
tss_df = pandas.read_csv(options.s+'/peak/genes_tss_region.csv', sep='\t', index_col=0)
for gene in tss_df.index.values:
        chrom, tsses = tss_df.loc[gene, 'chrom'], tss_df.loc[gene, 'tss']
tsses = map(int, tsses.split(','))
chr_peaks = peaks_df.loc[peaks_df['chrom']==chrom]
proxim_peaks, distal_peaks = [], []
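        # Peaks within 2 kb of any TSS are proximal; peaks within --distal bp (excluding proximal ones) are distal.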
for tss in tsses:
peaks1 = chr_peaks.loc[abs(chr_peaks['center']-tss)<=2000].index.values
peaks2 = chr_peaks.loc[abs(chr_peaks['center']-tss)<=int(options.distal)].index.values
proxim_peaks.extend(peaks1)
distal_peaks.extend(peaks2)
proxim_peaks = list(set(proxim_peaks))
distal_peaks = list(set(distal_peaks)-set(proxim_peaks))
if len(proxim_peaks)==0: proxim_peaks = ['NONE']
if len(distal_peaks)==0: distal_peaks = ['NONE']
proxim_peaks = ';'.join(proxim_peaks)
        tss_df.loc[gene, 'proximal'] = proxim_peaks
        distal_peaks = ';'.join(distal_peaks)
        tss_df.loc[gene, 'distal'] = distal_peaks
tss_df.to_csv(options.s+'/peak/genes_tss_peaks.csv', sep='\t')
return
#
#
def get_score_from_peaks(options):
tss_df = pandas.read_csv(options.s+'/peak/genes_tss_peaks.csv', sep='\t', index_col=0)
reads = scipy.sparse.csr_matrix(scipy.io.mmread(options.s+'/matrix/filtered_reads.mtx')).T
cells_df = pandas.read_csv(options.s+'/matrix/filtered_cells.csv', sep='\t', index_col=0)
all_peaks = numpy.arange(0, reads.shape[1])
genes, score = [], []
for igene,gene in enumerate(tss_df.index.values):
distal = tss_df.loc[gene, 'distal'].split(';')
proximal = tss_df.loc[gene, 'proximal'].split(';')
if distal==['NONE']:
distal = []
else:
distal = list(map(int, distal))
if proximal==['NONE']:
proximal = []
else:
proximal = list(map(int, proximal))
distal = list(set(distal).union(set(proximal)))
distal = list(set(distal).intersection(set(all_peaks)))
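        # Gene score = mean read count across all peaks linked to the gene (proximal and distal combined).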
if len(distal)>0:
signal = reads[:, distal].A.mean(axis=1)
genes.append(gene)
score.append(signal)
score = numpy.array(score)
score_df = | pandas.DataFrame(score, index=genes, columns=cells_df.index) | pandas.DataFrame |
import numpy as np
import pandas as pd
import matplotlib.pylab as plt
import tensorflow as tf
from sklearn.svm import LinearSVR
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_squared_log_error as msle
import keras
import keras.layers as layers
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import VotingRegressor
from xgboost import XGBRegressor
from pyramid.arima import auto_arima
dftr=pd.read_csv('Train.csv',parse_dates=['date_time'],index_col='date_time')
dfte=pd.read_csv('Test.csv',parse_dates=['date_time'],index_col='date_time')
dfss=pd.read_csv('sample_submission.csv')
dftr.wind_direction=(3.14/180)*(dftr.wind_direction) #wind_direction cardinal degrees to radians
print(dftr.shape)
date=dftr.index
#Feature Engineering
ms=np.array([],dtype=str) #boolean features
me=np.array([],dtype=str)
ys=np.array([],dtype=str)
ye=np.array([],dtype=str)
qs=np.array([],dtype=str)
qe=np.array([],dtype=str)
for i in range(33750):
aaa=pd.Timestamp(date[i])
me=np.append(me,aaa.is_month_end)
ms=np.append(ms,aaa.is_month_start)
ye=np.append(ye,aaa.is_year_end)
ys=np.append(ys,aaa.is_year_start)
qe=np.append(qe,aaa.is_quarter_end)
qs=np.append(qs,aaa.is_quarter_start)
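# Extract hour, day of week, week of year, week of month, month/day names and a scaled year digit pair.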
time=np.array([],dtype=int) # Features extracted from date_time
woy=np.array([],dtype=int)
dow=np.array([],dtype=int)
month=np.array([],dtype=str)
week=np.array([],dtype=float)
days=np.array([],dtype=str)
year=np.array([],dtype=float)
for i in range(33750):
aaa=pd.Timestamp(date[i])
days=np.append(days,aaa.day_name())
month=np.append(month,aaa.month_name())
number_dec = str((aaa.year/100)-int(aaa.year/100)).split('.')[1]
year=np.append(year,int(number_dec[0:2])/10)
time=np.append(time,aaa.hour)
dow=np.append(dow,aaa.dayofweek)
woy=np.append(woy,aaa.weekofyear)
week=np.append(week,((aaa.day-1) // 7 + 1))
tr_x=dftr.iloc[:,0:13]
#One Hot Encoding
tr_x= | pd.get_dummies(tr_x) | pandas.get_dummies |
import pandas as pd
default_parameters = {
'p_prot': -1,
'k_prot': -1,
'l_prot': | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
from datetime import datetime, timedelta, date, time
import numpy as np
import pandas as pd
import pandas.lib as lib
import pandas.util.testing as tm
from pandas import Index
from pandas.compat import long, u, PY2
class TestInference(tm.TestCase):
def test_infer_dtype_bytes(self):
compare = 'string' if PY2 else 'bytes'
# string array of bytes
arr = np.array(list('abc'), dtype='S1')
self.assertEqual(pd.lib.infer_dtype(arr), compare)
# object array of bytes
arr = arr.astype(object)
self.assertEqual(pd.lib.infer_dtype(arr), compare)
def test_isinf_scalar(self):
# GH 11352
self.assertTrue(lib.isposinf_scalar(float('inf')))
self.assertTrue(lib.isposinf_scalar(np.inf))
self.assertFalse(lib.isposinf_scalar(-np.inf))
self.assertFalse(lib.isposinf_scalar(1))
self.assertFalse(lib.isposinf_scalar('a'))
self.assertTrue(lib.isneginf_scalar(float('-inf')))
self.assertTrue(lib.isneginf_scalar(-np.inf))
self.assertFalse(lib.isneginf_scalar(np.inf))
self.assertFalse(lib.isneginf_scalar(1))
self.assertFalse(lib.isneginf_scalar('a'))
def test_maybe_convert_numeric_infinities(self):
# see gh-13274
infinities = ['inf', 'inF', 'iNf', 'Inf',
'iNF', 'InF', 'INf', 'INF']
na_values = set(['', 'NULL', 'nan'])
pos = np.array(['inf'], dtype=np.float64)
neg = np.array(['-inf'], dtype=np.float64)
msg = "Unable to parse string"
for infinity in infinities:
for maybe_int in (True, False):
out = lib.maybe_convert_numeric(
np.array([infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
out = lib.maybe_convert_numeric(
np.array(['-' + infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, neg)
out = lib.maybe_convert_numeric(
np.array([u(infinity)], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
out = lib.maybe_convert_numeric(
np.array(['+' + infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
# too many characters
with tm.assertRaisesRegexp(ValueError, msg):
lib.maybe_convert_numeric(
np.array(['foo_' + infinity], dtype=object),
na_values, maybe_int)
def test_maybe_convert_numeric_post_floatify_nan(self):
# see gh-13314
data = np.array(['1.200', '-999.000', '4.500'], dtype=object)
expected = np.array([1.2, np.nan, 4.5], dtype=np.float64)
nan_values = set([-999, -999.0])
for coerce_type in (True, False):
out = lib.maybe_convert_numeric(data, nan_values, coerce_type)
tm.assert_numpy_array_equal(out, expected)
def test_convert_infs(self):
arr = np.array(['inf', 'inf', 'inf'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False)
self.assertTrue(result.dtype == np.float64)
arr = np.array(['-inf', '-inf', '-inf'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False)
self.assertTrue(result.dtype == np.float64)
def test_scientific_no_exponent(self):
# See PR 12215
arr = np.array(['42E', '2E', '99e', '6e'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False, True)
self.assertTrue(np.all(np.isnan(result)))
def test_convert_non_hashable(self):
# GH13324
# make sure that we are handing non-hashables
arr = np.array([[10.0, 2], 1.0, 'apple'])
result = lib.maybe_convert_numeric(arr, set(), False, True)
tm.assert_numpy_array_equal(result, np.array([np.nan, 1.0, np.nan]))
class TestTypeInference(tm.TestCase):
_multiprocess_can_split_ = True
def test_length_zero(self):
result = lib.infer_dtype(np.array([], dtype='i4'))
self.assertEqual(result, 'integer')
result = lib.infer_dtype([])
self.assertEqual(result, 'empty')
def test_integers(self):
arr = np.array([1, 2, 3, np.int64(4), np.int32(5)], dtype='O')
result = lib.infer_dtype(arr)
self.assertEqual(result, 'integer')
arr = np.array([1, 2, 3, np.int64(4), np.int32(5), 'foo'], dtype='O')
result = lib.infer_dtype(arr)
self.assertEqual(result, 'mixed-integer')
arr = np.array([1, 2, 3, 4, 5], dtype='i4')
result = lib.infer_dtype(arr)
self.assertEqual(result, 'integer')
def test_bools(self):
arr = np.array([True, False, True, True, True], dtype='O')
result = lib.infer_dtype(arr)
self.assertEqual(result, 'boolean')
arr = np.array([np.bool_(True), np.bool_(False)], dtype='O')
result = lib.infer_dtype(arr)
self.assertEqual(result, 'boolean')
arr = np.array([True, False, True, 'foo'], dtype='O')
result = lib.infer_dtype(arr)
self.assertEqual(result, 'mixed')
arr = np.array([True, False, True], dtype=bool)
result = lib.infer_dtype(arr)
self.assertEqual(result, 'boolean')
def test_floats(self):
arr = np.array([1., 2., 3., np.float64(4), np.float32(5)], dtype='O')
result = lib.infer_dtype(arr)
self.assertEqual(result, 'floating')
arr = np.array([1, 2, 3, np.float64(4), np.float32(5), 'foo'],
dtype='O')
result = lib.infer_dtype(arr)
self.assertEqual(result, 'mixed-integer')
arr = np.array([1, 2, 3, 4, 5], dtype='f4')
result = lib.infer_dtype(arr)
self.assertEqual(result, 'floating')
arr = np.array([1, 2, 3, 4, 5], dtype='f8')
result = lib.infer_dtype(arr)
self.assertEqual(result, 'floating')
def test_string(self):
pass
def test_unicode(self):
pass
def test_datetime(self):
dates = [datetime(2012, 1, x) for x in range(1, 20)]
index = Index(dates)
self.assertEqual(index.inferred_type, 'datetime64')
def test_date(self):
dates = [date(2012, 1, x) for x in range(1, 20)]
index = Index(dates)
self.assertEqual(index.inferred_type, 'date')
def test_to_object_array_tuples(self):
r = (5, 6)
values = [r]
result = lib.to_object_array_tuples(values)
try:
# make sure record array works
from collections import namedtuple
record = namedtuple('record', 'x y')
r = record(5, 6)
values = [r]
result = lib.to_object_array_tuples(values) # noqa
except ImportError:
pass
def test_to_object_array_width(self):
# see gh-13320
rows = [[1, 2, 3], [4, 5, 6]]
expected = np.array(rows, dtype=object)
out = lib.to_object_array(rows)
tm.assert_numpy_array_equal(out, expected)
expected = np.array(rows, dtype=object)
out = lib.to_object_array(rows, min_width=1)
tm.assert_numpy_array_equal(out, expected)
expected = np.array([[1, 2, 3, None, None],
[4, 5, 6, None, None]], dtype=object)
out = lib.to_object_array(rows, min_width=5)
tm.assert_numpy_array_equal(out, expected)
def test_object(self):
# GH 7431
# cannot infer more than this as only a single element
arr = np.array([None], dtype='O')
result = lib.infer_dtype(arr)
self.assertEqual(result, 'mixed')
def test_categorical(self):
# GH 8974
from pandas import Categorical, Series
arr = Categorical(list('abc'))
result = lib.infer_dtype(arr)
self.assertEqual(result, 'categorical')
result = lib.infer_dtype(Series(arr))
self.assertEqual(result, 'categorical')
arr = Categorical(list('abc'), categories=['cegfab'], ordered=True)
result = lib.infer_dtype(arr)
self.assertEqual(result, 'categorical')
result = lib.infer_dtype( | Series(arr) | pandas.Series |
import logging
import pandas as pd
import requests
from common.predictions import PredictionService, PredictionError
logger = logging.getLogger(__name__)
class DataRobotV1APIPredictionService(PredictionService):
"""
A chord prediction service powered by DataRobot V1 API for model deployments.
"""
def __init__(self, server, server_key, deployment_id, username, api_token):
self.server = server
self.server_key = server_key
self.deployment_id = deployment_id
self.username = username
self.api_token = api_token
def predict(self, df):
logger.info(f'Using DataRobot V1 prediction service on data shape {df.shape}')
rows = df.to_dict(orient='records')
dr_payload = self.get_datarobot_predictions(rows)
result = [
self.get_label_and_confidence(row)
for row in dr_payload['data']
]
return | pd.DataFrame(result, columns=['name', 'confidence']) | pandas.DataFrame |
import pandas as pd
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
from scripts.nn_train import TabularDataset, FeedForwardNN, binary_acc
from sklearn.preprocessing import LabelEncoder
class NeuralNet:
def __init__(self):
self.categorical_features = ["MaxDelq2PublicRecLast12M", "MaxDelqEver"]
self.output_feature = "RiskPerformance"
self.label_encoders = {}
self.model_name = "Neural Net"
def fit(self, X_train, y_train):
train = | pd.concat([X_train, y_train], axis=1) | pandas.concat |
import pandas as pd
#Function to create two dictionaries based on the Human Development Index and its levels.
def get_hdi(path,filename):
'''This function creates two dictionaries based on the Human Development Index and its levels.'''
# Read in the xlsx-file with data
file = "{}/{}".format(path,filename)
hdi_data = pd.read_excel(file, header=None)
# Replace the country names in the hdi_data with the corresponding country names as used in
# the regions data frame.
hdi_data[1] = hdi_data[1].replace(to_replace ="Viet Nam", value ="Vietnam")
hdi_data[1] = hdi_data[1].replace(to_replace ="Czechia", value ="Czech Republic")
hdi_data[1] = hdi_data[1].replace(to_replace ="Russian Federation", value ="Russia")
hdi_data[1] = hdi_data[1].replace(to_replace ="Venezuela (Bolivarian Republic of)", value ="Venezuela")
hdi_data[1] = hdi_data[1].replace(to_replace ="Korea (Republic of)", value ="South Korea")
hdi_data[1] = hdi_data[1].replace(to_replace ="Bolivia (Plurinational State of)", value ="Bolivia")
hdi_data[1] = hdi_data[1].replace(to_replace ="Hong Kong, China (SAR)", value ="Hong Kong")
hdi_data[1] = hdi_data[1].replace(to_replace ="Moldova (Republic of)", value ="Moldova")
hdi_data[1] = hdi_data[1].replace(to_replace ="Tanzania (United Republic of)", value ="Tanzania")
hdi_data[1] = hdi_data[1].replace(to_replace ="Lao People's Democratic Republic", value ="Laos")
hdi_data[1] = hdi_data[1].replace(to_replace ="Congo (Democratic Republic of the)", value ="Democratic Republic of the Congo")
hdi_data[1] = hdi_data[1].replace(to_replace ="Samoa", value ="American Samoa")
hdi_data[1] = hdi_data[1].replace(to_replace ="Palestine, State of", value ="Palestine")
hdi_data[1] = hdi_data[1].replace(to_replace ="Antigua and Barbuda", value ="Antigua")
# Select the useful rows and columns from the hdi data file to make the hdi.
df_hdi = hdi_data.iloc[7:200, 1:3]
# Remove the title rows indicating the human development level.
df_hdi = df_hdi[df_hdi[2].notna()]
# Append missing countries
df_hdi_missing = pd.DataFrame([["Macau", 0.914],["Aland Islands", 0.911],["Taiwan", 0.911],["Puerto Rico, U.S.", 0.845],["Western Sahara", 0.6764393349141735]],columns = [1,2])
df_hdi = pd.concat([df_hdi,df_hdi_missing])
# Make a dictionary with countries as keys and the hdi as values.
dict_hdi = dict(df_hdi.values.tolist())
## Create hdi-levels dictionary
# Select the useful rows and column from the hdi data file to make the hdi-levels.
df_levels = hdi_data.iloc[7:200, [1]]
# Create an index based on the title rows indicating the hdi-level.
idx = df_levels[(df_levels[1].str.contains("HUMAN DEVELOPMENT"))].index
# Use the index to create new dataframes per hdi-level.
df_very_high = df_levels.iloc[idx[0]-6:idx[1]-7, :]
df_high = df_levels.iloc[idx[1]-6:idx[2]-7, :]
df_medium = df_levels.iloc[idx[2]-6:idx[3]-7, :]
df_low = df_levels.iloc[idx[3]-6:, :]
# Add a column with the hdi-level per data frame.
df_very_high[2] = "very high"
df_high[2] = "high"
df_medium[2] = "medium"
df_low[2] = "low"
# Append missing countries
df_levels_high_missing = pd.DataFrame([["Macau", "very high"],["Aland Islands", "very high"],["Taiwan", "very high"],["Puerto Rico, U.S.", "very high"]],columns = [1,2])
df_very_high = | pd.concat([df_very_high,df_levels_high_missing]) | pandas.concat |
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
#-------------read csv---------------------
df_2010_2011 = pd.read_csv("/mnt/nadavrap-students/STS/data/data_Shapira_20200911_2010_2011.csv")
df_2012_2013 = pd.read_csv("/mnt/nadavrap-students/STS/data/data_Shapira_20200911_2012_2013.csv")
df_2014_2015 = pd.read_csv("/mnt/nadavrap-students/STS/data/data_Shapira_20200911_2014_2015.csv")
df_2016_2017 = pd.read_csv("/mnt/nadavrap-students/STS/data/data_Shapira_20200911_2016_2017.csv")
df_2018_2019 = pd.read_csv("/mnt/nadavrap-students/STS/data/data_Shapira_20200911_2018_2019.csv")
df_2010_2011['prcab'].fillna(2, inplace=True)
df_2012_2013['prcab'].fillna(2, inplace=True)
df_2014_2015['prcab'].fillna(2, inplace=True)
df_2016_2017['prcab'].fillna(2, inplace=True)
df_2018_2019['prcab'].fillna(2, inplace=True)
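# prcab == 1 marks a reoperation and prcab == 2 a first operation; missing values are treated as first operations.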
mask = df_2010_2011['surgyear'] != 2010
df_2011 = df_2010_2011[mask]
df_2010 = df_2010_2011[~mask]
mask2 = df_2012_2013['surgyear'] != 2012
df_2013 = df_2012_2013[mask2]
df_2012 = df_2012_2013[~mask2]
mask3 = df_2014_2015['surgyear'] != 2014
df_2015 = df_2014_2015[mask3]
df_2014 = df_2014_2015[~mask3]
mask4 = df_2016_2017['surgyear'] != 2016
df_2017 = df_2016_2017[mask4]
df_2016 = df_2016_2017[~mask4]
mask5 = df_2018_2019['surgyear'] != 2018
df_2019 = df_2018_2019[mask5]
df_2018 = df_2018_2019[~mask5]
avg_siteid = pd.DataFrame()
avg_surgid = pd.DataFrame()
def groupby_siteid():
df2010 = df_2010.groupby('siteid')['siteid'].count().reset_index(name='2010_total')
df2011 = df_2011.groupby('siteid')['siteid'].count().reset_index(name='2011_total')
df2012 = df_2012.groupby('siteid')['siteid'].count().reset_index(name='2012_total')
df2013 = df_2013.groupby('siteid')['siteid'].count().reset_index(name='2013_total')
df2014 = df_2014.groupby('siteid')['siteid'].count().reset_index(name='2014_total')
df2015 = df_2015.groupby('siteid')['siteid'].count().reset_index(name='2015_total')
df2016 = df_2016.groupby('siteid')['siteid'].count().reset_index(name='2016_total')
df2017 = df_2017.groupby('siteid')['siteid'].count().reset_index(name='2017_total')
df2018 = df_2018.groupby('siteid')['siteid'].count().reset_index(name='2018_total')
df2019 = df_2019.groupby('siteid')['siteid'].count().reset_index(name='2019_total')
df1 =pd.merge(df2010, df2011, on='siteid', how='outer')
df2 =pd.merge(df1, df2012, on='siteid', how='outer')
df3 =pd.merge(df2, df2013, on='siteid', how='outer')
df4 =pd.merge(df3, df2014, on='siteid', how='outer')
df5 =pd.merge(df4, df2015, on='siteid', how='outer')
df6 =pd.merge(df5, df2016, on='siteid', how='outer')
df7 =pd.merge(df6, df2017, on='siteid', how='outer')
df8 =pd.merge(df7, df2018, on='siteid', how='outer')
df_sum_all_Years =pd.merge(df8, df2019, on='siteid', how='outer')
df_sum_all_Years.fillna(0,inplace=True)
cols = df_sum_all_Years.columns.difference(['siteid'])
df_sum_all_Years['Distinct_years'] = df_sum_all_Years[cols].gt(0).sum(axis=1)
cols_sum = df_sum_all_Years.columns.difference(['siteid','Distinct_years'])
df_sum_all_Years['Year_sum'] =df_sum_all_Years.loc[:,cols_sum].sum(axis=1)
df_sum_all_Years['Year_avg'] = df_sum_all_Years['Year_sum']/df_sum_all_Years['Distinct_years']
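    # Per-site summary: number of active years, total operations, and average operations per active year.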
df_sum_all_Years.to_csv("total op sum all years siteid.csv")
print("details on site id dist:")
print("num of all sites: ", len(df_sum_all_Years))
less_8 =df_sum_all_Years[df_sum_all_Years['Distinct_years'] !=10]
less_8.to_csv("total op less 10 years siteid.csv")
print("num of sites with less years: ", len(less_8))
x = np.array(less_8['Distinct_years'])
print(np.unique(x))
avg_siteid['siteid'] = df_sum_all_Years['siteid']
avg_siteid['total_year_sum'] = df_sum_all_Years['Year_sum']
avg_siteid['total_year_avg'] = df_sum_all_Years['Year_avg']
avg_siteid['num_of_years'] = df_sum_all_Years['Distinct_years']
def groupby_siteid_prcab():
df2010 = df_2010.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2010_reop')
df2011 = df_2011.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2011_reop')
df2012 = df_2012.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2012_reop')
df2013 = df_2013.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2013_reop')
df2014 = df_2014.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2014_reop')
df2015 = df_2015.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2015_reop')
df2016 = df_2016.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2016_reop')
df2017 = df_2017.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2017_reop')
df2018 = df_2018.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2018_reop')
df2019 = df_2019.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2019_reop')
df1 =pd.merge(df2010, df2011, on='siteid', how='outer')
df2 =pd.merge(df1, df2012, on='siteid', how='outer')
df3 =pd.merge(df2, df2013, on='siteid', how='outer')
df4 =pd.merge(df3, df2014, on='siteid', how='outer')
df5 =pd.merge(df4, df2015, on='siteid', how='outer')
df6 =pd.merge(df5, df2016, on='siteid', how='outer')
df7 =pd.merge(df6, df2017, on='siteid', how='outer')
df8 =pd.merge(df7, df2018, on='siteid', how='outer')
df_sum_all_Years =pd.merge(df8, df2019, on='siteid', how='outer')
df_sum_all_Years.fillna(0,inplace=True)
cols = df_sum_all_Years.columns.difference(['siteid'])
df_sum_all_Years['Distinct_years_reop'] = df_sum_all_Years[cols].gt(0).sum(axis=1)
cols_sum = df_sum_all_Years.columns.difference(['siteid', 'Distinct_years_reop'])
df_sum_all_Years['Year_sum_reop'] = df_sum_all_Years.loc[:, cols_sum].sum(axis=1)
df_sum_all_Years['Year_avg_reop'] = df_sum_all_Years['Year_sum_reop'] / avg_siteid['num_of_years']
df_sum_all_Years.to_csv("sum all years siteid reop.csv")
less_8 = df_sum_all_Years[df_sum_all_Years['Distinct_years_reop'] != 10]
less_8.to_csv("less 10 years reop siteid.csv")
print("num of sites with less years reop : ", len(less_8))
x = np.array(less_8['Distinct_years_reop'])
print(np.unique(x))
df_10 = df_2010.groupby('siteid')['prcab'].apply(lambda x:(x==2).sum()).reset_index(name='2010_Firstop')
df_11 = df_2011.groupby('siteid')['prcab'].apply(lambda x:(x==2).sum()).reset_index(name='2011_Firstop')
df_12 = df_2012.groupby('siteid')['prcab'].apply(lambda x:(x==2).sum()).reset_index(name='2012_Firstop')
df_13 = df_2013.groupby('siteid')['prcab'].apply(lambda x:(x==2).sum()).reset_index(name='2013_Firstop')
df_14 = df_2014.groupby('siteid')['prcab'].apply(lambda x:(x==2).sum()).reset_index(name='2014_Firstop')
df_15 = df_2015.groupby('siteid')['prcab'].apply(lambda x:(x==2).sum()).reset_index(name='2015_Firstop')
df_16 = df_2016.groupby('siteid')['prcab'].apply(lambda x:(x==2).sum()).reset_index(name='2016_Firstop')
df_17 = df_2017.groupby('siteid')['prcab'].apply(lambda x:(x==2).sum()).reset_index(name='2017_Firstop')
df_18 = df_2018.groupby('siteid')['prcab'].apply(lambda x:(x==2).sum()).reset_index(name='2018_Firstop')
df_19 = df_2019.groupby('siteid')['prcab'].apply(lambda x:(x==2).sum()).reset_index(name='2019_Firstop')
d1 = pd.merge(df_10, df_11, on='siteid', how='outer')
d2 = pd.merge(d1, df_12, on='siteid', how='outer')
d3 = pd.merge(d2, df_13, on='siteid', how='outer')
d4 = pd.merge(d3, df_14, on='siteid', how='outer')
d5 = pd.merge(d4, df_15, on='siteid', how='outer')
d6 = pd.merge(d5, df_16, on='siteid', how='outer')
d7 = pd.merge(d6, df_17, on='siteid', how='outer')
d8 = pd.merge(d7, df_18, on='siteid', how='outer')
df_sum_all_Years_total = pd.merge(d8, df_19, on='siteid', how='outer')
df_sum_all_Years_total.fillna(0, inplace=True)
cols = df_sum_all_Years_total.columns.difference(['siteid'])
df_sum_all_Years_total['Distinct_years'] = df_sum_all_Years_total[cols].gt(0).sum(axis=1)
cols_sum = df_sum_all_Years_total.columns.difference(['siteid', 'Distinct_years'])
df_sum_all_Years_total['Year_sum'] = df_sum_all_Years_total.loc[:, cols_sum].sum(axis=1)
df_sum_all_Years_total['Year_avg'] = df_sum_all_Years_total['Year_sum'] / avg_siteid['num_of_years']
df_sum_all_Years_total.to_csv("First op sum all years siteid.csv")
less = df_sum_all_Years_total[df_sum_all_Years_total['Distinct_years'] != 10]
less.to_csv("First op less 10 years siteid.csv")
print("First op num of sites with less years: ", len(less))
x = np.array(less['Distinct_years'])
print(np.unique(x))
temp_first = pd.DataFrame()
temp_first['siteid'] = df_sum_all_Years_total['siteid']
temp_first['Year_sum_Firstop'] = df_sum_all_Years_total['Year_sum']
temp_first['Year_avg_Firstop'] = df_sum_all_Years_total['Year_avg']
temp_reop = pd.DataFrame()
temp_reop['siteid'] = df_sum_all_Years['siteid']
temp_reop['Year_avg_reop'] = df_sum_all_Years['Year_avg_reop']
temp_reop['Year_sum_reop'] = df_sum_all_Years['Year_sum_reop']
df20 = pd.merge(avg_siteid, temp_first, on='siteid', how='outer')
total_avg_site_id = pd.merge(df20, temp_reop, on='siteid', how='outer')
total_avg_site_id['firstop/total'] = (total_avg_site_id['Year_avg_Firstop'] / total_avg_site_id['num_of_years']) * 100
total_avg_site_id['reop/total'] = (total_avg_site_id['Year_avg_reop'] / total_avg_site_id['num_of_years']) * 100
total_avg_site_id.fillna(0,inplace=True)
total_avg_site_id.to_csv('total_avg_site_id.csv')
def groupby_surgid():
df2010 = df_2010.groupby('surgid')['surgid'].count().reset_index(name='2010_total')
df2011 = df_2011.groupby('surgid')['surgid'].count().reset_index(name='2011_total')
df2012 = df_2012.groupby('surgid')['surgid'].count().reset_index(name='2012_total')
df2013 = df_2013.groupby('surgid')['surgid'].count().reset_index(name='2013_total')
df2014 = df_2014.groupby('surgid')['surgid'].count().reset_index(name='2014_total')
df2015 = df_2015.groupby('surgid')['surgid'].count().reset_index(name='2015_total')
df2016 = df_2016.groupby('surgid')['surgid'].count().reset_index(name='2016_total')
df2017 = df_2017.groupby('surgid')['surgid'].count().reset_index(name='2017_total')
df2018 = df_2018.groupby('surgid')['surgid'].count().reset_index(name='2018_total')
df2019 = df_2019.groupby('surgid')['surgid'].count().reset_index(name='2019_total')
df1 = pd.merge(df2010, df2011, on='surgid', how='outer')
df2 = pd.merge(df1, df2012, on='surgid', how='outer')
df3 = pd.merge(df2, df2013, on='surgid', how='outer')
df4 = pd.merge(df3, df2014, on='surgid', how='outer')
df5 = pd.merge(df4, df2015, on='surgid', how='outer')
df6 = pd.merge(df5, df2016, on='surgid', how='outer')
df7 = pd.merge(df6, df2017, on='surgid', how='outer')
df8 = pd.merge(df7, df2018, on='surgid', how='outer')
df_sum_all_Years = pd.merge(df8, df2019, on='surgid', how='outer')
df_sum_all_Years.fillna(0, inplace=True)
cols = df_sum_all_Years.columns.difference(['surgid'])
df_sum_all_Years['Distinct_years'] = df_sum_all_Years[cols].gt(0).sum(axis=1)
cols_sum = df_sum_all_Years.columns.difference(['surgid', 'Distinct_years'])
df_sum_all_Years['Year_sum'] = df_sum_all_Years.loc[:, cols_sum].sum(axis=1)
df_sum_all_Years['Year_avg'] = df_sum_all_Years['Year_sum'] / df_sum_all_Years['Distinct_years']
df_sum_all_Years.to_csv("total op sum all years surgid.csv")
print("details on surg id dist:")
print("num of all surgid: ", len(df_sum_all_Years))
less_8 = df_sum_all_Years[df_sum_all_Years['Distinct_years'] != 10]
less_8.to_csv("total op less 10 years surgid.csv")
print("num of surgid with less years: ", len(less_8))
x = np.array(less_8['Distinct_years'])
print(np.unique(x))
# avg_surgid['surgid'] = df_sum_all_Years['surgid']
# avg_surgid['total_year_avg'] = df_sum_all_Years['Year_avg']
avg_surgid['surgid'] = df_sum_all_Years['surgid']
avg_surgid['total_year_avg'] = df_sum_all_Years['Year_avg']
avg_surgid['total_year_count'] = df_sum_all_Years['Year_sum']
avg_surgid['num_of_years'] = df_sum_all_Years['Distinct_years']
def groupby_surgid_prcab():
df2010 = df_2010.groupby('surgid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2010_reop')
df2011 = df_2011.groupby('surgid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2011_reop')
df2012 = df_2012.groupby('surgid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2012_reop')
df2013 = df_2013.groupby('surgid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2013_reop')
df2014 = df_2014.groupby('surgid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2014_reop')
df2015 = df_2015.groupby('surgid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2015_reop')
df2016 = df_2016.groupby('surgid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2016_reop')
df2017 = df_2017.groupby('surgid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2017_reop')
df2018 = df_2018.groupby('surgid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2018_reop')
df2019 = df_2019.groupby('surgid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2019_reop')
df1 = pd.merge(df2010, df2011, on='surgid', how='outer')
df2 = pd.merge(df1, df2012, on='surgid', how='outer')
df3 = pd.merge(df2, df2013, on='surgid', how='outer')
df4 = pd.merge(df3, df2014, on='surgid', how='outer')
df5 = pd.merge(df4, df2015, on='surgid', how='outer')
df6 = pd.merge(df5, df2016, on='surgid', how='outer')
df7 = pd.merge(df6, df2017, on='surgid', how='outer')
df8 = pd.merge(df7, df2018, on='surgid', how='outer')
df_sum_all_Years = pd.merge(df8, df2019, on='surgid', how='outer')
df_sum_all_Years.fillna(0, inplace=True)
cols = df_sum_all_Years.columns.difference(['surgid'])
df_sum_all_Years['Distinct_years_reop'] = df_sum_all_Years[cols].gt(0).sum(axis=1)
cols_sum = df_sum_all_Years.columns.difference(['surgid', 'Distinct_years_reop'])
df_sum_all_Years['Year_sum_reop'] = df_sum_all_Years.loc[:, cols_sum].sum(axis=1)
df_sum_all_Years['Year_avg_reop'] = df_sum_all_Years['Year_sum_reop'] / avg_surgid['num_of_years']
df_sum_all_Years.to_csv("sum all years surgid reop.csv")
less_8 = df_sum_all_Years[df_sum_all_Years['Distinct_years_reop'] != 10]
less_8.to_csv("less 10 years reop surgid.csv")
print("num of surgid with less years reop : ", len(less_8))
x = np.array(less_8['Distinct_years_reop'])
print(np.unique(x))
df_10 = df_2010.groupby('surgid')['prcab'].apply(lambda x: (x == 2).sum()).reset_index(name='2010_Firstop')
df_11 = df_2011.groupby('surgid')['prcab'].apply(lambda x: (x == 2).sum()).reset_index(name='2011_Firstop')
df_12 = df_2012.groupby('surgid')['prcab'].apply(lambda x: (x == 2).sum()).reset_index(name='2012_Firstop')
df_13 = df_2013.groupby('surgid')['prcab'].apply(lambda x: (x == 2).sum()).reset_index(name='2013_Firstop')
df_14 = df_2014.groupby('surgid')['prcab'].apply(lambda x: (x == 2).sum()).reset_index(name='2014_Firstop')
df_15 = df_2015.groupby('surgid')['prcab'].apply(lambda x: (x == 2).sum()).reset_index(name='2015_Firstop')
df_16 = df_2016.groupby('surgid')['prcab'].apply(lambda x: (x == 2).sum()).reset_index(name='2016_Firstop')
df_17 = df_2017.groupby('surgid')['prcab'].apply(lambda x: (x == 2).sum()).reset_index(name='2017_Firstop')
df_18 = df_2018.groupby('surgid')['prcab'].apply(lambda x: (x == 2).sum()).reset_index(name='2018_Firstop')
df_19 = df_2019.groupby('surgid')['prcab'].apply(lambda x: (x == 2).sum()).reset_index(name='2019_Firstop')
d1 = pd.merge(df_10, df_11, on='surgid', how='outer')
d2 = pd.merge(d1, df_12, on='surgid', how='outer')
d3 = pd.merge(d2, df_13, on='surgid', how='outer')
d4 = pd.merge(d3, df_14, on='surgid', how='outer')
d5 = pd.merge(d4, df_15, on='surgid', how='outer')
d6 = pd.merge(d5, df_16, on='surgid', how='outer')
d7 = pd.merge(d6, df_17, on='surgid', how='outer')
d8 = pd.merge(d7, df_18, on='surgid', how='outer')
df_sum_all_Years_total = pd.merge(d8, df_19, on='surgid', how='outer')
df_sum_all_Years_total.fillna(0, inplace=True)
cols = df_sum_all_Years_total.columns.difference(['surgid'])
df_sum_all_Years_total['Distinct_years'] = df_sum_all_Years_total[cols].gt(0).sum(axis=1)
cols_sum = df_sum_all_Years_total.columns.difference(['surgid', 'Distinct_years'])
df_sum_all_Years_total['Year_sum'] = df_sum_all_Years_total.loc[:, cols_sum].sum(axis=1)
df_sum_all_Years_total['Year_avg'] = df_sum_all_Years_total['Year_sum'] / avg_surgid['num_of_years']
df_sum_all_Years_total.to_csv("First op sum all years surgid.csv")
less = df_sum_all_Years_total[df_sum_all_Years_total['Distinct_years'] != 10]
less.to_csv("First op less 10 years surgid.csv")
print("First op num of sites with less years: ", len(less))
x = np.array(less['Distinct_years'])
print(np.unique(x))
# temp_first = pd.DataFrame()
# temp_first['surgid'] = df_sum_all_Years_total['surgid']
# temp_first['Year_avg_Firstop'] = df_sum_all_Years_total['Year_avg']
# temp_reop = pd.DataFrame()
# temp_reop['surgid'] = df_sum_all_Years['surgid']
# temp_reop['Year_avg_reop'] = df_sum_all_Years['Year_avg_reop']
#
# df20 = pd.merge(avg_surgid, temp_first, on='surgid', how='outer')
# total_avg_surgid = pd.merge(df20, temp_reop, on='surgid', how='outer')
#
# total_avg_surgid['firstop/total'] = (total_avg_surgid['Year_avg_Firstop'] / total_avg_surgid['total_year_avg']) * 100
# total_avg_surgid['reop/total'] = (total_avg_surgid['Year_avg_reop'] / total_avg_surgid['total_year_avg']) * 100
# total_avg_surgid.fillna(0, inplace=True)
# total_avg_surgid.to_csv('total_avg_surgid.csv')
temp_first = pd.DataFrame()
temp_first['surgid'] = df_sum_all_Years_total['surgid']
temp_first['Year_avg_Firstop'] = df_sum_all_Years_total['Year_avg']
temp_first['Year_sum_Firstop'] = df_sum_all_Years_total['Year_sum']
temp_reop = pd.DataFrame()
temp_reop['surgid'] = df_sum_all_Years['surgid']
temp_reop['Year_avg_reop'] = df_sum_all_Years['Year_avg_reop']
temp_reop['Year_sum_reop'] = df_sum_all_Years['Year_sum_reop']
df20 = pd.merge(avg_surgid, temp_first, on='surgid', how='outer')
total_avg_surgid = | pd.merge(df20, temp_reop, on='surgid', how='outer') | pandas.merge |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
""" Download meteorological files from GFS """
import argparse
import json
import os
import sys
from datetime import datetime, timedelta
from inspect import getmembers
from traceback import print_exc
import numpy as np
import pandas as pd
from pydap.client import open_dods
from pydap.exceptions import OpenFileError, ServerError
URL = "https://nomads.ncep.noaa.gov/dods/gfs_{res}{step}/gfs{date}/gfs_{res}{step}_{hour:02d}z.dods?"
FORMAT_STR = (
"{var}.{var}[{time[0]:d}:{time[1]:d}][{lat[0]:d}:{lat[1]:d}][{lon[0]:d}:{lon[1]:d}]"
)
FORMAT_STR_PL = "{var}.{var}[{time[0]:d}:{time[1]:d}][{lev[0]:d}:{lev[1]:d}][{lat[0]:d}:{lat[1]:d}][{lon[0]:d}:{lon[1]:d}]"
VAR_CONF = {
"pressfc": "surface",
"tmp2m": "surface",
"tmp80m": "surface",
"tmp100m": "surface",
"ugrd10m": "surface",
"ugrd80m": "surface",
"ugrd100m": "surface",
"vgrd10m": "surface",
"vgrd80m": "surface",
"vgrd100m": "surface",
"tmpprs": "pressure",
"ugrdprs": "pressure",
"vgrdprs": "pressure",
"hgtprs": "pressure",
}
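# Surface fields are indexed as [time][lat][lon]; pressure-level fields add a [lev] dimension.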
DATE_FORMAT = "%Y%m%d"
range1 = lambda start, end, step=1: range(start, end + 1, step)
def daterange(start, end):
def convert(date):
try:
date = datetime.strptime(date, DATE_FORMAT)
return date.date()
except TypeError:
return date
# Catch and raise:
# ValueError: day is out of range for month
def get_date(n):
return convert(start) + timedelta(days=n)
days = (convert(end) - convert(start)).days
if days < 0:
raise ValueError("The start date must be before the end date.")
for n in range1(0, days):
yield get_date(n)
def lat_type(str):
try:
lat = float(str)
except:
raise argparse.ArgumentTypeError("invalid float value: '{0}'".format(str))
if lat < -90 or lat > 90:
raise argparse.ArgumentTypeError("latitude not in range -90..90")
else:
return lat
def lon_type(str):
try:
lon = float(str)
except:
raise argparse.ArgumentTypeError("invalid float value: '{0}'".format(str))
if lon < -180 or lon > 180:
raise argparse.ArgumentTypeError("longitude not in range -180..180")
else:
return lon
def get_file(request, param, var_conf, time, lat, lon, verbose=False):
ntime = len(time)
ncoord = len(lat) * len(lon)
var_list = [
(
(FORMAT_STR if vartype == "surface" else FORMAT_STR_PL).format(
var=var, **param
)
)
for var, vartype in var_conf.items()
]
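    # Joining the per-variable selections yields a single DODS constraint expression with explicit index ranges.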
if verbose:
print(request + ",".join(var_list))
try:
dataset = open_dods(request + ",".join(var_list))
except:
raise OpenFileError("file '{}' not available".format(request[:-1]))
var_data = [var.data.reshape((ntime, -1, ncoord)) for var in dataset.values()]
var_names = [
"{}{}".format(var, n)
for idx, var in enumerate(dataset)
for n in range(var_data[idx].shape[1])
]
index = pd.MultiIndex.from_product((lat, lon), names=["lat", "lon"])
columns = | pd.MultiIndex.from_product((time, var_names), names=["time", "var"]) | pandas.MultiIndex.from_product |
"""
DOCSTRING
"""
import matplotlib.pyplot as pyplot
import numpy
import os
import pandas
import PIL
import seaborn
import skimage
import time
class EDA:
"""
DOCSTRING
"""
def __init__(self):
        self.dict_labels = {
0: "No DR",
1: "Mild",
2: "Moderate",
3: "Severe",
4: "Proliferative DR"}
def __call__(self):
labels = pandas.read_csv("labels/trainLabels.csv")
        self.plot_classification_frequency(labels, "level", "Retinopathy_vs_Frequency_All")
        self.plot_classification_frequency(labels, "level", "Retinopathy_vs_Frequency_Binary", True)
    def change_labels(self, df, category):
"""
Changes the labels for a binary classification.
Either the person has a degree of retinopathy, or they don't.
INPUT
df: Pandas DataFrame of the image name and labels
category: column of the labels
OUTPUT
Column containing a binary classification of 0 or 1
"""
return [1 if l > 0 else 0 for l in df[category]]
    def plot_classification_frequency(self, df, category, file_name, convert_labels=False):
"""
Plots the frequency at which labels occur.
INPUT
df: Pandas DataFrame of the image name and labels
category: category of labels, from 0 to 4
file_name: file name of the image
convert_labels: argument specified for converting to binary classification
RETURN
None
"""
        if convert_labels == True:
            df[category] = self.change_labels(df, category)
        seaborn.set(style="whitegrid", color_codes=True)
        seaborn.countplot(x=category, data=df)
pyplot.title('Retinopathy vs Frequency')
pyplot.savefig(file_name)
return
class ImageToArray:
"""
DOCSTRING
"""
def __call__(self):
start_time = time.time()
labels = pandas.read_csv("../labels/trainLabels_master_256_v2.csv")
print("Writing Train Array")
        X_train = self.convert_images_to_arrays_train('../data/train-resized-256/', labels)
print(X_train.shape)
print("Saving Train Array")
        self.save_to_array('../data/X_train.npy', X_train)
print("--- %s seconds ---" % (time.time() - start_time))
def change_image_name(self, df, column):
"""
Appends the suffix '.jpeg' for all image names in the DataFrame
INPUT
df: Pandas DataFrame, including columns to be altered.
column: The column that will be changed. Takes a string input.
OUTPUT
Pandas DataFrame, with a single column changed to include the
aforementioned suffix.
"""
return [i + '.jpeg' for i in df[column]]
def convert_images_to_arrays_train(self, file_path, df):
"""
Converts each image to an array, and appends each array to a new NumPy
array, based on the image column equaling the image file name.
INPUT
file_path: Specified file path for resized test and train images.
df: Pandas DataFrame being used to assist file imports.
OUTPUT
NumPy array of image arrays.
"""
lst_imgs = [l for l in df['train_image_name']]
return numpy.array([numpy.array(PIL.Image.open(file_path + img)) for img in lst_imgs])
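        # Note: this loads every training image into memory at once; it assumes the 256x256
        # resized images produced by the resizing step.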
def save_to_array(self, arr_name, arr_object):
"""
Saves data object as a NumPy file. Used for saving train and test arrays.
INPUT
arr_name: The name of the file you want to save.
This input takes a directory string.
arr_object: NumPy array of arrays. This object is saved as a NumPy file.
OUTPUT
NumPy array of image arrays
"""
return numpy.save(arr_name, arr_object)
class PreprocessImages:
"""
DOCSTRING
"""
def __call__(self):
start_time = time.time()
trainLabels = pandas.read_csv('../labels/trainLabels.csv')
trainLabels['image'] = [i + '.jpeg' for i in trainLabels['image']]
trainLabels['black'] = numpy.nan
        trainLabels['black'] = self.find_black_images('../data/train-resized-256/', trainLabels)
trainLabels = trainLabels.loc[trainLabels['black'] == 0]
trainLabels.to_csv('trainLabels_master.csv', index=False, header=True)
print("Completed")
print("--- %s seconds ---" % (time.time() - start_time))
def find_black_images(self, file_path, df):
"""
Creates a column of images that are not black (numpy.mean(img) != 0)
INPUT
file_path: file_path to the images to be analyzed.
df: Pandas DataFrame that includes all labeled image names.
column: column in DataFrame query is evaluated against.
OUTPUT
Column indicating if the photo is pitch black or not.
"""
lst_imgs = [l for l in df['image']]
return [1 if numpy.mean(numpy.array(
PIL.Image.open(file_path + img))) == 0 else 0 for img in lst_imgs]
def rename_images(self, src_dir, new_prefix):
"""
DOCSTRING
"""
for file_name in os.listdir(src_dir):
os.rename(
os.path.join(src_dir, file_name),
os.path.join(src_dir, new_prefix + file_name))
print(file_name + ' -> ' + new_prefix + file_name)
class ReconcileLabels:
"""
DOCSTRING
"""
def __call__(self):
trainLabels = pandas.read_csv("../labels/trainLabels_master.csv")
        lst_imgs = self.get_lst_images('../data/train-resized-256/')
new_trainLabels = pandas.DataFrame({'image': lst_imgs})
new_trainLabels['image2'] = new_trainLabels.image
# remove the suffix from the image names
new_trainLabels['image2'] = \
new_trainLabels.loc[:, 'image2'].apply(lambda x: '_'.join(x.split('_')[0:2]))
# strip and add jpeg back into file name
new_trainLabels['image2'] = new_trainLabels.loc[:, 'image2'].apply(
lambda x: '_'.join(x.split('_')[0:2]).strip('.jpeg') + '.jpeg')
#trainLabels = trainLabels[0:10]
new_trainLabels.columns = ['train_image_name', 'image']
trainLabels = pandas.merge(trainLabels, new_trainLabels, how='outer', on='image')
trainLabels.drop(['black'], axis=1, inplace=True)
#print(trainLabels.head(100))
trainLabels = trainLabels.dropna()
print(trainLabels.shape)
print("Writing CSV")
trainLabels.to_csv('../labels/trainLabels_master_256_v2.csv', index=False, header=True)
def get_lst_images(self, file_path):
"""
Reads in all files from file path into a list.
INPUT
file_path: specified file path containing the images.
OUTPUT
List of image strings
"""
return [i for i in os.listdir(file_path) if i != '.DS_Store']
class ResizeImages:
"""
DOCSTRING
"""
PIL.ImageFile.LOAD_TRUNCATED_IMAGES = True
def __call__(self):
self.crop_and_resize_images(
path='../data/train/',
new_path='../data/train-resized-256/',
cropx=1800, cropy=1800, img_size=256)
self.crop_and_resize_images(
path='../data/test/',
new_path='../data/test-resized-256/',
cropx=1800, cropy=1800, img_size=256)
def create_directory(self, directory):
"""
Creates a new folder in the specified directory if the folder doesn't exist.
INPUT
directory: Folder to be created, called as "folder/".
OUTPUT
None
"""
if not os.path.exists(directory):
os.makedirs(directory)
return
def crop_and_resize_images(self, path, new_path, cropx, cropy, img_size=256):
"""
Crops, resizes, and stores all images from a directory in a new directory.
INPUT
path: Path where the current, unscaled images are contained.
new_path: Path to save the resized images.
img_size: New size for the rescaled images.
OUTPUT
None
"""
        self.create_directory(new_path)
dirs = [l for l in os.listdir(path) if l != '.DS_Store']
total = 0
for item in dirs:
img = skimage.io.imread(path+item)
y,x,channel = img.shape
startx = x//2-(cropx//2)
starty = y//2-(cropy//2)
img = img[starty:starty+cropy,startx:startx+cropx]
img = skimage.transform.resize(img, (256,256))
skimage.io.imsave(str(new_path + item), img)
total += 1
print("Saving: ", item, total)
return
class RotateImages:
"""
DOCSTRING
"""
#import cv2
def __call__(self):
start_time = time.time()
trainLabels = | pandas.read_csv("../labels/trainLabels_master.csv") | pandas.read_csv |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import os.path
import math
from IPython.display import display
import random
import scipy.stats as st
from sklearn.preprocessing import LabelEncoder,OneHotEncoder
import sklearn.preprocessing as sk
import sklearn.model_selection as skm
from imblearn.under_sampling import RandomUnderSampler
from imblearn.over_sampling import RandomOverSampler
from imblearn.over_sampling import SMOTE
import datefinder
class preprocess:
def __init__(self,dataset,target=None,cat_thresh=20,ignore_columns=None):
self.data=dataset.copy()
self.dataset_cat=pd.DataFrame()
self.dataset_num=pd.DataFrame()
self.dataset_datetime=pd.DataFrame()
self.dataset_high_cardinality=pd.DataFrame()
self.target=target
self.ignore_columns=ignore_columns
self.col_i=self.data.columns
self.cat_thresh=cat_thresh;
if(ignore_columns):
self.data=self.data.drop(ignore_columns,axis=1)
self.data=self.data.replace([np.inf,-np.inf],np.NaN)
self.col_ni=self.data.columns
for col in self.data.columns:
if self.data[col].dtype=="object":
try:
con1=dataset[col].astype("str").str.match("^([1-9]|0[1-9]|1[0-9]|2[0-9]|3[0-1])(\.|-|/)([1-9]|0[1-9]|1[0-2])(\.|-|/)([0-9][0-9]|19[0-9][0-9]|20[0-9][0-9])$|^([0-9][0-9]|19[0-9][0-9]|20[0-9][0-9])(\.|-|/)([1-9]|0[1-9]|1[0-2])(\.|-|/)([1-9]|0[1-9]|1[0-9]|2[0-9]|3[0-1])$").any()
con2=dataset[col].astype("str").str.match('(\d{4})-(\d{2})-(\d{2})( (\d{2}):(\d{2}):(\d{2}))?').any()
con3=dataset[col].astype("str").str.match('^(?:(?:31(\/|-|\.)(?:0?[13578]|1[02]|(?:Jan|Mar|May|Jul|Aug|Oct|Dec)))\1|(?:(?:29|30)(\/|-|\.)(?:0?[1,3-9]|1[0-2]|(?:Jan|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec))\2))(?:(?:1[6-9]|[2-9]\d)?\d{2})$|^(?:29(\/|-|\.)(?:0?2|(?:Feb))\3(?:(?:(?:1[6-9]|[2-9]\d)?(?:0[48]|[2468][048]|[13579][26])|(?:(?:16|[2468][048]|[3579][26])00))))$|^(?:0?[1-9]|1\d|2[0-8])(\/|-|\.)(?:(?:0?[1-9]|(?:Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep))|(?:1[0-2]|(?:Oct|Nov|Dec)))\4(?:(?:1[6-9]|[2-9]\d)?\d{2})$').any()
con4=dataset[col].astype("str").str.match('^([0-9]|0[0-9]|1[0-9]|2[0-3]):([0-9]|[0-5][0-9]:)?([0-5]?\d)$').any()
m=datefinder.find_dates(dataset[dataset[col].notnull()][col].astype("str")[0])
if(con1 or con2 or con3 or con4 or len(list(m))):
self.data[col]=pd.to_datetime(self.data[col],errors="coerce")
#self.dataset_datetime
#Generate DateTime Features
time_fe=pd.DataFrame()
if(len(self.data[col].dt.year.value_counts())>1):
time_fe[col+"_year"]=self.data[col].dt.year
if(len(self.data[col].dt.month.value_counts())>1):
time_fe[col+"_month"]=self.data[col].dt.month
if(len(self.data[col].dt.day.value_counts())>1):
time_fe[col+"_day"]=self.data[col].dt.day
if(len(self.data[col].dt.hour.value_counts())>1):
time_fe[col+"_hour"]=self.data[col].dt.hour
if(len(self.data[col].dt.minute.value_counts())>1):
time_fe[col+"_minute"]=self.data[col].dt.minute
if(len(self.data[col].dt.second.value_counts())>1):
time_fe[col+"_second"]=self.data[col].dt.second
if(len(self.data[col].dt.dayofweek.value_counts())>1):
time_fe[col+"_dayofweek"]=self.data[col].dt.dayofweek
#print(self.data[col])
self.data=self.data.drop(col,axis=1)
self.data=pd.concat([self.data,time_fe],axis=1)
except:
continue
#display(self.data)
#display(self.data.dtypes)
for col in self.data.columns:
if(self.data[col].nunique()<cat_thresh):
self.dataset_cat[col]=self.data[col]
elif(self.data[col].dtype=='object' and self.data[col].nunique()>cat_thresh):
self.dataset_high_cardinality[col]=self.data[col]
elif((self.data[col].dtype=='int64' or self.data[col].dtype=='float64') and self.data[col].nunique()>cat_thresh):
self.dataset_num[col]=self.data[col]
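        # Columns are split into low-cardinality categoricals, high-cardinality object columns, and continuous numerics.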
def impute_missing(self,numerical_imputation="mean",categorical_imputation="mode"):
dataset_high_cardinality=pd.DataFrame()
dataset_cat=pd.DataFrame()
if(numerical_imputation=="mean"):
dataset_num= self.dataset_num.fillna(self.dataset_num.mean())
elif(numerical_imputation=="median"):
dataset_num= self.dataset_num.fillna(self.dataset_num.median())
elif(numerical_imputation=="mode"):
dataset_num= self.dataset_num.fillna(self.dataset_num.mode().iloc[0,:])
if(categorical_imputation=="mode"):
if(not self.dataset_cat.empty):
dataset_cat= self.dataset_cat.fillna(self.dataset_cat.mode().iloc[0,:])
if(not self.dataset_high_cardinality.empty):
dataset_high_cardinality= self.dataset_high_cardinality.fillna(self.dataset_high_cardinality.mode().iloc[0,:])
self.data=pd.concat([dataset_num,dataset_cat,dataset_high_cardinality],axis=1)
return self.data
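    # --- Usage sketch (illustrative, not part of the original class) ---------
    # Hypothetical call of the imputation step above; `raw_df` and the target
    # column name "label" are placeholders. Numeric columns are filled with the
    # chosen statistic, categorical/high-cardinality columns with the mode.
    #
    #   pp = preprocess(raw_df, target="label")
    #   clean = pp.impute_missing(numerical_imputation="median",
    #                             categorical_imputation="mode")
    # --------------------------------------------------------------------------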
def handle_outliers(self,method="iqr",outlier_threshold=2,strategy="replace_lb_ub",columns="all"):
if(method=="iqr"):
if columns=="all":
for col in self.dataset_num:
q1= self.data[col].describe()["25%"]
q3= self.data[col].describe()["75%"]
iqr=q3-q1
lb=q1-(1.5*iqr)
ub=q3+(1.5*iqr)
out= self.data[( self.data[col]<lb) | ( self.data[col]>ub)]
num_o=out.shape[0]
p=(num_o/self.data.shape[0])*100
if(p<outlier_threshold and p>0):
if(strategy=="replace_lb_ub"):
outlier_dict={}.fromkeys( self.data[ self.data[col]>ub][col],ub)
outlier_dict.update({}.fromkeys( self.data[ self.data[col]<lb][col],lb))
self.data[col]= self.data[col].replace(outlier_dict)
elif(strategy=="replace_mean"):
outlier_dict_mean={}.fromkeys( self.data[( self.data[col]<lb) | ( self.data[col]>ub)][col], self.data[col].mean())
self.data[col]= self.data[col].replace(outlier_dict_mean)
elif(strategy=="replace_median"):
outlier_dict_median={}.fromkeys( self.data[( self.data[col]<lb) | ( self.data[col]>ub)][col], self.data[col].median())
self.data[col]= self.data[col].replace(outlier_dict_median)
elif(strategy=="remove"):
#outlier_index=data[(data[col]<lb) | (data[col]>ub)].index
#print()
self.data= self.data[( self.data[col]>lb) & (self.data[col]<ub)]
else:
for col in columns:
if(col in self.dataset_num):
q1= self.data[col].describe()["25%"]
q3= self.data[col].describe()["75%"]
iqr=q3-q1
lb=q1-(1.5*iqr)
ub=q3+(1.5*iqr)
out= self.data[(self.data[col]<lb) | (self.data[col]>ub)]
num_o=out.shape[0]
p=(num_o/self.data.shape[0])*100
if(p<outlier_threshold and p>0):
if(strategy=="replace_lb_ub"):
outlier_dict={}.fromkeys( self.data[ self.data[col]>ub][col],ub)
outlier_dict.update({}.fromkeys( self.data[ self.data[col]<lb][col],lb))
self.data[col]= self.data[col].replace(outlier_dict)
elif(strategy=="replace_mean"):
outlier_dict_mean={}.fromkeys( self.data[( self.data[col]<lb) | ( self.data[col]>ub)][col], self.data[col].mean())
self.data[col]= self.data[col].replace(outlier_dict_mean)
elif(strategy=="replace_median"):
outlier_dict_median={}.fromkeys( self.data[( self.data[col]<lb) | ( self.data[col]>ub)][col], self.data[col].median())
self.data[col]= self.data[col].replace(outlier_dict_median)
elif(strategy=="remove"):
#outlier_index=data[(data[col]<lb) | (data[col]>ub)].index
#print()
self.data= self.data[(self.data[col]>lb) & (self.data[col]<ub)]
return self.data
def encode_data(self,strategy="one_hot_encode",high_cardinality="frequency",drop_first=True,ordinal_map=None,categorical_features="auto",encode_map=None):
data= self.data
target_encode=None
data=self.impute_missing()
#print(categorical_features)
self.categorical_features=categorical_features
self.high_cardinality=high_cardinality
if(self.target):
if(data[self.target].dtype=="object"):
label_encoder = LabelEncoder()
data[self.target]=label_encoder.fit_transform(data[self.target])
target_encode=data[self.target]
data=data.drop(self.target,axis=1)
if(self.categorical_features=="auto"):
self.categorical_features=[]
self.high_cardinality_features=[]
for col in data.columns:
if(data[col].dtype=="object" and data[col].nunique()<self.cat_thresh):
self.categorical_features.append(col)
elif(data[col].dtype=="object" and data[col].nunique()>self.cat_thresh):
self.high_cardinality_features.append(col)
if(self.high_cardinality=="frequency"):
self.hc_frequency_map={}
for col in self.high_cardinality_features:
self.hc_frequency_map[col]=dict(data[col].value_counts())
data[col]=data[col].map(self.hc_frequency_map[col])
if strategy=="one_hot_encode":
self.oh_map={}
for col in self.categorical_features:
self.oh_map[col]=OneHotEncoder()
oh_encode=pd.DataFrame(self.oh_map[col].fit_transform(data[col].values.reshape(-1,1)).toarray(),columns=[col+"_"+s for s in sorted(data[col].unique())])
data=data.drop(col,axis=1)
if(drop_first):
oh_encode=oh_encode.iloc[:,1:]
data=pd.concat([data,oh_encode],axis=1)
elif strategy=="label_encode":
self.lb_map={}
for col in self.categorical_features:
self.lb_map[col] = LabelEncoder()
data[col]=self.lb_map[col].fit_transform(data[col])
elif strategy=="ordinal_encode":
if not ordinal_map:
raise ValueError("ordinal_map should not be None for Ordinal Encoding")
else:
for key,value in ordinal_map.items():
#num=list(range(0,len(value)))
#map_d = {value[i]: num[i] for i in range(len(value))}
data[key]=data[key].map(value)
elif strategy=="frequency" or strategy=="count":
self.frequency_map={}
for col in self.categorical_features:
self.frequency_map[col]=dict(data[col].value_counts())
data[col]=data[col].map(self.frequency_map[col])
elif strategy=="hybrid":
if not encode_map:
raise ValueError("encode_map should not be None for Hybrid Encoding")
else:
for key,value in encode_map.items():
if(key=="one_hot_encode"):
data=pd.get_dummies(data,columns=value,drop_first=drop_first)
elif(key=="label_encode"):
for col in value:
label_encoder = LabelEncoder()
data[col]=label_encoder.fit_transform(data[col])
elif(key=="ordinal_encode"):
for k,v in value.items():
num=list(range(0,len(v)))
map_d = {v[i]: num[i] for i in range(len(v))}
data[k]=data[k].map(map_d)
if(self.target):
data=pd.concat([data,target_encode],axis=1)
self.data=data
return self.data
def normalize(self,method="min_max"):
data=self.data
target=None
if(self.target):
target=data[self.target]
data=data.drop(self.target,axis=1)
dataset_num=pd.DataFrame()
dataset_cat=pd.DataFrame()
for col in data.columns:
if((data[col].dtype=='int64' or data[col].dtype=='float64')):
dataset_num[col]=data[col]
else:
dataset_cat[col]=data[col]
if(method=="min_max"):
self.sc=sk.MinMaxScaler()
col=dataset_num.columns
dataset_num=pd.DataFrame(self.sc.fit_transform(dataset_num),columns=col)
elif(method=="standard"):
self.sc=sk.StandardScaler()
col=dataset_num.columns
dataset_num=pd.DataFrame(self.sc.fit_transform(dataset_num),columns=col)
elif(method=="robust"):
self.sc=sk.RobustScaler()
col=dataset_num.columns
dataset_num=pd.DataFrame(self.sc.fit_transform(dataset_num),columns=col)
if(self.target):
data=pd.concat([dataset_num,dataset_cat,target],axis=1)
else:
data=pd.concat([dataset_num,dataset_cat],axis=1)
self.data=data
return self.data
def preprocess_data(self,impute_missing=True,handle_outliers=True,encode_data=True,normalize=True,
numerical_imputation="mean",categorical_imputation="mode",cat_thresh=10,
outlier_method="iqr",outlier_threshold=2,outlier_strategy="replace_lb_ub",outlier_columns="all",
encoding_strategy="one_hot_encode",high_cardinality_encoding="frequency",encode_drop_first=True,ordinal_map=None,encoding_categorical_features="auto",encode_map=None,
normalization_strategy="min_max",verbose=1
):
print("Part-1 Data PreProcessing Started...")
print(10*"=")
self.missing=impute_missing
self.outliers=handle_outliers
self.encode=encode_data
self.scale=normalize
self.numerical_imputation=numerical_imputation
self.categorical_imputation=categorical_imputation
self.encode_strategy=encoding_strategy
self.drop_first=encode_drop_first
self.ordinal_map=ordinal_map
self.categorical_features=encoding_categorical_features
self.encode_map=encode_map
if impute_missing:
if(verbose):
print("Handling Missing Values")
self.impute_missing(numerical_imputation,categorical_imputation)
if(verbose):
print(30*"=")
if handle_outliers:
if(verbose):
print("Handling Outliers Values")
self.handle_outliers(outlier_method,outlier_threshold,outlier_strategy,outlier_columns)
if(verbose):
print(30*"=")
if encode_data:
if(verbose):
print("Encoding Data")
self.encode_data(encoding_strategy,high_cardinality_encoding,encode_drop_first,ordinal_map,encoding_categorical_features,encode_map)
if(verbose):
print(30*"=")
if normalize:
if(verbose):
print("Normaliziling Values")
self.normalize(normalization_strategy)
if(verbose):
print(30*"=")
self.p_columns=self.data.columns
return self.data
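    # --- Usage sketch (illustrative, not part of the original class) ---------
    # End-to-end run of the pipeline above; `raw_df` and the parameter choices
    # are hypothetical and only illustrate the available options. Steps run in
    # order: impute -> outliers -> encode -> normalize.
    #
    #   pp = preprocess(raw_df, target="label", cat_thresh=20)
    #   processed = pp.preprocess_data(
    #       numerical_imputation="mean",
    #       outlier_strategy="replace_lb_ub",
    #       encoding_strategy="one_hot_encode",
    #       normalization_strategy="standard",
    #   )
    # --------------------------------------------------------------------------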
def preprocess_data_new(self,new_data):
if(type(new_data)==list):
new_data=[new_data]
if(len(new_data[0])==(len(self.col_i)-1)):
col_d=list(self.col_i)
col_d.remove(self.target)
            new_data=pd.DataFrame(new_data,columns=col_d)
from pathlib import Path
from typing import Tuple
import numpy as np
import pandas as pd
import torch
import torch.nn.functional as F
import torchaudio
from constants import INPUT_SAMPLE_RATE, TARGET_SAMPLE_RATE
from torch.utils.data import DataLoader, Dataset
from tqdm import tqdm
class SegmentationDataset(Dataset):
"""Base class for FixedSegmentationDataset and RandomSegmentationDataset"""
def __init__(
self,
path_to_dataset: str,
split_name: str,
) -> None:
"""
Args:
path_to_dataset (str): absolute path to the directory
of _talks.tsv and _segments.tsv for the dataset
split_name (str): name of the dataset split
"""
super().__init__()
self.path_to_dataset = Path(path_to_dataset)
self.split_name = split_name
self.input_sr = INPUT_SAMPLE_RATE
self.target_sr = TARGET_SAMPLE_RATE
self.in_trg_ratio = self.input_sr / self.target_sr
self.trg_in_ratio = 1 / self.in_trg_ratio
# load the talks and the actual segments
self.talks_df = pd.read_csv(
self.path_to_dataset / f"{self.split_name}_talks.tsv", sep="\t", index_col=0
)
self.segments_df = pd.read_csv(
self.path_to_dataset / f"{self.split_name}_segments.tsv",
sep="\t",
index_col=0,
)
self.columns = ["talk_id", "start", "end", "duration", "included"]
# to calculate percentage of positive examples
self.n_pos, self.n_all = 0, 0
def _secs_to_outframes(self, x):
# from seconds to output space
return np.round(x * self.target_sr).astype(int)
def _outframes_to_inframes(self, x):
# from output space to input space
return np.round(x * self.in_trg_ratio).astype(int)
def _inframes_to_outframes(self, x):
# from input space to output space
return np.round(x * self.trg_in_ratio).astype(int)
def _secs_to_inframes(self, x):
# from seconds to input space
return np.round(x * self.input_sr).astype(int)
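    # --- Worked example (illustrative, not part of the original class) -------
    # Assuming, hypothetically, INPUT_SAMPLE_RATE = 16000 Hz and
    # TARGET_SAMPLE_RATE = 50 Hz (the real values live in constants.py):
    #
    #   _secs_to_inframes(2.0)        -> round(2.0 * 16000)        = 32000
    #   _secs_to_outframes(2.0)       -> round(2.0 * 50)           = 100
    #   _inframes_to_outframes(32000) -> round(32000 * 50 / 16000) = 100
    #   _outframes_to_inframes(100)   -> round(100 * 16000 / 50)   = 32000
    # --------------------------------------------------------------------------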
def _get_targets_for_talk(self, sgm_df: pd.DataFrame, talk_id: str) -> pd.DataFrame:
"""
Given a segmentation of a talk (sgm_df), find for
each random segment the true_starts and true_ends that it includes.
They are in string form separated by commas.
        If there are none, the string "NA" is used instead.
Args:
sgm_df (pd.DataFrame): a random segmentation of a wav
talk_id (str): unique id for the wav
Returns:
pd.DataFrame: sgm_df but with the 'included' column completed
"""
true_sgm_df = self.segments_df.loc[self.segments_df.talk_id == talk_id]
talk_targets = np.zeros(
self.talks_df.loc[self.talks_df.id == talk_id, "total_frames"].values[0]
)
for idx, sgm in true_sgm_df.iterrows():
talk_targets[sgm.start : sgm.end] = 1
for idx, sgm in sgm_df.iterrows():
sgm_targets = self._get_targets_for_segment(
talk_targets[sgm.start : sgm.end]
)
sgm_df.loc[idx, "included"] = (
",".join([f"{s}:{e}" for s, e in sgm_targets]) if sgm_targets else "NA"
)
return sgm_df
def _get_targets_for_segment(self, true_points: np.array) -> list[list[int]]:
"""
Extracts the start and end points of segments in the output space
from a binary vector defining the labels in the input space
Args:
true_points (np.array):
binary label for each frame in the input space of a random segment
Returns:
list[list[int]]: list of tuples (start, end) in the output space
where each tuple defines the start and end of a the true included points
"""
points_of_change = list(np.where(true_points[1:] != true_points[:-1])[0] + 1)
targets = []
for s, e in zip([0] + points_of_change, points_of_change + [len(true_points)]):
if true_points[s] == 1:
s = self._inframes_to_outframes(s)
e = self._inframes_to_outframes(e)
# increase start of next segment if overlaps with end of the prev one
if targets and s <= targets[-1][-1]:
s += 1
targets.append([s, e])
self.n_pos += e - s
self.n_all += self._inframes_to_outframes(len(true_points))
return targets
def _construct_target(self, segment: pd.Series) -> torch.FloatTensor:
"""
Given a random segment, constructs its one-hot target tensor in the output space
"""
target_len = self._inframes_to_outframes(segment.duration)
target = torch.zeros(target_len, dtype=torch.float)
if segment.included != "NA":
for s_e in segment.included.split(","):
s, e = s_e.split(":")
s = int(s)
e = min(int(e), target_len + 1)
target[s:e] = 1
return target
class FixedSegmentationDataset(SegmentationDataset):
def __init__(
self,
path_to_dataset: str,
split_name: str,
segment_length_secs: int = 20,
inference_times: int = 1,
) -> None:
"""
Segmentation dataset to be used during inference
Creates a pool of examples from a fixed-length segmentation of a wav
Args:
path_to_dataset (str): absolute path to the directory
of _talks.tsv and _segments.tsv for the dataset
split_name (str): name of the dataset split
segment_length_secs (int, optional):
The length of the fixed segments in seconds. Defaults to 20.
inference_times (int, optional):
How many times to perform inference on different fixed-length segmentations.
Defaults to 1.
"""
super().__init__(path_to_dataset, split_name)
self.segment_length_inframes = self._secs_to_inframes(segment_length_secs)
self.inference_times = inference_times
def generate_fixed_segments(self, talk_id: str, i: int) -> None:
"""
Generates a fixed-length segmentation of a wav
with "i" controlling the begining of the segmentation
so that different values of "i" produce different segmentations
Args:
talk_id (str): unique wav identifier
i (int): indicates the current inference time
and is used to produce a different fixed-length segmentation
minimum allowed is 0 and maximum allowed is inference_times - 1
"""
talk_info = self.talks_df.loc[self.talks_df["id"] == talk_id]
self.talk_path = talk_info["path"].values[0]
self.duration_outframes = self._inframes_to_outframes(
self.talks_df.loc[self.talks_df["id"] == talk_id, "total_frames"].values[0]
)
self.duration_inframes = int(talk_info["total_frames"])
self.fixed_segments_df = pd.DataFrame(columns=self.columns)
start = round(self.segment_length_inframes / self.inference_times * i)
if start > self.duration_inframes:
start = 0
segmentation = np.arange(
start, self.duration_inframes, self.segment_length_inframes
).astype(int)
if segmentation[0] != 0:
segmentation = np.insert(segmentation, 0, 0)
if segmentation[-1] != self.duration_inframes:
if self.duration_inframes - segmentation[-1] < self._secs_to_inframes(2):
segmentation[-1] = self.duration_inframes
else:
segmentation = np.append(segmentation, self.duration_inframes)
self.fixed_segments_df["talk_id"] = talk_id
self.fixed_segments_df["start"] = segmentation[:-1]
self.fixed_segments_df["end"] = segmentation[1:]
self.fixed_segments_df["duration"] = (
self.fixed_segments_df.end - self.fixed_segments_df.start
)
# fill-in targets
self.fixed_segments_df = self._get_targets_for_talk(
self.fixed_segments_df, talk_id
)
def __len__(self) -> int:
return len(self.fixed_segments_df)
def __getitem__(
self, index: int
) -> Tuple[torch.FloatTensor, torch.FloatTensor, int, int]:
"""
Loads the data for this fixed-length segment
Args:
index (int): segment id in the self.fixed_segments_df
Returns:
Tuple[torch.FloatTensor, torch.FloatTensor, int, int]:
0: waveform of the segment (input space)
1: target tensor of the segment (output space)
2: starting frame of the segment (output space)
3: ending frame of the segment (output space)
"""
segment = self.fixed_segments_df.iloc[index]
waveform, _ = torchaudio.backend.sox_io_backend.load(
self.talk_path, frame_offset=segment.start, num_frames=segment.duration
)
start = self._inframes_to_outframes(segment.start + 1e-6)
end = self._inframes_to_outframes(segment.end + 1e-6)
target = self._construct_target(segment)
return waveform[0], target, start, end
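# --- Usage sketch (illustrative, not part of the original module) ------------
# Hypothetical inference loop over one talk with the dataset above; the dataset
# path, split name and talk id are placeholders. batch_size=1 avoids collating
# variable-length waveforms.
#
#   dataset = FixedSegmentationDataset("/path/to/dataset", "dev",
#                                      segment_length_secs=20, inference_times=2)
#   for i in range(dataset.inference_times):
#       dataset.generate_fixed_segments("some_talk_id", i)
#       loader = DataLoader(dataset, batch_size=1, shuffle=False)
#       for waveform, target, start, end in loader:
#           ...  # run the segmentation model on `waveform`
# ------------------------------------------------------------------------------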
class RandomSegmentationDataset(SegmentationDataset):
def __init__(
self,
path_to_dataset: str,
split_name: str = "train",
segment_length_secs: int = 20,
seed: int = None,
) -> None:
"""
Segmentation dataset to be used during training.
Creates a pool of examples from a random segmentation of collection of wavs
Args:
path_to_dataset (str): absolute path to the directory
of _talks.tsv and _segments.tsv for the dataset
split_name (str): name of the dataset split. Defaults to train.
segment_length_secs (int, optional):
The length of the fixed segments in seconds. Defaults to 20.
seed (int, optional): The random seed to be used for the random segmentation.
Defaults to None
"""
super().__init__(path_to_dataset, split_name)
if seed is not None:
np.random.seed(seed)
self.segment_length_outframes = self._secs_to_outframes(segment_length_secs)
self.max_segment_outframes_overlap = self._secs_to_outframes(
segment_length_secs / 10
)
self.segment_length_inframes = self._secs_to_inframes(segment_length_secs)
# populate the dataset
self.generate_random_segments()
self.pos_class_percentage = self.n_pos / self.n_all
def generate_random_segments(self) -> None:
"""
Creates a new dataset by randomly segmenting each talk
and finding the true targets that correspond to every random segment
"""
print(
f"Generating random segments for {self.path_to_dataset} and {self.split_name} split ..."
)
self.random_segments_df = pd.concat(
[
self._get_targets_for_talk(self._segment_talk(talk), talk["id"])
for _, talk in tqdm(self.talks_df.iterrows())
],
ignore_index=True,
)
def _segment_talk(self, talk: pd.Series) -> pd.DataFrame:
"""
Produces a random segmentation of a given talk from the talks_df
"""
        rnd_sgm_df = pd.DataFrame(columns=self.columns)
import sys,os
sys.path.append(os.getcwd()+'/src/gym-auction_emulator')
import gym, gym_auction_emulator
import random
from operator import itemgetter
import torch
import numpy as np
from collections import deque
import cloudpickle
import configparser
from dqn import DQN
from reward_net import RewardNet
from model import set_seed
import pandas as pd
class RlBidAgent():
def _load_config(self):
"""
Parse the config.cfg file
"""
cfg = configparser.ConfigParser(allow_no_value=True)
env_dir = os.path.dirname(__file__)
cfg.read(env_dir + '/config.cfg')
self.exp_type = str(cfg['experiment_type']['type'])
self.T = int(cfg[self.exp_type]['T']) # Number of timesteps in each episode
def __init__(self):
self._load_config()
        # Beta parameter adjusting the lambda parameter that regulates the agent's bid amount
self.BETA = [-0.08, -0.03, -0.01, 0, 0.01, 0.03, 0.08]
# Starting value of epsilon in the adaptive eps-greedy policy
self.eps = 0.9
# Parameter controlling the annealing speed of epsilon
self.anneal = 2e-5
if self.exp_type in ('improved_drlb', 'improved_drlb_eval'):
# DQN Network to learn Q function
self.dqn_agent = DQN(state_size = 6, action_size = 7)
# Reward Network to learn the reward function
self.reward_net = RewardNet(state_action_size = 7, reward_size = 1)
else:
self.dqn_agent = DQN(state_size = 7, action_size = 7)
self.reward_net = RewardNet(state_action_size = 8, reward_size = 1)
# Number of timesteps in each episode (4 15min intervals x 24 hours = 96)
# self.T = 672
# Initialize the DQN action for t=0 (index 3 - no adjustment of lambda, 0 ind self.BETA)
self.dqn_action = 3
self.ctl_lambda = None
# Arrays saving the training history
self.step_memory = []
self.episode_memory = []
# Params for tracking the progress
self.global_T = 0 # Tracking the global time step
self.episode_budgets = None
self.budget = None
self.total_wins = 0
self.total_rewards = 0
self.rewards_prev_t = 0
self.rewards_prev_t_ratio = 0
self.rnet_r = 0
self.wins_e = 0
self.rewards_e = 0
self.ROL = self.T
self.ROL_ratio = 1
def _get_state(self):
"""
Returns the state that will be used as input in the DQN
"""
if self.exp_type in ('improved_drlb', 'improved_drlb_eval'):
return np.asarray([self.rem_budget_ratio, # 2. the ratio of the remaining budget to total available budget at time-step t
self.ROL_ratio, # 3. The ratio of the number of Lambda regulation opportunities left
self.BCR, # 4. Budget consumption rate
self.CPI, # 5. Cost per impression between t-1 and t, in relation to the highest cost possible in the training set (300)
self.WR, # 6. Auction win rate at state t
self.rewards_prev_t_ratio]) # 7. Ratio of acquired/total clicks at timestep t-1
else:
return np.asarray([self.t_step, # 1. Current time step
self.rem_budget, # 2. the remaining budget at time-step t
self.ROL, # 3. The number of Lambda regulation opportunities left
self.BCR, # 4. Budget consumption rate
self.CPM, # 5. Cost per mille of impressions between t-1 and t:
self.WR, # 6. Auction win rate at state t
self.rewards_prev_t]) # 7. Clicks acquired at timestep t-1
def _reset_episode(self):
"""
Function to reset the state when episode changes
"""
# Reset the count of time steps
self.t_step = 0
# Lambda regulation parameter - set according to the greedy approximation algorithm, as suggested by the paper
if self.exp_type == 'vanilla_drlb':
self.ctl_lambda = 0.01 if self.budget is None else self.calc_greedy(self.greedy_memory, self.budget)
# Clean up the array used to save all the necessary information to solve the knapsack problem with the GA algo
self.greedy_memory = []
elif self.exp_type == 'episode_lambda':
self.ctl_lambda = 0.01
else:
pass
# Next episode -> next step
self._reset_step()
# Set the budget for the episode
self.budget = self.episode_budgets.pop(0)
self.rem_budget = self.budget
self.rem_budget_ratio = 1
self.budget_spent_t = 0
self.budget_spent_e = 0
if self.exp_type not in ('free_lambda', 'free_lambda_eval', 'improved_drlb', 'improved_drlb_eval'):
self.ROL = self.T # 3. The number of Lambda regulation opportunities left
self.ROL_ratio = 1
self.cur_day = 0
self.cur_min = 0
        self.total_wins += self.wins_e
        self.total_rewards += self.rewards_e
# Impressions won in each episode
self.wins_e = 0
# Clicks won in each episode
self.rewards_e = 0
# Dict and Value necessary for learning the RewardNet
self.reward_net.V = 0
self.reward_net.S = []
def _update_step(self):
"""
Function that is called before transitioning into step t+1 (updates state t)
"""
self.global_T += 1
self.t_step += 1
self.prev_budget = self.rem_budget
self.rem_budget = self.prev_budget - self.budget_spent_t
self.budget_spent_e += self.budget_spent_t
self.rewards_prev_t = self.reward_t
self.ROL -= 1
self.BCR = 0 if self.prev_budget == 0 else -((self.rem_budget - self.prev_budget) / self.prev_budget)
if self.exp_type in ('improved_drlb', 'improved_drlb_eval'):
self.CPI = 0 if self.wins_t == 0 else (self.cost_t / self.wins_t) / 300
self.rewards_prev_t_ratio = 1 if self.possible_clicks_t == 0 else self.reward_t / self.possible_clicks_t
self.ROL_ratio = self.ROL / self.T
self.rem_budget_ratio = self.rem_budget / self.budget
else:
self.CPM = 0 if self.wins_t == 0 else ((self.cost_t / self.wins_t) * 1000)
self.WR = self.wins_t / self.imp_opps_t
# Adaptive eps-greedy policy
self.eps = max(0.95 - self.anneal * self.global_T, 0.05)
def _reset_step(self):
"""
Function to call every time a new time step is entered.
"""
self.possible_clicks_t = 0
self.total_rewards_t = 0
self.reward_t = 0
self.cost_t = 0
self.wins_t = 0
self.imp_opps_t = 0
self.BCR = 0
if self.exp_type in ('improved_drlb', 'improved_drlb_eval'):
self.CPI = 0
else:
self.CPM = 0
self.WR = 0
self.budget_spent_t = 0
def _update_reward_cost(self, bid, reward, potential_reward, cost, win):
"""
Internal function to update reward and action to compute the cumulative
reward and cost within the given step.
"""
self.possible_clicks_t += potential_reward
if win:
self.budget_spent_t += cost
self.wins_t += 1
self.wins_e += 1
self.total_wins += 1
self.reward_t += reward
self.rewards_e += reward
self.total_rewards += reward
self.cost_t += cost
def _model_upd(self, eval_mode):
if not eval_mode:
self.reward_net.step() # update reward net
next_state = self._get_state() # observe state s_t+1 (state at the beginning of t+1)
# get action a_t+1 (adjusting lambda_t to lambda_t+1) from the adaptive greedy policy
a_beta = self.dqn_agent.act(next_state, eps=self.eps, eval_mode=eval_mode)
self.ctl_lambda *= (1 + self.BETA[a_beta])
if not eval_mode:
# updates for the RewardNet
sa = np.append(self.cur_state, self.BETA[self.dqn_action]) #self.dqn_action) # state-action pair for t
self.rnet_r = float(self.reward_net.act(sa)) # get reward r_t from RewardNet
self.reward_net.V += self.reward_t
self.reward_net.S.append((self.cur_state, self.BETA[self.dqn_action]))
# Store in D1 and sample a mini batch and perform grad-descent step
self.dqn_agent.step(self.cur_state, self.dqn_action, self.rnet_r, next_state)
self.cur_state = next_state # set state t+1 as state t
self.dqn_action = a_beta # analogously with the action t+1
def act(self, obs, eval_mode):
"""
This function gets called with every bid request.
By looking at the weekday and hour to progress between the steps and
episodes during training.
Returns the bid decision based on the scaled version of the
bid price using the DQN agent output.
"""
# within the time step
if obs['min'] == self.cur_min and obs['weekday'] == self.cur_day:
pass
# within the episode, changing the time step
elif obs['min'] != self.cur_min and obs['weekday'] == self.cur_day:
self._update_step()
self._model_upd(eval_mode)
self.cur_min = obs['min']
# save history
self.step_memory.append([self.global_T, int(self.rem_budget), self.ctl_lambda, self.eps, self.dqn_action, self.dqn_agent.loss, self.rnet_r, self.reward_net.loss])
self._reset_step()
# transition to next episode
elif obs['weekday'] != self.cur_day:
self._update_step()
self._model_upd(eval_mode)
self.step_memory.append([self.global_T, int(self.rem_budget), self.ctl_lambda, self.eps, self.dqn_action, self.dqn_agent.loss, self.rnet_r, self.reward_net.loss])
# Updates for the RewardNet at the end of each episode (only when training)
if not eval_mode:
for (s, a) in self.reward_net.S:
sa = tuple(np.append(s, a))
max_r = max(self.reward_net.get_from_M(sa), self.reward_net.V)
self.reward_net.add_to_M(sa, max_r)
self.reward_net.add(sa, max_r)
print("Episode Result with Step={} Budget={} Spend={} impressions={} clicks={}".format(self.global_T, int(self.budget), int(self.budget_spent_e), self.wins_e, self.rewards_e))
# Save history
self.episode_memory.append([self.budget, int(self.budget_spent_e), self.wins_e, self.rewards_e])
self._reset_episode()
self.cur_day = obs['weekday']
self.cur_min = obs['min']
self.imp_opps_t += 1
bid = self.calc_bid(obs['pCTR'])
if self.exp_type == 'vanilla_drlb':
self.greedy_memory.append([obs['pCTR'], obs['payprice'], obs['pCTR']/max(obs['payprice'], 1)])
return bid
def calc_bid(self, imp_value):
# Calculate the theoretically optimal bid
bid_amt = round(imp_value/self.ctl_lambda, 2)
curr_budget_left = self.rem_budget - self.budget_spent_t
if bid_amt > curr_budget_left:
bid_amt = curr_budget_left
return bid_amt
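    # --- Worked example (illustrative, not part of the original agent) -------
    # With hypothetical values pCTR = 0.002 and ctl_lambda = 0.01 the bid is
    # 0.002 / 0.01 = 0.2, capped by the budget left in the current step. The
    # DQN only rescales ctl_lambda between steps, e.g. choosing BETA = +0.08
    # gives ctl_lambda <- ctl_lambda * 1.08, which lowers subsequent bids.
    # --------------------------------------------------------------------------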
def calc_greedy(self, items, budget_limit):
# Borrowed from: https://bitbucket.org/trebsirk/algorithms/src/master/knapsack.py
# Greedy approximation algorithm (Dantzig, 1957)
bids = []
spending = 0
ctr = 0
items_sorted = sorted(items, key=itemgetter(2), reverse=True)
while len(items_sorted) > 0:
item = items_sorted.pop()
            if item[1] + spending <= budget_limit:  # item[1] is the payprice; keep filling while under budget
bids.append(item)
spending += bids[-1][1]
ctr += bids[-1][0]
else:
break
ctrs = np.array(bids)[:,0]
costs = np.array(bids)[:,1]
# Take the max lambda to be more conservative at the beginning of a time step
opt_lambda = np.max(np.divide(ctrs, costs))
return opt_lambda
def main():
# Instantiate the Environment and Agent
env = gym.make('AuctionEmulator-v0')
env.seed(0)
set_seed()
agent = RlBidAgent()
train_budget = env.bid_requests.payprice.sum()/8
# Set budgets for each episode
budget_proportions = []
for episode in env.bid_requests.weekday.unique():
budget_proportions.append(len(env.bid_requests[env.bid_requests.weekday == episode])/env.total_bids)
for i in range(len(budget_proportions)):
budget_proportions[i] = round(train_budget * budget_proportions[i])
epochs = 400
for epoch in range(epochs):
print("Epoch: ", epoch+1)
obs, done = env.reset()
agent.episode_budgets = budget_proportions.copy()
if agent.exp_type in ('free_lambda', 'improved_drlb'):
agent.ctl_lambda = 0.01
agent._reset_episode()
agent.cur_day = obs['weekday']
        agent.cur_min = obs['min']
agent.cur_state = agent._get_state() # observe state s_0
while not done: # iterate through the whole dataset
bid = agent.act(obs, eval_mode=False) # Call agent action given each bid request from the env
next_obs, cur_reward, potential_reward, cur_cost, win, done = env.step(bid) # Get information from the environment based on the agent's action
agent._update_reward_cost(bid, cur_reward, potential_reward, cur_cost, win) # Agent receives reward and cost from the environment
obs = next_obs
print("Episode Result with Step={} Budget={} Spend={} impressions={} clicks={}".format(agent.global_T, int(agent.budget), int(agent.budget_spent_e), agent.wins_e, agent.rewards_e))
agent.episode_memory.append([agent.budget, int(agent.budget_spent_e), agent.wins_e, agent.rewards_e])
# Saving models and history
if ((epoch + 1) % 25) == 0:
PATH = 'models/model_state_{}.tar'.format(epoch+1)
torch.save({'local_q_model': agent.dqn_agent.qnetwork_local.state_dict(),
'target_q_model':agent.dqn_agent.qnetwork_target.state_dict(),
'q_optimizer':agent.dqn_agent.optimizer.state_dict(),
'rnet': agent.reward_net.reward_net.state_dict(),
'rnet_optimizer': agent.reward_net.optimizer.state_dict()}, PATH)
f = open('models/rnet_memory_{}.txt'.format(epoch+1), "wb")
cloudpickle.dump(agent.dqn_agent.memory, f)
f.close()
f = open('models/rdqn_memory_{}.txt'.format(epoch+1), "wb")
cloudpickle.dump(agent.reward_net.memory, f)
f.close()
pd.DataFrame(agent.step_memory).to_csv('models/step_history_{}.csv'.format(epoch+1),header=None,index=False)
agent.step_memory=[]
            pd.DataFrame(agent.episode_memory)
# -*- coding: utf-8 -*-
from __future__ import print_function, division
# ==============================================================================
# Modules
# ==============================================================================
# Built-ins
import os, sys, time, datetime, copy, warnings
from typing import Dict, Union, Any
from collections import defaultdict, OrderedDict
from collections.abc import Mapping, Hashable
from itertools import combinations, product
# PyData
import pandas as pd
import numpy as np
import networkx as nx
import xarray as xr
from scipy import stats
from scipy.special import comb
from scipy.spatial.distance import squareform, pdist
# Compositional
from compositional import pairwise_rho, pairwise_phi
# soothsayer_utils
from soothsayer_utils import pv, flatten, assert_acceptable_arguments, is_symmetrical, is_graph, write_object, format_memory, format_header, format_path, is_nonstring_iterable, Suppress, dict_build, dict_filter, is_dict, is_dict_like, is_color, is_number, check_packages, is_query_class
try:
from . import __version__
except ImportError:
__version__ = "ImportError: attempted relative import with no known parent package"
# ===================
# Transformations
# ===================
# Unsigned network to signed network
def signed(X):
"""
unsigned -> signed correlation
"""
return (X + 1)/2
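# --- Worked example (illustrative, not part of the original module) ----------
# signed() maps correlations from [-1, 1] onto [0, 1] elementwise:
#
#   signed(-1.0) -> 0.0
#   signed( 0.0) -> 0.5
#   signed( 1.0) -> 1.0
#   signed(df_corr)  # rescales a whole (hypothetical) correlation matrix
# ------------------------------------------------------------------------------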
# ===================
# Converting Networks
# ===================
#Get weights from graph
def get_weights_from_graph(graph, into=np.asarray, weight="weight", generator=False):
weights = map(lambda edge_data: edge_data[-1][weight], graph.edges(data=True))
if generator:
return weights
else:
weights = list(weights)
return into(weights)
# pd.DataFrame 2D to pd.Series
def dense_to_condensed(X, name=None, assert_symmetry=True, tol=None):
if assert_symmetry:
assert is_symmetrical(X, tol=tol), "`X` is not symmetric with tol=`{}`".format(tol)
labels = X.index
index=pd.Index(list(map(frozenset, combinations(labels, 2))), name=name)
data = squareform(X, checks=False)
return pd.Series(data, index=index, name=name)
# pd.Series to pd.DataFrame 2D
def condensed_to_dense(y:pd.Series, fill_diagonal="infer", index=None):
# Check if there are self-interactions
number_of_unique_nodes_in_edges = y.index.map(len).unique()
assert set(number_of_unique_nodes_in_edges) <= {1,2}, "Number of unique nodes in edge must be either 1 or 2"
if isinstance(fill_diagonal, str):
if fill_diagonal == "infer":
if number_of_unique_nodes_in_edges.min() == 1:
fill_diagonal = None
else:
fill_diagonal = np.nan
# Need to optimize this
data = defaultdict(dict)
for edge, w in y.iteritems():
number_of_unique_nodes = len(edge)
if len(edge) == 2:
node_a, node_b = tuple(edge)
else:
node_a = node_b = list(edge)[0]
data[node_a][node_b] = data[node_b][node_a] = w
if fill_diagonal is not None:
if is_dict_like(fill_diagonal):
for node in data:
data[node][node] = fill_diagonal[node]
else:
for node in data:
data[node][node] = fill_diagonal
df_dense = pd.DataFrame(data)
if index is None:
index = df_dense.index
return df_dense.loc[index,index]
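# --- Usage sketch (illustrative, not part of the original module) ------------
# Round trip between dense and condensed forms; the 3x3 matrix is hypothetical.
#
#   df = pd.DataFrame([[1.0, 0.8, 0.2],
#                      [0.8, 1.0, 0.5],
#                      [0.2, 0.5, 1.0]], index=list("abc"), columns=list("abc"))
#   y = dense_to_condensed(df, name="weights")
#   # y is indexed by frozenset({'a','b'}), frozenset({'a','c'}), frozenset({'b','c'})
#   df_back = condensed_to_dense(y, fill_diagonal=1.0)
# ------------------------------------------------------------------------------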
# Convert networks
def convert_network(data, into, index=None, assert_symmetry=True, tol=1e-10, **attrs):
"""
Convert to and from the following network structures:
* pd.DataFrame (must be symmetrical)
* pd.Series (index must be frozenset of {node_a, node_b})
* Symmetric
* nx.[Di|Ordered]Graph
"""
assert isinstance(data, (pd.DataFrame, pd.Series, Symmetric, nx.Graph, nx.OrderedGraph, nx.DiGraph, nx.OrderedDiGraph)), "`data` must be {pd.DataFrame, pd.Series, Symmetric, nx.Graph, nx.OrderedGraph, nx.DiGraph, nx.OrderedDiGraph}"
assert into in (pd.DataFrame, pd.Series, Symmetric, nx.Graph, nx.OrderedGraph, nx.DiGraph, nx.OrderedDiGraph), "`into` must be {pd.DataFrame, pd.Series, Symmetric, nx.Graph, nx.OrderedGraph, nx.DiGraph, nx.OrderedDiGraph}"
assert into not in {nx.MultiGraph, nx.MultiDiGraph}, "`into` cannot be a `Multi[Di]Graph`"
# self -> self
if isinstance(data, into):
return data.copy()
if isinstance(data, pd.Series):
data = Symmetric(data, **attrs)
if into == Symmetric:
return data
# pd.DataFrame -> Symmetric or Graph
if isinstance(data, pd.DataFrame) and (into in {Symmetric, nx.Graph, nx.OrderedGraph, nx.DiGraph, nx.OrderedDiGraph}):
weights = dense_to_condensed(data, assert_symmetry=assert_symmetry, tol=tol)
if into == Symmetric:
return Symmetric(weights, **attrs)
else:
return Symmetric(weights).to_networkx(into=into, **attrs)
# pd.DataFrame -> pd.Series
if isinstance(data, pd.DataFrame) and (into in {pd.Series}):
return dense_to_condensed(data, assert_symmetry=assert_symmetry, tol=tol)
# Symmetric -> pd.DataFrame, pd.Series, or Graph
if isinstance(data, Symmetric):
# pd.DataFrame
if into == pd.DataFrame:
df = data.to_dense()
if index is None:
return df
else:
assert set(index) <= set(df.index), "Not all `index` values are in `data`"
return df.loc[index,index]
elif into == pd.Series:
return data.weights.copy()
# Graph
else:
return data.to_networkx(into=into, **attrs)
# Graph -> Symmetric
if isinstance(data, (nx.Graph, nx.OrderedGraph, nx.DiGraph, nx.OrderedDiGraph)):
if into == Symmetric:
return Symmetric(data=data, **attrs)
if into == pd.DataFrame:
return Symmetric(data=data, **attrs).to_dense()
if into in {nx.Graph, nx.OrderedGraph, nx.DiGraph, nx.OrderedDiGraph}:
return Symmetric(data=data).to_networkx(into=into, **attrs)
if into == pd.Series:
return convert_network(data=data, into=Symmetric, index=index, assert_symmetry=assert_symmetry, tol=tol).weights
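# --- Usage sketch (illustrative, not part of the original module) ------------
# Moving between the supported containers; `df_sim` is a hypothetical symmetric
# pd.DataFrame of similarities.
#
#   graph = convert_network(df_sim, into=nx.Graph)
#   sym   = convert_network(graph, into=Symmetric)
#   edges = convert_network(sym, into=pd.Series)     # frozenset edge index
#   dense = convert_network(edges, into=pd.DataFrame)
# ------------------------------------------------------------------------------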
# ===================
# Network Statistics
# ===================
# Connectivity
def connectivity(data, groups:pd.Series=None, include_self_loops=False, tol=1e-10):
"""
Calculate connectivity from pd.DataFrame (must be symmetric), Symmetric, Hive, or NetworkX graph
groups must be dict-like: {node:group}
"""
# This is a hack to allow Hives from hive_networkx
if is_query_class(data, "Hive"):
data = condensed_to_dense(data.weights)
assert isinstance(data, (pd.DataFrame, Symmetric, nx.Graph, nx.DiGraph, nx.OrderedGraph, nx.OrderedDiGraph)), "Must be either a symmetric pd.DataFrame, Symmetric, nx.Graph, or hx.Hive object"
if is_graph(data):
weights = dict()
for edge_data in data.edges(data=True):
edge = frozenset(edge_data[:-1])
weight = edge_data[-1]["weight"]
weights[edge] = weight
weights = pd.Series(weights, name="Weights")#.sort_index()
data = Symmetric(weights)
if isinstance(data, Symmetric):
df_dense = condensed_to_dense(data.weights)
if isinstance(data, pd.DataFrame):
assert is_symmetrical(data, tol=tol)
df_dense = data
df_dense = df_dense.copy()
if not include_self_loops:
np.fill_diagonal(df_dense.values, 0)
#kTotal
k_total = df_dense.sum(axis=1)
if groups is None:
return k_total
else:
groups = pd.Series(groups)
data_connectivity = OrderedDict()
data_connectivity["kTotal"] = k_total
#kWithin
k_within = list()
for group in groups.unique():
idx_nodes = pd.Index(sorted(set(groups[lambda x: x == group].index) & set(df_dense.index)))
k_group = df_dense.loc[idx_nodes,idx_nodes].sum(axis=1)
k_within.append(k_group)
data_connectivity["kWithin"] = pd.concat(k_within)
#kOut
data_connectivity["kOut"] = data_connectivity["kTotal"] - data_connectivity["kWithin"]
#kDiff
data_connectivity["kDiff"] = data_connectivity["kWithin"] - data_connectivity["kOut"]
return pd.DataFrame(data_connectivity)
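# --- Usage sketch (illustrative, not part of the original module) ------------
# Hypothetical call on a symmetric similarity matrix `df_sim` with a node ->
# module mapping `node_to_module` (both placeholders).
#
#   k = connectivity(df_sim)                         # pd.Series of kTotal
#   df_k = connectivity(df_sim, groups=node_to_module)
#   # df_k columns: kTotal, kWithin, kOut (= kTotal - kWithin), kDiff
# ------------------------------------------------------------------------------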
def density(k:pd.Series):
"""
Density = sum(khelp)/(nGenes * (nGenes - 1))
https://github.com/cran/WGCNA/blob/15de0a1fe2b214f7047b887e6f8ccbb1c681e39e/R/Functions.R#L1963
"""
k_total = k.sum()
number_of_nodes = k.size
return k_total/(number_of_nodes * (number_of_nodes - 1))
def centralization(k:pd.Series):
"""
Centralization = nGenes*(max(khelp)-mean(khelp))/((nGenes-1)*(nGenes-2))
https://github.com/cran/WGCNA/blob/15de0a1fe2b214f7047b887e6f8ccbb1c681e39e/R/Functions.R#L1965
"""
k_max = k.max()
k_mean = k.mean()
number_of_nodes = k.size
return number_of_nodes * (k_max - k_mean)/((number_of_nodes - 1) * (number_of_nodes - 2))
def heterogeneity(k:pd.Series):
"""
Heterogeneity = sqrt(nGenes * sum(khelp^2)/sum(khelp)^2 - 1)
https://github.com/cran/WGCNA/blob/15de0a1fe2b214f7047b887e6f8ccbb1c681e39e/R/Functions.R#L1967
"""
number_of_nodes = k.size
return np.sqrt(number_of_nodes * np.sum(k**2)/np.sum(k)**2 - 1)
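# --- Usage sketch (illustrative, not part of the original module) ------------
# The three WGCNA-style summaries above all take a connectivity vector,
# continuing the hypothetical `df_sim` example:
#
#   k = connectivity(df_sim)
#   density(k), centralization(k), heterogeneity(k)
# ------------------------------------------------------------------------------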
# Topological overlap
def topological_overlap_measure(data, into=None, node_type=None, edge_type="topological_overlap_measure", association="network", assert_symmetry=True, tol=1e-10):
"""
Compute the topological overlap for a weighted adjacency matrix
`data` and `into` can be the following network structures/objects:
* pd.DataFrame (must be symmetrical)
* Symmetric
* nx.[Di|Ordered]Graph
====================================================
Benchmark 5000 nodes (iris w/ 4996 noise variables):
====================================================
TOM via rpy2 -> R -> WGCNA: 24 s ± 471 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
TOM via this function: 7.36 s ± 212 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
=================
Acknowledgements:
=================
Original source:
* <NAME> and <NAME>
https://www.rdocumentation.org/packages/WGCNA/versions/1.67/topics/TOMsimilarity
https://bmcbioinformatics.biomedcentral.com/articles/10.1186/1471-2105-9-559
Implementation adapted from the following sources:
* Credits to @scleronomic
https://stackoverflow.com/questions/56574729/how-to-compute-the-topological-overlap-measure-tom-for-a-weighted-adjacency-ma/56670900#56670900
* Credits to @benmaier
https://github.com/benmaier/GTOM/issues/3
"""
# Compute topological overlap
def _compute_tom(A):
# Prepare adjacency
np.fill_diagonal(A, 0)
# Prepare TOM
A_tom = np.zeros_like(A)
# Compute TOM
L = np.matmul(A,A)
ki = A.sum(axis=1)
kj = A.sum(axis=0)
MINK = np.array([ np.minimum(ki_,kj) for ki_ in ki ])
A_tom = (L+A) / (MINK + 1 - A)
np.fill_diagonal(A_tom,1)
return A_tom
# Check input type
if into is None:
into = type(data)
node_labels = None
if not isinstance(data, np.ndarray):
if not isinstance(data, pd.DataFrame):
data = convert_network(data, into=pd.DataFrame)
assert np.all(data.index == data.columns), "`data` index and columns must have identical ordering"
np.fill_diagonal(data.values,0) #! redundant
node_labels = data.index
# Check input type
if assert_symmetry:
assert is_symmetrical(data, tol=tol), "`data` is not symmetric"
assert np.all(data >= 0), "`data` weights must ≥ 0"
# Compute TOM
A_tom = _compute_tom(np.asarray(data))
if assert_symmetry:
A_tom = (A_tom + A_tom.T)/2
# Unlabeled adjacency
if node_labels is None:
return A_tom
# Labeled adjacency
else:
df_tom = pd.DataFrame(A_tom, index=node_labels, columns=node_labels)
df_tom.index.name = df_tom.columns.name = node_type
return convert_network(df_tom, into=into, assert_symmetry=assert_symmetry, tol=tol, adjacency="network", node_type=node_type, edge_type=edge_type, association=association)
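# --- Usage sketch (illustrative, not part of the original module) ------------
# TOM from a non-negative symmetric adjacency; `df_adjacency` is hypothetical
# (e.g. an absolute correlation matrix raised to a soft-threshold power).
#
#   df_tom = topological_overlap_measure(df_adjacency, into=pd.DataFrame)
#   graph_tom = topological_overlap_measure(df_adjacency, into=nx.Graph)
# ------------------------------------------------------------------------------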
# =======================================================
# Community Detection
# =======================================================
# Graph community detection
def community_detection(graph, n_iter:int=100, weight:str="weight", random_state:int=0, algorithm="louvain", algo_kws=dict()):
assert isinstance(n_iter, int)
assert isinstance(random_state, int)
assert isinstance(algorithm, str)
assert_acceptable_arguments(algorithm, {"louvain", "leiden"})
# Louvain
if algorithm == "louvain":
try:
from community import best_partition
except ModuleNotFoundError:
Exception("Please install `python-louvain` to use {} algorithm".format(algorithm))
# Keywords
_algo_kws = {}
_algo_kws.update(algo_kws)
def partition_function(graph, weight, random_state, algo_kws):
return best_partition(graph, weight=weight, random_state=random_state, **algo_kws)
# Leiden
if algorithm == "leiden":
try:
import igraph as ig
except ModuleNotFoundError:
Exception("Please install `igraph` to use {} algorithm".format(algorithm))
try:
from leidenalg import find_partition, ModularityVertexPartition
except ModuleNotFoundError:
Exception("Please install `leidenalg` to use {} algorithm".format(algorithm))
# Convert NetworkX to iGraph
graph = ig.Graph.from_networkx(graph)
nodes_list = np.asarray(graph.vs["_nx_name"])
# Keywords
_algo_kws = {"partition_type":ModularityVertexPartition, "n_iterations":-1}
_algo_kws.update(algo_kws)
def partition_function(graph, weight, random_state, algo_kws, nodes_list=nodes_list):
node_to_partition = dict()
for partition, nodes in enumerate(find_partition(graph, weights=weight, seed=random_state, **algo_kws)):
mapping = dict(zip(nodes_list[nodes], [partition]*len(nodes)))
node_to_partition.update(mapping)
return node_to_partition
# Get partitions
partitions = dict()
for rs in pv(range(random_state, n_iter + random_state), "Detecting communities via `{}` algorithm".format(algorithm)):
partitions[rs] = partition_function(graph=graph, weight=weight, random_state=rs, algo_kws=_algo_kws)
# Create DataFrame
df = pd.DataFrame(partitions)
df.index.name = "Node"
df.columns.name = "Partition"
return df
# Cluster homogeneity matrix
def cluster_homogeneity(df:pd.DataFrame, edge_type="Edge", iteration_type="Iteration"):
"""
# Create Graph
from soothsayer_utils import get_iris_data
df_adj = get_iris_data(["X"]).iloc[:5].T.corr() + np.random.RandomState(0).normal(size=(5,5))
graph = nx.from_pandas_adjacency(df_adj)
graph.nodes()
    # NodeView(('iris_0', 'iris_1', 'iris_2', 'iris_3', 'iris_4'))
# Community detection (network clustering)
df_louvain = community_detection(graph, n_iter=10, algorithm="louvain")
df_louvain
# Partition 0 1 2 3 4 5 6 7 8 9
# Node
# iris_0 0 0 0 0 0 0 0 0 0 0
# iris_1 1 1 1 1 1 1 1 1 1 1
# iris_2 1 2 2 2 2 1 2 2 2 2
# iris_3 0 1 1 1 1 0 1 1 1 1
# iris_4 2 3 3 3 3 2 3 3 3 3
# Determine cluster homogeneity
df_homogeneity = cluster_homogeneity(df_louvain)
df_homogeneity
# Iteration 0 1 2 3 4 5 6 7 8 9
# Edge
# (iris_1, iris_0) 0 0 0 0 0 0 0 0 0 0
# (iris_2, iris_0) 0 0 0 0 0 0 0 0 0 0
# (iris_3, iris_0) 1 0 0 0 0 1 0 0 0 0
# (iris_4, iris_0) 0 0 0 0 0 0 0 0 0 0
# (iris_1, iris_2) 1 0 0 0 0 1 0 0 0 0
# (iris_3, iris_1) 0 1 1 1 1 0 1 1 1 1
# (iris_4, iris_1) 0 0 0 0 0 0 0 0 0 0
# (iris_3, iris_2) 0 0 0 0 0 0 0 0 0 0
# (iris_4, iris_2) 0 0 0 0 0 0 0 0 0 0
# (iris_4, iris_3) 0 0 0 0 0 0 0 0 0 0
df_homogeneity.mean(axis=1)[lambda x: x > 0.5]
# Edge
# (iris_3, iris_1) 0.8
# dtype: float64
"""
# Adapted from @code-different:
# https://stackoverflow.com/questions/58566957/how-to-transform-a-dataframe-of-cluster-class-group-labels-into-a-pairwise-dataf
# `x` is a table of (n=nodes, p=iterations)
nodes = df.index
iterations = df.columns
x = df.values
n,p = x.shape
# `y` is an array of n tables, each having 1 row and p columns
y = x[:, None]
# Using numpy broadcasting, `z` contains the result of comparing each
# table in `y` against `x`. So the shape of `z` is (n x n x p)
z = x == y
# Reshaping `z` by merging the first two dimensions
data = z.reshape((z.shape[0] * z.shape[1], z.shape[2]))
# Redundant pairs
redundant_pairs = list(map(lambda node:frozenset([node]), nodes))
# Create pairwise clustering matrix
df_pairs = pd.DataFrame(
data=data,
index=pd.Index(list(map(frozenset, product(nodes,nodes))), name=edge_type),
columns=pd.Index(iterations, name=iteration_type),
dtype=int,
).drop(redundant_pairs, axis=0)
return df_pairs[~df_pairs.index.duplicated(keep="first")]
# =======================================================
# Data Structures
# =======================================================
# Symmetrical dataframes represented as augment pd.Series
class Symmetric(object):
"""
An indexable symmetric matrix stored as the lower triangle for space.
Usage:
import soothsayer_utils as syu
import ensemble_networkx as enx
# Load data
X, y, colors = syu.get_iris_data(["X", "y", "colors"])
n, m = X.shape
# Get association matrix (n,n)
method = "pearson"
df_sim = X.T.corr(method=method)
ratio = 0.382
number_of_edges = int((n**2 - n)/2)
number_of_edges_negative = int(ratio*number_of_edges)
# Make half of the edges negative to showcase edge coloring (not statistically meaningful at all)
for a, b in zip(np.random.RandomState(0).randint(low=0, high=149, size=number_of_edges_negative), np.random.RandomState(1).randint(low=0, high=149, size=number_of_edges_negative)):
if a != b:
df_sim.values[a,b] = df_sim.values[b,a] = df_sim.values[a,b]*-1
# Create a Symmetric object from the association matrix
sym_iris = enx.Symmetric(data=df_sim, node_type="iris sample", edge_type=method, name="iris", association="network")
# ====================================
# Symmetric(Name:iris, dtype: float64)
# ====================================
# * Number of nodes (iris sample): 150
# * Number of edges (correlation): 11175
# * Association: network
# * Memory: 174.609 KB
# --------------------------------
# | Weights
# --------------------------------
# (iris_1, iris_0) 0.995999
# (iris_0, iris_2) 0.999974
# (iris_3, iris_0) 0.998168
# (iris_0, iris_4) 0.999347
# (iris_0, iris_5) 0.999586
# ...
# (iris_148, iris_146) 0.988469
# (iris_149, iris_146) 0.986481
# (iris_147, iris_148) 0.995708
# (iris_149, iris_147) 0.994460
# (iris_149, iris_148) 0.999916
devel
=====
2020-June-23
* Replace self._dense_to_condensed to dense_to_condensed
* Dropped math operations
* Added input for Symmetric or pd.Series with a frozenset index
2018-August-16
* Added __add__, __sub__, etc.
* Removed conversion to dissimilarity for tree construction
* Added .iteritems method
Future:
* Use `weights` instead of `data`
Dropped:
Fix the diagonal arithmetic
"""
def __init__(
self,
data,
name=None,
node_type=None,
edge_type=None,
func_metric=None,
association="infer",
assert_symmetry=True,
nans_ok=True,
tol=None,
# fillna=np.nan,
acceptable_associations={"similarity", "dissimilarity", "statistical_test", "network", "infer", None},
**attrs,
):
self._acceptable_associations = acceptable_associations
self.name = name
self.node_type = node_type
self.edge_type = edge_type
self.func_metric = func_metric
self.association = association
self.diagonal = None
self.metadata = dict()
# From Symmetric object
if isinstance(data, type(self)):
if not nans_ok:
assert not np.any(data.weights.isnull()), "Cannot move forward with missing values"
self._from_symmetric(data=data, name=name, node_type=node_type, edge_type=edge_type, func_metric=func_metric, association=association)
# From networkx
if isinstance(data, (nx.Graph, nx.OrderedGraph, nx.DiGraph, nx.OrderedDiGraph)):
self._from_networkx(data=data, association=association)
# From pandas
if isinstance(data, (pd.DataFrame, pd.Series)):
if not nans_ok:
assert not np.any(data.isnull()), "Cannot move forward with missing values"
# From pd.DataFrame object
if isinstance(data, pd.DataFrame):
self._from_pandas_dataframe(data=data, association=association, assert_symmetry=assert_symmetry, nans_ok=nans_ok, tol=tol)
# From pd.Series object
if isinstance(data, pd.Series):
self._from_pandas_series(data=data, association=association)
# Universal
        # If there's still no `edge_type` and `func_metric` is not empty, then use the name of `func_metric`
if (self.edge_type is None) and (self.func_metric is not None):
self.edge_type = self.func_metric.__name__
self.values = self.weights.values
self.number_of_nodes = self.nodes.size
self.number_of_edges = self.edges.size
# self.graph = self.to_networkx(into=graph) # Not storing graph because it will double the storage
self.memory = self.weights.memory_usage()
self.metadata.update(attrs)
self.__synthesized__ = datetime.datetime.utcnow()
# =======
# Utility
# =======
def _infer_association(self, X):
diagonal = np.diagonal(X)
diagonal_elements = set(diagonal)
assert len(diagonal_elements) == 1, "Cannot infer relationships from diagonal because multiple values"
assert diagonal_elements <= {0,1}, "Diagonal should be either 0.0 for dissimilarity or 1.0 for similarity"
return {0.0:"dissimilarity", 1.0:"similarity"}[list(diagonal_elements)[0]]
def _from_symmetric(self,data, name, node_type, edge_type, func_metric, association):
self.__dict__.update(data.__dict__)
# If there's no `name`, then get `name` of `data`
if self.name is None:
self.name = name
# If there's no `node_type`, then get `node_type` of `data`
if self.node_type is None:
self.node_type = node_type
# If there's no `edge_type`, then get `edge_type` of `data`
if self.edge_type is None:
self.edge_type = edge_type
# If there's no `func_metric`, then get `func_metric` of `data`
if self.func_metric is None:
if func_metric is not None:
assert hasattr(func_metric, "__call__"), "`func_metric` must be a function"
self.func_metric = func_metric
# Infer associations
if self.association is None:
assert_acceptable_arguments(association, self._acceptable_associations)
if association != "infer":
self.association = association
def _from_networkx(self, data, association):
assert isinstance(data, (nx.Graph, nx.OrderedGraph, nx.DiGraph, nx.OrderedDiGraph)), "`If data` is a graph, it must be in {nx.Graph, nx.OrderedGraph, nx.DiGraph, nx.OrderedDiGraph}"
assert_acceptable_arguments(association, self._acceptable_associations)
        if association == "infer":
            association = "network"
assert_acceptable_arguments(association, self._acceptable_associations)
# Propogate information from graph
for attr in ["name", "node_type", "edge_type", "func_metric"]:
if getattr(self, attr) is None:
if attr in data.graph:
value = data.graph[attr]
if bool(value):
setattr(self, attr, value)
# Weights
edge_weights = dict()
for edge_data in data.edges(data=True):
edge = frozenset(edge_data[:-1])
weight = edge_data[-1]["weight"]
edge_weights[edge] = weight
data = pd.Series(edge_weights)
self._from_pandas_series(data=data, association=association)
def _from_pandas_dataframe(self, data:pd.DataFrame, association, assert_symmetry, nans_ok, tol):
if assert_symmetry:
assert is_symmetrical(data, tol=tol), "`X` is not symmetric. Consider dropping the `tol` to a value such as `1e-10` or using `(X+X.T)/2` to force symmetry"
assert_acceptable_arguments(association, self._acceptable_associations)
if association == "infer":
association = self._infer_association(data)
self.association = association
self.nodes = pd.Index(data.index)
self.diagonal = pd.Series(np.diagonal(data), index=data.index, name="Diagonal")[self.nodes]
self.weights = dense_to_condensed(data, name="Weights", assert_symmetry=assert_symmetry, tol=tol)
self.edges = pd.Index(self.weights.index, name="Edges")
def _from_pandas_series(self, data:pd.Series, association):
assert all(data.index.map(lambda edge: isinstance(edge, frozenset))), "If `data` is pd.Series then each key in the index must be a frozenset of size 2"
assert_acceptable_arguments(association, self._acceptable_associations)
if association == "infer":
association = None
self.association = association
# To ensure that the ordering is maintained and this is compatible with methods that use an unlabeled upper triangle, we must reindex and sort
self.nodes = pd.Index(sorted(frozenset.union(*data.index)))
self.edges = pd.Index(map(frozenset, combinations(self.nodes, r=2)), name="Edges")
self.weights = pd.Series(data, name="Weights").reindex(self.edges)
def set_diagonal(self, diagonal):
if diagonal is None:
self.diagonal = None
else:
if is_number(diagonal):
diagonal = dict_build([(diagonal, self.nodes)])
assert is_dict_like(diagonal), "`diagonal` must be dict-like"
assert set(diagonal.keys()) >= set(self.nodes), "Not all `nodes` are in `diagonal`"
self.diagonal = pd.Series(diagonal, name="Diagonal")[self.nodes]
# =======
# Built-in
# =======
def __repr__(self):
pad = 4
header = format_header("Symmetric(Name:{}, dtype: {})".format(self.name, self.weights.dtype),line_character="=")
n = len(header.split("\n")[0])
fields = [
header,
pad*" " + "* Number of nodes ({}): {}".format(self.node_type, self.number_of_nodes),
pad*" " + "* Number of edges ({}): {}".format(self.edge_type, self.number_of_edges),
pad*" " + "* Association: {}".format(self.association),
pad*" " + "* Memory: {}".format(format_memory(self.memory)),
*map(lambda line:pad*" " + line, format_header("| Weights", "-", n=n-pad).split("\n")),
*map(lambda line: pad*" " + line, repr(self.weights).split("\n")[1:-1]),
]
return "\n".join(fields)
def __getitem__(self, key):
"""
`key` can be a node or non-string iterable of edges
"""
if is_nonstring_iterable(key):
assert len(key) >= 2, "`key` must have at least 2 identifiers. e.g. ('A','B')"
key = frozenset(key)
if len(key) == 1:
return self.diagonal[list(key)[0]]
else:
if len(key) > 2:
key = list(map(frozenset, combinations(key, r=2)))
return self.weights[key]
else:
if key in self.nodes:
s = frozenset([key])
mask = self.edges.map(lambda x: bool(s & x))
return self.weights[mask]
else:
raise KeyError("{} not in node list".format(key))
def __call__(self, key, func=np.sum):
"""
        This can be used for connectivity in the context of networks but can be confusing with the versatility of __getitem__
"""
if hasattr(key, "__call__"):
return self.weights.groupby(key).apply(func)
else:
return func(self[key])
def __len__(self):
return self.number_of_nodes
def __iter__(self):
for v in self.weights:
yield v
def items(self):
return self.weights.items()
def iteritems(self):
return self.weights.iteritems()
def keys(self):
return self.weights.keys()
def apply(self, func):
return func(self.weights)
def mean(self):
return self.weights.mean()
def median(self):
return self.weights.median()
def min(self):
return self.weights.min()
def max(self):
return self.weights.max()
def idxmin(self):
return self.weights.idxmin()
def idxmax(self):
return self.weights.idxmax()
def sum(self):
return self.weights.sum()
def sem(self):
return self.weights.sem()
def var(self):
return self.weights.var()
def std(self):
return self.weights.std()
def describe(self, **kwargs):
return self.weights.describe(**kwargs)
def map(self, func):
return self.weights.map(func)
def entropy(self, base=2):
assert np.all(self.weights > 0), "All weights must be greater than 0"
return stats.entropy(self.weights, base=base)
# ==========
# Conversion
# ==========
def to_dense(self, index=None, fill_diagonal=None):
if fill_diagonal is None:
fill_diagonal=self.diagonal
if index is None:
index = self.nodes
return condensed_to_dense(y=self.weights, fill_diagonal=fill_diagonal, index=index)
def to_condensed(self):
return self.weights
# @check_packages(["ete3", "skbio"])
# def to_tree(self, method="average", into=None, node_prefix="y"):
# assert self.association == "dissimilarity", "`association` must be 'dissimilarity' to construct tree"
# if method in {"centroid", "median", "ward"}:
# warnings.warn("Methods ‘centroid’, ‘median’, and ‘ward’ are correctly defined only if Euclidean pairwise metric is used.\nSciPy Documentation - https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.linkage.html#scipy.cluster.hierarchy.linkage")
# if into is None:
# into = ete3.Tree
# if not hasattr(self,"Z"):
# self.Z = linkage(self.weights.values, metric="precomputed", method=method)
# if not hasattr(self,"newick"):
# self.newick = linkage_to_newick(self.Z, self.nodes)
# tree = into(newick=self.newick, name=self.name)
# return name_tree_nodes(tree, node_prefix)
def to_networkx(self, into=None, **attrs):
if into is None:
into = nx.Graph
metadata = { "node_type":self.node_type, "edge_type":self.edge_type, "func_metric":self.func_metric}
metadata.update(attrs)
graph = into(name=self.name, **metadata)
for (node_A, node_B), weight in self.weights.iteritems():
graph.add_edge(node_A, node_B, weight=weight)
return graph
def to_file(self, path, **kwargs):
write_object(obj=self, path=path, **kwargs)
def copy(self):
return copy.deepcopy(self)
# ==============================================================================
# Associations
# ==============================================================================
# Biweight midcorrelation
def pairwise_biweight_midcorrelation(X, use_numba=False):
"""
X: {np.array, pd.DataFrame}
Code adapted from the following sources:
* https://stackoverflow.com/questions/61090539/how-can-i-use-broadcasting-with-numpy-to-speed-up-this-correlation-calculation/61219867#61219867
* https://github.com/olgabot/pandas/blob/e8caf4c09e1a505eb3c88b475bc44d9389956585/pandas/core/nanops.py
Special thanks to the following people:
* @norok2 (https://stackoverflow.com/users/5218354/norok2) for optimization (vectorization and numba)
* @olgabot (https://github.com/olgabot) for NumPy implementation
Benchmarking:
* iris_features (4,4)
* numba: 159 ms ± 2.85 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
* numpy: 276 µs ± 3.45 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)
* iris_samples: (150,150)
* numba: 150 ms ± 7.57 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
* numpy: 686 µs ± 18.8 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)
Future:
* Handle missing values
"""
# Data
result = None
labels = None
if isinstance(X, pd.DataFrame):
labels = X.columns
X = X.values
def _base_computation(A):
n, m = A.shape
A = A - np.median(A, axis=0, keepdims=True)
v = 1 - (A / (9 * np.median(np.abs(A), axis=0, keepdims=True))) ** 2
est = A * v ** 2 * (v > 0)
norms = np.sqrt(np.sum(est ** 2, axis=0))
return n, m, est, norms
# Check if numba is available
assert_acceptable_arguments(use_numba, {True, False, "infer"})
if use_numba == "infer":
if "numba" in sys.modules:
use_numba = True
else:
use_numba = False
print("Numba is available:", use_numba, file=sys.stderr)
# Compute using numba
if use_numba:
assert "numba" in sys.modules
from numba import jit
def _biweight_midcorrelation_numba(A):
@jit
def _condensed_to_dense(n, m, est, norms, result):
for i in range(m):
for j in range(i + 1, m):
x = 0
for k in range(n):
x += est[k, i] * est[k, j]
result[i, j] = result[j, i] = x / norms[i] / norms[j]
n, m, est, norms = _base_computation(A)
result = np.empty((m, m))
np.fill_diagonal(result, 1.0)
_condensed_to_dense(n, m, est, norms, result)
return result
result = _biweight_midcorrelation_numba(X)
# Compute using numpy
else:
def _biweight_midcorrelation_numpy(A):
n, m, est, norms = _base_computation(A)
return np.einsum('mi,mj->ij', est, est) / norms[:, None] / norms[None, :]
result = _biweight_midcorrelation_numpy(X)
# Add labels
if labels is not None:
result = pd.DataFrame(result, index=labels, columns=labels)
return result
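# Example usage of pairwise_biweight_midcorrelation (an illustrative sketch, not part of the
# original module; the demo data and column names below are assumptions, kept as comments so the
# module body is unchanged at import time):
#
#   import numpy as np
#   import pandas as pd
#   X_demo = pd.DataFrame(np.random.RandomState(0).normal(size=(150, 4)),
#                         columns=["sepal_length", "sepal_width", "petal_length", "petal_width"])
#   df_bicor = pairwise_biweight_midcorrelation(X_demo, use_numba=False)
#   # df_bicor is a symmetric (4, 4) pd.DataFrame with 1.0 on the diagonal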
# =============================
# Feature Engineering
# =============================
class CategoricalEngineeredFeature(object):
"""
Combine features using multiple categories.
# =========================================
from soothsayer_utils import get_iris_data
from scipy import stats
import pandas as pd
import ensemble_networkx as enx
X, y = get_iris_data(["X","y"])
# Usage
CEF = enx.CategoricalEngineeredFeature(name="Iris", observation_type="sample")
# Add categories
category_1 = pd.Series(X.columns.map(lambda x:x.split("_")[0]), X.columns)
CEF.add_category(
name_category="leaf_type",
mapping=category_1,
)
# Optionally add scaling factors, statistical tests, and summary statistics
# Compile all of the data
CEF.compile(scaling_factors=X.sum(axis=0), stats_tests=[stats.normaltest])
# Unpacking engineered groups: 100%|██████████| 1/1 [00:00<00:00, 2974.68it/s]
# Organizing feature sets: 100%|██████████| 4/4 [00:00<00:00, 17403.75it/s]
# Compiling synopsis [Basic Feature Info]: 100%|██████████| 2/2 [00:00<00:00, 32768.00it/s]
# Compiling synopsis [Scaling Factor Info]: 100%|██████████| 2/2 [00:00<00:00, 238.84it/s]
# View the engineered features
CEF.synopsis_
# initial_features number_of_features leaf_type(level:0) scaling_factors sum(scaling_factors) mean(scaling_factors) sem(scaling_factors) std(scaling_factors)
# leaf_type
# sepal [sepal_width, sepal_length] 2 sepal [458.6, 876.5] 1335.1 667.55 208.95 208.95
# petal [petal_length, petal_width] 2 petal [563.7, 179.90000000000003] 743.6 371.8 191.9 191.9
# Transform a dataset using the defined categories
CEF.fit_transform(X, aggregate_fn=np.sum)
# leaf_type sepal petal
# sample
# iris_0 8.6 1.6
# iris_1 7.9 1.6
# iris_2 7.9 1.5
# iris_3 7.7 1.7
"""
def __init__(self,
initial_feature_type=None,
engineered_feature_type=None,
observation_type=None,
unit_type=None,
name=None,
description=None,
assert_mapping_intersection=False,
):
self.initial_feature_type = initial_feature_type
self.engineered_feature_type=engineered_feature_type
self.observation_type = observation_type
self.unit_type = unit_type
self.name = name
self.description = description
self.assert_mapping_intersection = assert_mapping_intersection
self.__data__ = dict()
self.number_of_levels_ = 0
self.memory_ = sys.getsizeof(self)
self.compiled_ = False
def add_category(self, name_category:Hashable, mapping:Union[Mapping, pd.Series], level:int="infer", assert_mapping_exclusiveness=False, assert_level_nonexistent=True):
if level == "infer":
level = len(self.__data__)
assert name_category not in self.__data__, "Already added category: {}".format(name_category)
assert isinstance(mapping, (Mapping, pd.Series)), "`mapping` must be dict-like"
# Force iterables into set type
def f(x):
# How should this handle frozensets and tuples?
if is_nonstring_iterable(x) and not isinstance(x, Hashable):
if assert_mapping_exclusiveness:
raise AssertionError("name_category=`{}` must have one-to-one mapping exclusiveness. If this is not desired, please set `assert_mapping_exclusiveness=False` when adding component via `add_category`".format(name_category))
x = set(x)
return x
# Add categories
if assert_level_nonexistent:
assert level not in self.__data__, "`level={}` already existent".format(level)
self.__data__[level] = {
"name_category":name_category,
"mapping":pd.Series(mapping).map(f)
}
return self
# Compile all the categories
def compile(
self,
scaling_factors:pd.Series=None, # e.g. Gene Lengths,
stats_summary = [np.sum, np.mean, stats.sem, np.std],
stats_tests = [],
):
# Check features
def check_initial_features():
self.initial_features_union_ = set.union(*map(lambda dict_values: set(dict_values["mapping"].index), self.__data__.values()))
self.initial_features_intersection_ = set.intersection(*map(lambda dict_values: set(dict_values["mapping"].index), self.__data__.values()))
if self.assert_mapping_intersection:
assert self.initial_features_union_ == self.initial_features_intersection_, \
"All `mapping` must have same features mapped. features_union = {}; features_intersection = {}".format(
len(self.initial_features_union_),
len(self.initial_features_intersection_),
)
if scaling_factors is not None:
assert isinstance(scaling_factors, (Mapping, pd.Series)), "`scaling_factors` must be dict-like"
self.scaling_factors_ = pd.Series(scaling_factors)
assert set(self.initial_features_intersection_) <= set(self.scaling_factors_.index), "`scaling_factors` does not have all required `initial_features_intersection_`. In particular, the following number of features are missing:\n{}".format(len(self.initial_features_intersection_ - set(self.scaling_factors_.index)))
else:
self.scaling_factors_ = None
# Organizing features and groups
def organize_and_group_initial_features():
# Organize the data w/ respect to feature
feature_to_grouping = defaultdict(lambda: defaultdict(set))
for level, data in pv(sorted(self.__data__.items(), key=lambda item:item[0]), description="Unpacking engineered groups"):
name_category = data["name_category"]
for id_feature, values in data["mapping"].items():
if isinstance(values, Hashable):
values = set([values])
for v in values:
feature_to_grouping[id_feature][level].add(v)
# Organize the groups and create sets of features
self.engineered_to_initial_features_ = defaultdict(set)
self.initial_features_ = set(feature_to_grouping.keys())
for id_feature, grouping in pv(feature_to_grouping.items(), description="Organizing feature sets"):
grouping_iterables = map(lambda item: item[1], sorted(grouping.items(), key=lambda item: item[0]))
for engineered_feature in product(*grouping_iterables):
if len(engineered_feature) == self.number_of_levels_:
self.engineered_to_initial_features_[engineered_feature].add(id_feature)
# Compute synopsis
def get_synopsis():
name_to_level = dict(map(lambda item: (item[1]["name_category"], item[0]), self.__data__.items()))
self.synopsis_ = defaultdict(dict)
for engineered_feature, initial_features in pv(self.engineered_to_initial_features_.items(), description="Compiling synopsis [Basic Feature Info]"):
self.synopsis_[engineered_feature]["initial_features"] = list(initial_features)
self.synopsis_[engineered_feature]["number_of_features"] = len(initial_features)
for i, value in enumerate(engineered_feature):
level = self.levels_[i]
name_category = self.__data__[level]["name_category"]
self.synopsis_[engineered_feature]["{}(level:{})".format(name_category, level)] = value
if self.scaling_factors_ is not None:
for engineered_feature in pv(self.synopsis_.keys(), description="Compiling synopsis [Scaling Factor Info]"):
initial_features = self.synopsis_[engineered_feature]["initial_features"]
query_scaling_factors = self.scaling_factors_[initial_features]
self.synopsis_[engineered_feature]["scaling_factors"] = list(query_scaling_factors)
for func in stats_summary:
with Suppress():
self.synopsis_[engineered_feature]["{}(scaling_factors)".format(func.__name__)] = func(query_scaling_factors)
for func in stats_tests:
with Suppress():
try:
stat, p = func(query_scaling_factors)
self.synopsis_[engineered_feature]["{}|stat(scaling_factors)".format(func.__name__)] = stat
self.synopsis_[engineered_feature]["{}|p_value(scaling_factors)".format(func.__name__)] = p
except:
pass
self.synopsis_ = pd.DataFrame(self.synopsis_).T
if isinstance(self.synopsis_.index, pd.MultiIndex):
self.synopsis_.index.names = map(lambda item: item[1]["name_category"], sorted(self.__data__.items(), key=lambda item:item[0]))
# Basic Info
self.levels_ = list(self.__data__.keys())
self.number_of_levels_ = len(self.__data__)
if stats_summary is None:
stats_summary = []
if stats_tests is None:
stats_tests = []
# Run compilation
print(format_header("CategoricalEngineeredFeature(Name:{})".format(self.name),line_character="="), file=sys.stderr)
check_initial_features()
organize_and_group_initial_features()
get_synopsis()
self.stats_summary_ = stats_summary
self.stats_tests_ = stats_tests
self.memory_ = sys.getsizeof(self)
self.compiled_ = True
return self
# Transform a dataset
def fit_transform(
self,
X:pd.DataFrame,
aggregate_fn=np.sum,
) -> pd.DataFrame:
query_features = set(X.columns)
assert query_features >= self.initial_features_, "X.columns does not have all required `initial_features_`. In particular, the following number of features are missing:\n{}".format(len(self.initial_features_ - query_features))
# Aggregate features
results = dict()
for engineered_feature, initial_features in pv(self.engineered_to_initial_features_.items(), description="Aggregating engineered features"):
X_subset = X[initial_features]
aggregate = X_subset.apply(aggregate_fn, axis=1)
results[engineered_feature] = aggregate
df_aggregate = pd.DataFrame(results)
# Properly label MultiIndex
if isinstance(df_aggregate.columns, pd.MultiIndex):
df_aggregate.columns.names = map(lambda item: item[1]["name_category"], sorted(self.__data__.items(), key=lambda item:item[0]))
# df_aggregate.columns.names = self.synopsis_.index.names
df_aggregate.index.name = self.observation_type
return df_aggregate
# =======
# Built-in
# =======
def __repr__(self):
pad = 4
n_preview = 5
header = format_header("CategoricalEngineeredFeature(Name:{})".format(self.name),line_character="=")
n = len(header.split("\n")[0])
fields = [
header,
pad*" " + "* Number of levels: {}".format(self.number_of_levels_),
pad*" " + "* Memory: {}".format(format_memory(self.memory_)),
pad*" " + "* Compiled: {}".format(self.compiled_),
]
# Types
fields += [
*map(lambda line:pad*" " + line, format_header("| Types", "-", n=n-pad).split("\n")),
pad*" " + "* Initial feature type: {}".format(self.initial_feature_type),
pad*" " + "* Engineered feature type: {}".format(self.engineered_feature_type),
pad*" " + "* Observation feature type: {}".format(self.observation_type),
pad*" " + "* Unit type: {}".format(self.unit_type),
]
if self.compiled_:
fields += [
*map(lambda line:pad*" " + line, format_header("| Statistics", "-", n=n-pad).split("\n")),
2*pad*" " + "Scaling Factors: {}".format(self.scaling_factors_ is not None),
2*pad*" " + "Summary: {}".format(list(map(lambda fn: fn.__name__, self.stats_summary_))),
2*pad*" " + "Tests: {}".format(list(map(lambda fn: fn.__name__, self.stats_tests_))),
]
fields += [
*map(lambda line:pad*" " + line, format_header("| Categories", "-", n=n-pad).split("\n")),
]
for level, d in self.__data__.items():
fields += [
pad*" " + "* Level {} - {}:".format(level, d["name_category"]),
2*pad*" " + "Number of initial features: {}".format(d["mapping"].index.nunique()),
2*pad*" " + "Number of categories: {}".format(len(flatten(d["mapping"].values, into=set))),
]
fields += [
*map(lambda line:pad*" " + line, format_header("| Features", "-", n=n-pad).split("\n")),
]
fields += [
pad*" " + 2*" " + "Number of initial features (Intersection): {}".format(len(self.initial_features_intersection_)),
pad*" " + 2*" " + "Number of initial features (Union): {}".format(len(self.initial_features_union_)),
pad*" " + 2*" " + "Number of engineered features: {}".format(len(self.engineered_to_initial_features_)),
]
return "\n".join(fields)
def __getitem__(self, key):
"""
`key` can be an int (a category level) or a tuple (an engineered feature)
"""
recognized = False
if isinstance(key, int):
try:
recognized = True
return self.__data__[key]
except KeyError:
raise KeyError("{} level not in self.__data__".format(key))
if isinstance(key, tuple):
assert self.compiled_, "Please compile before using self.__getitem__ method."
try:
recognized = True
return self.engineered_to_initial_features_[key]
except KeyError:
raise KeyError("{} engineered feature not in self.engineered_to_initial_features_".format(key))
if not recognized:
raise KeyError("Could not interpret key: {}. Please use self.__getitem__ method for querying level data with an int or features with a tuple.".format(key))
def __len__(self):
return len(self.engineered_to_initial_features_)
def __iter__(self):
for v in self.engineered_to_initial_features_.items():
yield v
def items(self):
return self.engineered_to_initial_features_.items()
def iteritems(self):
for v in self.engineered_to_initial_features_.items():
yield v
def to_file(self, path, **kwargs):
write_object(obj=self, path=path, **kwargs)
def copy(self):
return copy.deepcopy(self)
# =============================
# Ensemble Association Networks
# =============================
class EnsembleAssociationNetwork(object):
"""
# Load in data
import soothsayer_utils as syu
X = syu.get_iris_data(["X"])
# Create ensemble network
ens = enx.EnsembleAssociationNetwork(name="Iris", node_type="leaf measurement", edge_type="association", observation_type="specimen")
ens.fit(X=X, metric="spearman", n_iter=100, stats_summary=[np.mean,np.var, stats.kurtosis, stats.skew], stats_tests=[stats.normaltest], copy_ensemble=True)
print(ens)
# =======================================================
# EnsembleAssociationNetwork(Name:Iris, Metric: spearman)
# =======================================================
# * Number of nodes (leaf measurement): 4
# * Number of edges (association): 6
# * Observation type: specimen
# ---------------------------------------------------
# | Parameters
# ---------------------------------------------------
# * n_iter: 100
# * sampling_size: 92
# * random_state: 0
# * with_replacement: False
# * transformation: None
# * memory: 16.156 KB
# ---------------------------------------------------
# | Data
# ---------------------------------------------------
# * Features (n=150, m=4, memory=10.859 KB)
# * Ensemble (memory=4.812 KB)
# * Statistics (['mean', 'var', 'kurtosis', 'skew', 'normaltest|stat', 'normaltest|p_value'], memory=496 B)
# View ensemble
print(ens.ensemble_.head())
# Edges (sepal_width, sepal_length) (sepal_length, petal_length) \
# Iterations
# 0 -0.113835 0.880407
# 1 -0.243982 0.883397
# 2 -0.108511 0.868627
# 3 -0.151437 0.879405
# 4 -0.241807 0.869027
# View statistics
print(ens.stats_.head())
# Statistics mean var kurtosis skew \
# Edges
# (sepal_width, sepal_length) -0.167746 0.002831 0.191176 0.287166
# (sepal_length, petal_length) 0.880692 0.000268 -0.107437 0.235619
# (petal_width, sepal_length) 0.834140 0.000442 -0.275487 -0.219778
# (sepal_width, petal_length) -0.304403 0.003472 -0.363377 0.059179
# (sepal_width, petal_width) -0.285237 0.003466 -0.606118 0.264103
__future__:
* Add ability to load in previous data. However, this is tricky because one needs to validate that the following objects are the same:
- X
- sampling_size
- n_iter
- random_state
- with_replacement
etc.
"""
def __init__(
self,
name=None,
node_type=None,
edge_type=None,
observation_type=None,
assert_symmetry=True,
assert_draw_size=True,
assert_nan_safe_functions=True,
nans_ok=True,
tol=1e-10,
# temporary_directory=None,
# remove_temporary_directory=True,
# compression="gzip",
# absolute_path=False,
# force_overwrite=False,
):
self.name = name
self.node_type = node_type
self.edge_type = edge_type
self.observation_type = observation_type
self.assert_symmetry = assert_symmetry
self.assert_draw_size = assert_draw_size
self.assert_nan_safe_functions = assert_nan_safe_functions
self.nans_ok = nans_ok
self.tol = tol
# if temporary_directory == False:
# temporary_directory = None
# if temporary_directory:
# # Do tsomething where you can resume from a previous tmp
# if temporary_directory == True:
# temporary_directory = ".EnsembleAssociationNetwork__{}".format(get_unique_identifier())
# temporary_directory = format_path(temporary_directory, absolute=absolute_path)
# os.makedirs(temporary_directory, exist_ok=True)
# self.temporary_directory = temporary_directory
# self.remove_temporary_directory = remove_temporary_directory
# assert_acceptable_arguments(compression, {"gzip", "bz2", None})
# self.compression = compression
# self.force_overwrite = force_overwrite
def _pandas_association(self, X, metric):
return X.corr(method=metric)
def fit(
self,
X:pd.DataFrame,
metric="rho",
n_iter=1000,
sampling_size=0.6180339887,
transformation=None,
random_state=0,
with_replacement=False,
function_is_pairwise=True,
stats_summary=[np.mean, np.median, np.var, stats.kurtosis, stats.skew] ,
stats_tests=[stats.normaltest],
copy_X=True,
copy_ensemble=True,
a=np.asarray(np.linspace(-1,1,999).tolist() + [np.nan]),
):
# Metric
assert metric is not None
metric_name = None
if hasattr(metric, "__call__"):
if not function_is_pairwise:
function = metric
metric_name = function.__name__
metric = lambda X: self._pandas_association(X=X, metric=function)
acceptable_metrics = {"rho", "phi", "biweight_midcorrelation", "spearman", "pearson", "kendall"}
if isinstance(metric, str):
assert_acceptable_arguments(metric, acceptable_metrics)
metric_name = metric
if metric == "rho":
metric = pairwise_rho
if metric == "phi":
metric = pairwise_phi
if metric == "biweight_midcorrelation":
metric = pairwise_biweight_midcorrelation
if metric in {"spearman", "pearson", "kendall"}:
association = metric
metric = lambda X: self._pandas_association(X=X, metric=association)
assert hasattr(metric, "__call__"), "`metric` must be either one of the following: [{}], \
a custom metric that returns an association (set `function_is_pairwise=False`), or a custom \
metric that returns a 2D square/symmetric pd.DataFrame (set `function_is_pairwise=True`)".format(acceptable_metrics)
# Transformations
acceptable_transformations = {"signed", "abs"}
if transformation:
if isinstance(transformation, str):
assert_acceptable_arguments(transformation, acceptable_transformations)
if transformation == "signed":
transformation = signed
if transformation == "abs":
transformation = np.abs
assert hasattr(transformation, "__call__"), "`transformation` must be either one of the following: [{}] or a function(pd.DataFrame) -> pd.DataFrame".format(acceptable_transformations)
# Check statistics functions
if self.assert_nan_safe_functions:
if self.nans_ok:
number_of_nan = np.isnan(X.values).ravel().sum()
if number_of_nan > 0:
if stats_summary:
for func in stats_summary:
v = func(a)
assert np.isfinite(v), "`stats_summary` function `{}` cannot handle `nan` ({} missing values)".format(func.__name__, number_of_nan)
if stats_tests:
for func in stats_tests:
v = func(a)[-1]
assert np.isfinite(v), "`stats_tests` function `{}` cannot handle `nan` ({} missing values)".format(func.__name__, number_of_nan)
# Data
n, m = X.shape
# Network
nodes = pd.Index(X.columns)
number_of_nodes = len(nodes)
edges = pd.Index(map(frozenset, combinations(nodes, r=2)), name="Edges")
number_of_edges = len(edges)
# Get draws
draws = list()
# Use custom draws
if is_nonstring_iterable(n_iter):
draw_sizes = list()
available_observations = set(X.index)
for draw in n_iter:
# Check that there are no unique observations in the draw not present in X.index
query = set(draw) - available_observations
assert len(query) == 0, "The following observations are not available in `X.index`:\n{}".format(query)
draws.append(list(draw))
# Get draw size
draw_sizes.append(len(draw))
unique_draw_sizes = set(draw_sizes)
number_unique_draw_sizes = len(unique_draw_sizes)
if self.assert_draw_size:
assert number_unique_draw_sizes == 1, "With `assert_draw_size=True` all draw sizes must be the same length"
# Update
if number_unique_draw_sizes == 1:
sampling_size = list(unique_draw_sizes)[0]
else:
sampling_size = draw_sizes
n_iter = len(draws)
random_state = np.nan
with_replacement = np.nan
# Do not use custom draws (this is default)
else:
assert 0 < sampling_size < n
if 0 < sampling_size < 1:
sampling_size = int(sampling_size*n)
# Iterations
number_of_unique_draws_possible = comb(n, sampling_size, exact=True, repetition=with_replacement)
assert n_iter <= number_of_unique_draws_possible, "`n_iter` exceeds the number of possible draws (total_possible={})".format(number_of_unique_draws_possible)
if random_state is not None:
assert isinstance(random_state, int), "`random_state` must either be `None` or of `int` type"
for j in range(n_iter):
# Get draw of samples
if random_state is None:
rs = None
else:
rs = j + random_state
index = np.random.RandomState(rs).choice(X.index, size=sampling_size, replace=with_replacement)
draws.append(index.tolist())
# Stats
if (stats_tests is None) or (stats_tests is False):
stats_tests = []
if hasattr(stats_tests, "__call__"):
stats_tests = [stats_tests]
stats_tests = list(stats_tests)
if (stats_summary is None) or (stats_summary is False):
stats_summary = []
if hasattr(stats_summary, "__call__"):
stats_summary = [stats_summary]
stats_summary = list(stats_summary)
for func in (stats_tests + stats_summary):
assert hasattr(func, "__name__")
# Associations
ensemble = np.empty((n_iter, number_of_edges))
ensemble[:] = np.nan
for i, index in pv(enumerate(draws), description="Computing associations ({})".format(self.name), total=n_iter, unit=" draws"):
# Compute associations with current draw
df_associations = metric(X.loc[index])
if self.assert_symmetry:
assert is_symmetrical(df_associations, tol=self.tol)
weights = squareform(df_associations.values, checks=False) #dense_to_condensed(X=df_associations, assert_symmetry=self.assert_symmetry, tol=self.tol)
ensemble[i] = weights
ensemble = | pd.DataFrame(ensemble, columns=edges) | pandas.DataFrame |
# -*- coding: utf-8 -*-
""" Test functions in tables.py.
"""
import numpy as np
"""License:
Copyright 2020 The Cytoscape Consortium
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import unittest
import pandas as df
from requests import HTTPError
from test_utils import *
class TablesTests(unittest.TestCase):
def setUp(self):
try:
close_session(False)
# delete_all_networks()
except:
pass
def tearDown(self):
pass
@print_entry_exit
def test_delete_table_column(self):
# Initialization
load_test_session()
def check_delete(table, column):
columns = set(get_table_column_names(table=table))
self.assertEqual(delete_table_column(table=table, column=column), '')
columns.discard(column)
fewer_columns = set(get_table_column_names(table=table))
self.assertSetEqual(set(columns), set(fewer_columns))
check_delete('node', 'BetweennessCentrality')
check_delete('edge', 'EdgeBetweenness')
check_delete('node', 'boguscolumn')
self.assertRaises(CyError, delete_table_column, table='bogustable', column='boguscolumn')
self.assertRaises(CyError, get_table_column_names, network='bogus')
@print_entry_exit
def test_get_table_columns(self):
# Initialization
load_test_session()
# Verify that an empty column list returns all columns, and all columns have at least one non-nan value
df = get_table_columns()
self.assertSetEqual(set(df.columns),
{'BetweennessCentrality', 'gal1RGexp', 'Eccentricity', 'Stress', 'NumberOfDirectedEdges',
'NeighborhoodConnectivity', 'NumberOfUndirectedEdges', 'selected', 'gal4RGsig', 'Degree',
'gal80Rsig', 'SUID', 'gal80Rexp', 'TopologicalCoefficient', 'ClusteringCoefficient',
'Radiality', 'gal4RGexp', 'gal1RGsig', 'name', 'degree.layout', 'ClosenessCentrality',
'COMMON', 'AverageShortestPathLength', 'shared name', 'PartnerOfMultiEdgedNodePairs',
'SelfLoops', 'isExcludedFromPaths', 'IsSingleNode'})
self.assertEqual(len(df.index), get_node_count())
self.assertNotIn(False, [True in list(df[col].notnull()) for col in df.columns])
# Verify that an explicity column list returns exact columns, and each has at least one non-nan value
df = get_table_columns(columns=['gal1RGexp', 'Eccentricity', 'Stress'])
self.assertSetEqual(set(df.columns), {'gal1RGexp', 'Eccentricity', 'Stress'})
self.assertEqual(len(df.index), get_node_count())
self.assertNotIn(False, [True in list(df[col].notnull()) for col in df.columns])
# Verify that a column list as a comma-separated string returns exact columns, and each has at least one non-nan value
df = get_table_columns(columns='Stress, NumberOfDirectedEdges')
self.assertSetEqual(set(df.columns), {'Stress', 'NumberOfDirectedEdges'})
self.assertEqual(len(df.index), get_node_count())
self.assertNotIn(False, [True in list(df[col].notnull()) for col in df.columns])
# Verify that a bogus column name still returns a column, though it must be all nan
df = get_table_columns(columns='Stress, bogus')
self.assertSetEqual(set(df.columns), {'Stress', 'bogus'})
self.assertEqual(len(df.index), get_node_count())
self.assertTrue(True in list(df['Stress'].notnull()))
self.assertFalse(False in df['bogus'].isnull())
# Verify that an empty column list returns all columns for edges, too
df = get_table_columns(table='edge')
self.assertSetEqual(set(df.columns),
{'SUID', 'shared name', 'shared interaction', 'name', 'selected', 'interaction',
'EdgeBetweenness'})
self.assertEqual(len(df.index), get_edge_count())
self.assertRaises(CyError, get_table_columns, table='bogustable', columns='boguscolumn')
self.assertRaises(CyError, get_table_columns, network='bogus')
@print_entry_exit
def test_get_table_value(self):
# Initialization
load_test_session()
self.assertEqual(get_table_value('node', 'YDL194W', 'gal1RGexp'), 0.139)
self.assertEqual(get_table_value('node', 'YDL194W', 'Degree'), 1)
self.assertFalse(get_table_value('node', 'YDL194W', 'IsSingleNode'))
self.assertEqual(get_table_value('node', 'YDL194W', 'COMMON'), 'SNF3')
self.assertEqual(get_table_value('edge', 'YLR197W (pp) YOR310C', 'EdgeBetweenness'), 2.0)
self.assertEqual(get_table_value('network', 'galFiltered.sif', 'publication'),
'Integrated Genomic and Proteomic Analyses of a Systematically Perturbed Metabolic Network\n'
'<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>\n'
'Science 4 May 2001: 292 (5518), 929-934. [DOI:10.1126/science.292.5518.929]')
# TODO: Fetching a None number raises an error, but should really return a None ... can this be changed?
# TODO: Find out if a null string in Cytoscape is the same thing as a None
self.assertRaises(CyError, get_table_value, 'node', 'YER056CA', 'gal1RGexp')
self.assertIsNone(get_table_value('node', 'YER056CA', 'COMMON'))
self.assertRaises(CyError, get_table_value, 'node', 'YDL194W', 'gal1RGexp', network='bogus')
@print_entry_exit
def test_get_table_column_names(self):
# Initialization
load_test_session()
self.assertSetEqual(set(get_table_column_names()),
{'SUID', 'shared name', 'name', 'selected', 'AverageShortestPathLength',
'BetweennessCentrality', 'ClosenessCentrality', 'ClusteringCoefficient', 'Degree',
'Eccentricity', 'IsSingleNode', 'NeighborhoodConnectivity', 'NumberOfDirectedEdges',
'NumberOfUndirectedEdges', 'PartnerOfMultiEdgedNodePairs', 'Radiality', 'SelfLoops',
'Stress', 'TopologicalCoefficient', 'degree.layout', 'COMMON', 'gal1RGexp', 'gal4RGexp',
'gal80Rexp', 'gal1RGsig', 'gal4RGsig', 'gal80Rsig', 'isExcludedFromPaths'})
self.assertSetEqual(set(get_table_column_names('edge')),
{'SUID', 'shared name', 'shared interaction', 'name', 'selected', 'interaction',
'EdgeBetweenness'})
self.assertSetEqual(set(get_table_column_names('network')),
{'SUID', 'shared name', 'name', 'selected', '__Annotations', 'publication', 'Dataset Name',
'Dataset URL'})
self.assertRaises(CyError, get_table_column_names, 'library')
self.assertRaises(CyError, get_table_column_names, network='bogus')
@print_entry_exit
def test_get_table_column_types(self):
# Initialization
load_test_session()
self.assertDictEqual(get_table_column_types(),
{'SUID': 'Long', 'shared name': 'String', 'name': 'String', 'selected': 'Boolean',
'AverageShortestPathLength': 'Double', 'BetweennessCentrality': 'Double',
'ClosenessCentrality': 'Double', 'ClusteringCoefficient': 'Double', 'Degree': 'Integer',
'Eccentricity': 'Integer', 'IsSingleNode': 'Boolean',
'NeighborhoodConnectivity': 'Double', 'NumberOfDirectedEdges': 'Integer',
'NumberOfUndirectedEdges': 'Integer', 'PartnerOfMultiEdgedNodePairs': 'Integer',
'Radiality': 'Double', 'SelfLoops': 'Integer', 'Stress': 'Long',
'TopologicalCoefficient': 'Double', 'degree.layout': 'Integer', 'COMMON': 'String',
'gal1RGexp': 'Double', 'gal4RGexp': 'Double', 'gal80Rexp': 'Double',
'gal1RGsig': 'Double', 'gal4RGsig': 'Double', 'gal80Rsig': 'Double',
'isExcludedFromPaths': 'Boolean'})
self.assertDictEqual(get_table_column_types('edge'),
{'SUID': 'Long', 'shared name': 'String', 'shared interaction': 'String', 'name': 'String',
'selected': 'Boolean', 'interaction': 'String', 'EdgeBetweenness': 'Double'})
self.assertDictEqual(get_table_column_types('network'),
{'SUID': 'Long', 'shared name': 'String', 'name': 'String', 'selected': 'Boolean',
'__Annotations': 'List', 'publication': 'String', 'Dataset Name': 'String',
'Dataset URL': 'String'})
self.assertRaises(CyError, get_table_column_types, 'library')
self.assertRaises(CyError, get_table_column_types, 'edge', network='bogus')
@print_entry_exit
def test_load_table_data_from_file(self):
def check_table(original_columns, new_column_name, key_values, table_name='node'):
# Make sure we get exactly the expected columns
self.assertSetEqual(set(get_table_column_names(table=table_name)), original_columns | {new_column_name})
# Make sure we get exactly the expected number of values in the new column
table = get_table_columns(table=table_name, columns=['name', new_column_name])
table.dropna(inplace=True)
table.set_index('name', inplace=True)
self.assertEqual(len(table.index), len(key_values))
# Make sure the new column values are as expected
for key, val in key_values:
self.assertEqual(table[new_column_name][key], val)
# Initialization
load_test_session()
node_column_names = set(get_table_column_names())
edge_column_names = set(get_table_column_names(table='edge'))
# Verify that a table with column headers can be loaded into the node table
res = load_table_data_from_file('data/defaultnode_table.tsv', first_row_as_column_names=True)
check_table(node_column_names, 'newcol', [('YDR277C', 2), ('YDL194W', 1), ('YBR043C', 3)])
# Verify that a table with column headers can be loaded into the edge table
res = load_table_data_from_file('data/defaultedge_table.tsv', first_row_as_column_names=True, table='edge')
check_table(edge_column_names, 'newcol_e', [('YDR277C (pp) YDL194W', 1000), ('YDR277C (pp) YJR022W', 2000), ('YPR145W (pp) YMR117C', 3000)], table_name='edge')
# Verify that a spreadsheet with column headers can be loaded into the node table
load_test_session()
res = load_table_data_from_file('data/defaultnode_table.xlsx', first_row_as_column_names=True)
check_table(node_column_names, 'newcol', [('YDR277C', 2), ('YDL194W', 1), ('YBR043C', 3)])
# Verify that a table with no header can be loaded into the node table
load_test_session()
res = load_table_data_from_file('data/defaultnode_table.no-header.tsv', first_row_as_column_names=False)
check_table(node_column_names, '1', [('YDR277C', 2), ('YDL194W', 1), ('YBR043C', 3)])
# Verify that a table with extra lines at the beginning can be loaded into the node table
load_test_session()
res = load_table_data_from_file('data/defaultnode_table.extra-lines.tsv', first_row_as_column_names=False, start_load_row=4)
check_table(node_column_names, '1', [('YDR277C', 2), ('YDL194W', 1), ('YBR043C', 3)])
# Verify that a table with different field delimiters can be loaded into the node table
load_test_session()
res = load_table_data_from_file('data/defaultnode_table.semi-delimiter.txt', first_row_as_column_names=False, delimiters=' ,;')
check_table(node_column_names, '1', [('YDR277C', 2), ('YDL194W', 1), ('YBR043C', 3)])
# Verify that a table with values in a different order can be loaded into the node table
load_test_session()
res = load_table_data_from_file('data/defaultnode_table.backwards.tsv', first_row_as_column_names=True, data_key_column_index=2)
check_table(node_column_names, 'newcol', [('YDR277C', 2), ('YDL194W', 1), ('YBR043C', 3)])
# Verify that a table with indexing on a different table column can be loaded into the node table
load_test_session()
res = load_table_data_from_file('data/defaultnode_table.COMMON.tsv', first_row_as_column_names=True, table_key_column='COMMON')
check_table(node_column_names, 'newcol', [('YDR277C', 2), ('YDL194W', 1), ('YBR043C', 3)])
self.assertRaises(CyError, load_table_data_from_file, 'bogus file name')
self.assertRaises(CyError, load_table_data_from_file, 'data/defaultnode_table.tsv', start_load_row=-1)
self.assertRaises(CyError, load_table_data_from_file, 'data/defaultnode_table.tsv', delimiters='bogus')
self.assertRaises(CyError, load_table_data_from_file, 'data/defaultnode_table.tsv', data_key_column_index='newcol')
self.assertRaises(CyError, load_table_data_from_file, 'data/defaultnode_table.tsv', data_key_column_index=-1)
self.assertRaises(CyError, load_table_data_from_file, 'data/defaultnode_table.tsv', table_key_column='bogus column')
@print_entry_exit
def test_load_table_data(self):
def check_values_added(table_column_names, table_key_name, test_data, data_key_name, data_value_name, table='node'):
data = get_table_columns(table=table)
self.assertEqual(len(table_column_names) + 2, len(data.columns))
self.assertIn(data_key_name, data.columns)
self.assertIn(data_value_name, data.columns)
added_data = data[data[table_key_name] == data[data_key_name]]
self.assertEqual(len(test_data.index), len(added_data.index))
verify_each_newcol_value = [added_data[added_data[data_key_name] == row[data_key_name]].iloc[0][data_value_name] == row[data_value_name]
for row_index, row in test_data.iterrows()]
self.assertNotIn(False, verify_each_newcol_value)
# Initialization
load_test_session()
# Verify that adding into rows that don't exist fails
unrelated_data = df.DataFrame(data={'id': ['New1', 'New2', 'New3'], 'newcol': [1, 2, 3]})
self.assertRaises(CyError, load_table_data, unrelated_data, data_key_column='id', table='node', table_key_column='name')
# Verify that adding into node table rows that do exist succeeds ... checks that string-keys work
column_names_string_keyed = get_table_column_names()
test_data_string_keyed = df.DataFrame(data={'id': ['YDL194W', 'YDR277C', 'YBR043C'], 'newcol': [1, 2, 3]})
res = load_table_data(test_data_string_keyed, data_key_column='id', table='node', table_key_column='name')
self.assertEqual(res, 'Success: Data loaded in defaultnode table')
# Verify that ID column and newcol were added, and that the newcols have values only for the named nodes
check_values_added(column_names_string_keyed, 'name', test_data_string_keyed, 'id', 'newcol')
# Given newcol values, use them as non-string keys to add yet another column
column_names_int_keyed = get_table_column_names()
test_data_int_keyed = | df.DataFrame(data={'newcol_val': [1, 2, 3], 'derived': [100, 200, 300]}) | pandas.DataFrame |
import numpy as np
import pandas as pd
def get_market_list(client, *args):
marketList = pd.DataFrame(client.get_products()['data'])
if len(args)>0:
quoteBase = args[0]
marketList = marketList[marketList['quoteAsset']==quoteBase]
marketList['volume_24h'] = marketList['tradedMoney']
marketList = marketList[['symbol', 'volume_24h']]
tickers = pd.DataFrame(client.get_ticker())
tickers['priceChangePercent'] = | pd.to_numeric(tickers['priceChangePercent']) | pandas.to_numeric |
#
import datetime
import os
import logging
import click
import pandas as pd
from QARealtimeCollector.collectors.stockbarcollector import QARTCStockBar
from QARealtimeCollector.connector.crawl import get_formater_easy_ticks, get_formater_akshare_ticks, TICK_SOURCE
logger = logging.getLogger(__name__)
class QARTC_StockTick(QARTCStockBar):
def __init__(self, delay=0.0, date: datetime.datetime = None, log_dir='./log', debug=False, tick_source=TICK_SOURCE.EQ_SINA):
if delay == 0:
if tick_source == TICK_SOURCE.EQ_SINA:
delay = 20
elif tick_source == TICK_SOURCE.AK_A_EM:
delay = 60
super().__init__(delay=delay, date=date, log_dir=log_dir, debug=debug)
self.tick_source = tick_source
def get_data_from_source(self):
if self.tick_source == TICK_SOURCE.EQ_SINA:
l1_ticks_data = get_formater_easy_ticks(self.code_list)
l1_ticks_data = pd.DataFrame(l1_ticks_data)
elif self.tick_source == TICK_SOURCE.AK_A_EM:
l1_ticks_data = get_formater_akshare_ticks()
else:
l1_ticks_data = | pd.DataFrame() | pandas.DataFrame |
import tempfile
from datetime import datetime
import numpy as np
from numpy.testing import assert_allclose
import pandas as pd
import pytest
import pytz
from eemeter.testing.mocks import MockWeatherClient
from eemeter.weather import ISDWeatherSource
from eemeter.modeling.formatters import ModelDataBillingFormatter
from eemeter.structures import EnergyTrace
@pytest.fixture
def mock_isd_weather_source():
tmp_url = "sqlite:///{}/weather_cache.db".format(tempfile.mkdtemp())
ws = ISDWeatherSource("722880", tmp_url)
ws.client = MockWeatherClient()
return ws
@pytest.fixture
def trace1():
data = {
"value": [1, 1, 1, 1, np.nan],
"estimated": [False, False, True, False, False]
}
columns = ["value", "estimated"]
index = [
datetime(2011, 1, 1, tzinfo=pytz.UTC),
datetime(2011, 2, 1, tzinfo=pytz.UTC),
datetime(2011, 3, 2, tzinfo=pytz.UTC),
datetime(2011, 4, 3, tzinfo=pytz.UTC),
datetime(2011, 4, 29, tzinfo=pytz.UTC),
]
df = pd.DataFrame(data, index=index, columns=columns)
return EnergyTrace("ELECTRICITY_CONSUMPTION_SUPPLIED", df, unit="KWH")
@pytest.fixture
def trace2():
data = {
"value": [np.nan],
"estimated": [True]
}
columns = ["value", "estimated"]
index = [
datetime(2011, 1, 1, tzinfo=pytz.UTC),
]
df = pd.DataFrame(data, index=index, columns=columns)
return EnergyTrace("ELECTRICITY_CONSUMPTION_SUPPLIED", df, unit="KWH")
@pytest.fixture
def trace3():
data = {
"value": [1, np.nan],
"estimated": [True, False]
}
columns = ["value", "estimated"]
index = [
datetime(2011, 1, 1, tzinfo=pytz.UTC),
datetime(2011, 2, 1, tzinfo=pytz.UTC),
]
df = pd.DataFrame(data, index=index, columns=columns)
return EnergyTrace("ELECTRICITY_CONSUMPTION_SUPPLIED", df, unit="KWH")
@pytest.fixture
def trace4():
trace_length = 100
data = {
"value": [1 for _ in range(trace_length)],
"estimated": [False for _ in range(trace_length)]
}
columns = ["value", "estimated"]
index = pd.date_range(
start=datetime(2011, 1, 1, tzinfo=pytz.UTC),
periods=trace_length,
freq='D',
tz=pytz.UTC
)
df = | pd.DataFrame(data, index=index, columns=columns) | pandas.DataFrame |
"""
Engineering Time
- In this example, we are going to extract different ways of representing time from a timestamp.
- We can extract for example:
- hour
- minute
- second
- data
- elapsed time
We will create a toy dataset for the demonstration. """
import pandas as pd
import numpy as np
import datetime
# let's create a toy data set: 1 column 7 different timestamps,
# 1 hr difference between timestamp
date = pd.Series(pd.date_range('2015-1-5 11:20:00', periods=7, freq='H'))
df = pd.DataFrame(dict(date=date))
df
date
""" 0 2015-01-05 11:20:00
1 2015-01-05 12:20:00
2 2015-01-05 13:20:00
3 2015-01-05 14:20:00
4 2015-01-05 15:20:00
5 2015-01-05 16:20:00
6 2015-01-05 17:20:00 """
""" Extract the hr, minute and second """
df['hour'] = df['date'].dt.hour
df['min'] = df['date'].dt.minute
df['sec'] = df['date'].dt.second
df
"""
date hour min sec
0 2015-01-05 11:20:00 11 20 0
1 2015-01-05 12:20:00 12 20 0
2 2015-01-05 13:20:00 13 20 0
3 2015-01-05 14:20:00 14 20 0
4 2015-01-05 15:20:00 15 20 0
5 2015-01-05 16:20:00 16 20 0
6 2015-01-05 17:20:00 17 20 0 """
""" Extract time part """
df['time'] = df['date'].dt.time
df
"""
date hour min sec time
0 2015-01-05 11:20:00 11 20 0 11:20:00
1 2015-01-05 12:20:00 12 20 0 12:20:00
2 2015-01-05 13:20:00 13 20 0 13:20:00
3 2015-01-05 14:20:00 14 20 0 14:20:00
4 2015-01-05 15:20:00 15 20 0 15:20:00
5 2015-01-05 16:20:00 16 20 0 16:20:00
6 2015-01-05 17:20:00 17 20 0 17:20:00 """
# Extract hr, min, sec, at the same time
# now let's repeat what we did in cell 3 in 1 command
df[['h','m','s']] = pd.DataFrame([(x.hour, x.minute, x.second) for x in df['time']])
df
"""
date hour min sec time h m s
0 2015-01-05 11:20:00 11 20 0 11:20:00 11 20 0
1 2015-01-05 12:20:00 12 20 0 12:20:00 12 20 0
2 2015-01-05 13:20:00 13 20 0 13:20:00 13 20 0
3 2015-01-05 14:20:00 14 20 0 14:20:00 14 20 0
4 2015-01-05 15:20:00 15 20 0 15:20:00 15 20 0
5 2015-01-05 16:20:00 16 20 0 16:20:00 16 20 0
6 2015-01-05 17:20:00 17 20 0 17:20:00 17 20 0 """
# Calculate time difference
# let's create another toy dataframe with 2 timestamp columns
# and 7 rows each, in the first column the timestamps change monthly,
# in the second column the timestamps change weekly
date1 = pd.Series(pd.date_range('2012-1-1 12:00:00', periods=7, freq='M'))
date2 = pd.Series(pd.date_range('2013-3-11 21:45:00', periods=7, freq='W'))
df = pd.DataFrame(dict(Start_date = date1, End_date = date2))
df
"""
Start_date End_date
0 2012-01-31 12:00:00 2013-03-17 21:45:00
1 2012-02-29 12:00:00 2013-03-24 21:45:00
2 2012-03-31 12:00:00 2013-03-31 21:45:00
3 2012-04-30 12:00:00 2013-04-07 21:45:00
4 2012-05-31 12:00:00 2013-04-14 21:45:00
5 2012-06-30 12:00:00 2013-04-21 21:45:00
6 2012-07-31 12:00:00 2013-04-28 21:45:00 """
# let's calculate the time elapsed in seconds
df['diff_seconds'] = df['End_date'] - df['Start_date']
df['diff_seconds']=df['diff_seconds']/np.timedelta64(1,'s')
df
"""
Start_date End_date diff_seconds
0 2012-01-31 12:00:00 2013-03-17 21:45:00 35545500.0
1 2012-02-29 12:00:00 2013-03-24 21:45:00 33644700.0
2 2012-03-31 12:00:00 2013-03-31 21:45:00 31571100.0
3 2012-04-30 12:00:00 2013-04-07 21:45:00 29583900.0
4 2012-05-31 12:00:00 2013-04-14 21:45:00 27510300.0
5 2012-06-30 12:00:00 2013-04-21 21:45:00 25523100.0
6 2012-07-31 12:00:00 2013-04-28 21:45:00 23449500.0 """
# let's calculate the time elapsed in minutes (note: this reuses/overwrites the diff_seconds column)
df['diff_seconds'] = df['End_date'] - df['Start_date']
df['diff_seconds']=df['diff_seconds']/np.timedelta64(1,'m')
df
"""
Start_date End_date diff_seconds
0 2012-01-31 12:00:00 2013-03-17 21:45:00 592425.0
1 2012-02-29 12:00:00 2013-03-24 21:45:00 560745.0
2 2012-03-31 12:00:00 2013-03-31 21:45:00 526185.0
3 2012-04-30 12:00:00 2013-04-07 21:45:00 493065.0
4 2012-05-31 12:00:00 2013-04-14 21:45:00 458505.0
5 2012-06-30 12:00:00 2013-04-21 21:45:00 425385.0
6 2012-07-31 12:00:00 2013-04-28 21:45:00 390825.0 """
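# A similar one-liner gives the elapsed time in hours (an illustrative addition that is not in
# the original article; it reuses the same np.timedelta64 pattern as above)
df['diff_hours'] = (df['End_date'] - df['Start_date']) / np.timedelta64(1, 'h')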
""" For more details visit article: http://www.datasciencemadesimple.com/difference-two-timestamps-seconds-minutes-hours-pandas-python-2/
Work with different timezones
- below, we will see how to work with timestamps that are in different time zones.
"""
# first, let's create a toy dataframe with some timestamps in different time zones
df = pd.DataFrame()
df['time'] = pd.concat([
pd.Series(
pd.date_range(
start='2014-08-01 09:00', freq='H', periods=3,
tz='Europe/Berlin')),
pd.Series(
pd.date_range(
start='2014-08-01 09:00', freq='H', periods=3, tz='US/Central'))
], axis=0)
df
"""
time
0 2014-08-01 09:00:00+02:00
1 2014-08-01 10:00:00+02:00
2 2014-08-01 11:00:00+02:00
0 2014-08-01 09:00:00-05:00
1 2014-08-01 10:00:00-05:00
2 2014-08-01 11:00:00-05:00
- We can see the different timezones indicated by the +02:00 and -05:00 offsets with respect to UTC.
"""
# to work with different time zones, first we unify the timezone to the central one
# setting utc = True
df['time_utc'] = | pd.to_datetime(df['time'], utc=True) | pandas.to_datetime |
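# As an illustrative next step (not part of the original tutorial): once every timestamp is
# expressed in UTC, the whole column can be converted to any single zone with tz_convert
df['time_london'] = df['time_utc'].dt.tz_convert('Europe/London')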
import pandas as pd
import numpy as np
from zipfile import ZipFile
from cleanco import cleanco
import re
import os
import zipcode
from pyzipcode import ZipCodeDatabase
#State formats
state_format_extrastuff = '^[a-zA-Z]{2} '
state_format_right = '^[A-Z]{2}$'
#Set up standardization for date fields
date_right_pattern = '^[0-9]{4}-[0-9]{2}-[0-9]{2}$'
date_long_format = '^[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{1,2}:[0-9]{1,2}:[0-9]{1,2}$'
date_long_format_no_space = '^[0-9]{4}-[0-9]{2}-[0-9]{2}[0-9]{1,2}:[0-9]{1,2}:[0-9]{1,2}$'
date_slash_format = '^[0-9]{1,2}/[0-9]{1,2}/[0-9]{4}$'
date_slash_format_5='^[0-9]{1,2}/[0-9]{1,2}/[0-9]{5}$'
date_slash_format_2 = '^[0-9]{1,2}/[0-9]{1,2}/[0-9]{2}$'
date_slash_format_long = '^[0-9]{1,2}/[0-9]{1,2}/[0-9]{4}[0-9]{1,2}:[0-9]{1,2}:[0-9]{1,2}$'
#Set up standardization for zipcodes
right_zip = '^[0-9]{5}$'
long_zip = '^[0-9]{5}-[0-9]{4}$'
zip_dict = {'77098':'TX','49855':'MI', '48075':'MI','48334':'MI', '48034':'MI', '48335':'MI','95014':'CA','92833':'CA','92834':'CA','10962':'NY','98117':'WA','98765':'WA','20008':'DC','20002':'DC','21704':'MD','20814':'MD','21208':'MD','22222':'VA'}
state_values = ['TX', 'MA', 'MI', 'CA', 'VA', 'NJ', 'NY', 'PA', 'FL', 'MN', 'IL',
'MD', 'CT', 'WA', 'IA', 'CO', 'AZ', 'GA', 'OK', 'LA', 'WI', 'ND',
'UT', 'IN', 'OH', 'KY', 'NC', 'NH', 'MO', 'TN', 'ID', 'VT', 'DC',
'SD', 'AL', 'OR', 'AR', 'NM', 'SC', 'NE', 'DE', 'WY', 'HI', 'KS',
'WV', 'ME', 'RI', 'NV', 'MS', 'AK','MT','PR','GU','VI']
def get_file_from_zip(zipname, filename):
with ZipFile('/'.join(['raw_data', zipname])) as zip_file:
with zip_file.open(filename) as file:
return pd.read_csv(file, dtype=str, encoding='Latin-1')
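# Illustrative usage (the zip archive and member file names here are assumptions; comments only):
#   df_h1b = get_file_from_zip('H-1B_Disclosure_Data_FY16.zip', 'H-1B_Disclosure_Data_FY16.csv')
#   count_wrong_formats(df_h1b)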
def count_wrong_formats(df):
has_submitted = "CASE_SUBMITTED" in df.columns
has_decision = "DECISION_DATE" in df.columns
wrong_submitted_date_format = 0
wrong_decision_date_format=0
wrong_employer_state_format = 0
wrong_worksite_state_format = 0
wrong_zip_format = 0
for index, row in df.iterrows():
if has_submitted and (pd.isnull(row['CASE_SUBMITTED']) or not re.match(date_right_pattern,row['CASE_SUBMITTED'])):
wrong_submitted_date_format+=1
if has_decision and (pd.isnull(row['DECISION_DATE']) or not re.match(date_right_pattern,row['DECISION_DATE'])):
wrong_decision_date_format+=1
if row['EMPLOYER_STATE'] not in state_values:
wrong_employer_state_format+=1
if row['WORKSITE_STATE'] not in state_values:
wrong_worksite_state_format+=1
if pd.isnull(row['EMPLOYER_POSTAL_CODE']) or not re.match(right_zip, row['EMPLOYER_POSTAL_CODE']):
wrong_zip_format+=1
if has_submitted:
print(wrong_submitted_date_format,"bad CASE_SUBMITTED fields")
if has_decision:
print(wrong_decision_date_format,"bad DECISION_DATE fields")
print(wrong_employer_state_format, "bad EMPLOYER_STATE fields")
print(wrong_worksite_state_format, "bad WORKSITE_STATE fields")
print(wrong_zip_format, "bad EMPLOYER_POSTAL_CODE fields")
def address_concatenate(data):
data["EMPLOYER_ADDRESS"] = (data["EMPLOYER_ADDRESS1"].map(str) +" "+ data["EMPLOYER_ADDRESS2"].map(str)).str.replace('nan','').str.upper().str.strip()
#Set up cleaning functions
def employer_name_uppercase_cleanco(x):
if pd.isnull(x):
return x
else:
return cleanco(str(x).upper()).clean_name()
def uppercase_nopunct(x):
if pd.isnull(x):
return x
else:
return str(x).upper().replace('[^\w\s]','').replace('/',' ').replace('"','').replace(' ',' ').strip()
def clean_states(x):
if pd.isnull(x):
return x
if str(x).strip()=='':
return np.nan
elif re.match(state_format_right, x):
return x
elif re.match(state_format_extrastuff, x) and x[:2] in state_values:
return x[:2]
elif x=="MARYLAND":
return "MD"
elif x=="NEW YORK":
return "NY"
else:
print("\t\tState Error, ",x)
return x
def case_status_withdrawn(x):
if x.WITHDRAWN=="Y" or x.WITHDRAWN=="y":
return x.CASE_STATUS+"-WITHDRAWN"
else:
return x.CASE_STATUS
def dot_code_format(x):
if pd.isnull(x):
return x
else:
return str(x).zfill(3)
def check_apply_date_pattern(x):
if pd.isnull(x):
return x
else:
x=str(x).replace(" ",'')
if re.match(date_right_pattern,x):
return x
elif re.match(date_long_format, x):
return x[:10].strip()
elif re.match(date_long_format_no_space,x):
return x[:10].strip()
elif re.match(date_slash_format, x):
month = x[:x.index('/')]
day = x[x.index('/')+1:x.index('/',x.index('/')+1)]
year = x[x.index('/',x.index('/')+1)+1:]
return "{}-{}-{}".format(year.strip(), month.zfill(2).strip(), day.zfill(2).strip())
elif re.match(date_slash_format_5, x):
month = x[:x.index('/')]
day = x[x.index('/')+1:x.index('/',x.index('/')+1)]
year = x[x.index('/',x.index('/')+1)+1:x.index('/',x.index('/')+1)+5]
return "{}-{}-{}".format(year.strip(), month.zfill(2).strip(), day.zfill(2).strip())
elif re.match(date_slash_format_2, x):
month = x[:x.index('/')]
day = x[x.index('/')+1:x.index('/',x.index('/')+1)]
year = x[x.index('/',x.index('/')+1)+1:]
return "20{}-{}-{}".format(year.strip(), month.zfill(2).strip(), day.zfill(2).strip())
elif re.match(date_slash_format_long, x):
month = x[:x.index('/')]
day = x[x.index('/')+1:x.index('/',x.index('/')+1)]
year = x[x.index('/',x.index('/')+1)+1:x.index('/',x.index('/')+1)+5]
return "{}-{}-{}".format(year.strip(), month.zfill(2).strip(), day.zfill(2).strip())
else:
print("\t\tDATE ERROR: x is",x,"returning None")
return None
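# Illustrative examples of the date normalization above (assumed inputs; comments only):
#   check_apply_date_pattern('2016-03-01')          -> '2016-03-01' (already the target format)
#   check_apply_date_pattern('3/1/2016')            -> '2016-03-01' (slash format, zero-padded)
#   check_apply_date_pattern('2016-03-01 12:30:45') -> '2016-03-01' (time component dropped)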
def fix_zip(x):
if pd.isnull(x):
return x
x=str(x).strip()
if x.isnumeric():
x=x.zfill(5)
if re.match(right_zip,x):
return x
elif re.match(long_zip,x):
return x[:5]
else:
print("\t\tError in zip,",x)
return x
def fix_visa_class(x):
if pd.isnull(x):
return x
x=str(x).strip()
valid = ["H-1B","E-3","H-1B1 CHILE","H-1B1 SINGAPORE"]
if x in valid:
return x
elif x=="R":
return "H-1B"
elif x=="A":
return "E-3"
elif x=="C":
return "H-1B1 CHILE"
elif x=="S":
return "H-1B1 SINGAPORE"
else:
print("\t\tError in visa class, ",x)
return x
def fix_employer_states(x):
if pd.isnull(x['EMPLOYER_STATE']):
return x.EMPLOYER_STATE
elif x['EMPLOYER_STATE'] not in state_values and x.EMPLOYER_POSTAL_CODE.isdigit():
if x.EMPLOYER_POSTAL_CODE in zip_dict.keys():
return zip_dict[x.EMPLOYER_POSTAL_CODE]
else:
newzip = zipcode.isequal(x.EMPLOYER_POSTAL_CODE)
if newzip is not None:
return newzip.state
else:
pyzip = findpyzipcode(x)
if pyzip:
return pyzip
print("\t\tCouldn't find",x.EMPLOYER_POSTAL_CODE,"in either zip package")
return x.EMPLOYER_STATE
elif x['EMPLOYER_STATE'] not in state_values and re.match(state_format_right, x.EMPLOYER_POSTAL_CODE.upper()):
#print("Employer state found in postal code, shifting",x.EMPLOYER_POSTAL_CODE.upper())
x.EMPLOYER_CITY = x.EMPLOYER_STATE
return x.EMPLOYER_POSTAL_CODE.upper()
else:
return x.EMPLOYER_STATE
def findpyzipcode(x):
zcdb = ZipCodeDatabase()
try:
value = zcdb[x]
return value.state
except IndexError:
return None
def fix_worksite_states(x):
if | pd.isnull(x['WORKSITE_STATE']) | pandas.isnull |
from __future__ import division
from functools import wraps
import pandas as pd
import numpy as np
import time
import csv, sys
import os.path
import logging
from .ted_functions import TedFunctions
from .ted_aggregate_methods import TedAggregateMethods
from base.uber_model import UberModel, ModelSharedInputs
class TedSpeciesProperties(object):
"""
Listing of species properties that will eventually be read in from a SQL db
"""
def __init__(self):
"""Class representing Species properties"""
super(TedSpeciesProperties, self).__init__()
self.sci_name = pd.Series([], dtype='object')
self.com_name = pd.Series([], dtype='object')
self.taxa = pd.Series([], dtype='object')
self.order = pd.Series([], dtype='object')
self.usfws_id = pd.Series([], dtype='object')
self.body_wgt = pd.Series([], dtype='object')
self.diet_item = pd.Series([], dtype='object')
self.h2o_cont = pd.Series([], dtype='float')
def read_species_properties(self):
# this is a temporary method to initiate the species/diet food items lists (this will be replaced with
# a method to access a SQL database containing the properties
#filename = './ted/tests/TEDSpeciesProperties.csv'
filename = os.path.join(os.path.dirname(__file__),'tests/TEDSpeciesProperties.csv')
try:
with open(filename,'rt') as csvfile:
# csv.DictReader uses first line in file for column headings by default
dr = pd.read_csv(csvfile) # comma is default delimiter
except csv.Error as e:
sys.exit('file: %s, %s' % (filename, e))
print(dr)
self.sci_name = dr.ix[:,'Scientific Name']
self.com_name = dr.ix[:,'Common Name']
self.taxa = dr.ix[:,'Taxa']
self.order = dr.ix[:,'Order']
self.usfws_id = dr.ix[:,'USFWS Species ID (ENTITY_ID)']
self.body_wgt= dr.ix[:,'BW (g)']
self.diet_item = dr.ix[:,'Food item']
self.h2o_cont = dr.ix[:,'Water content of diet']
class TedInputs(ModelSharedInputs):
"""
Required inputs class for Ted.
"""
def __init__(self):
"""Class representing the inputs for Ted"""
super(TedInputs, self).__init__()
# Inputs: Assign object attribute variables from the input Pandas DataFrame
self.chemical_name = pd.Series([], dtype="object", name="chemical_name")
# application parameters for min/max application scenarios
self.crop_min = pd.Series([], dtype="object", name="crop")
self.app_method_min = pd.Series([], dtype="object", name="app_method_min")
self.app_rate_min = pd.Series([], dtype="float", name="app_rate_min")
self.num_apps_min = pd.Series([], dtype="int", name="num_apps_min")
self.app_interval_min = pd.Series([], dtype="int", name="app_interval_min")
self.droplet_spec_min = pd.Series([], dtype="object", name="droplet_spec_min")
self.boom_hgt_min = pd.Series([], dtype="object", name="boom_hgt_min")
self.pest_incorp_depth_min = pd.Series([], dtype="object", name="pest_incorp_depth_min")
self.crop_max = pd.Series([], dtype="object", name="crop")
self.app_method_max = pd.Series([], dtype="object", name="app_method_max")
self.app_rate_max = pd.Series([], dtype="float", name="app_rate_max")
self.num_apps_max = pd.Series([], dtype="int", name="num_apps_max")
self.app_interval_max = pd.Series([], dtype="int", name="app_interval_max")
self.droplet_spec_max = pd.Series([], dtype="object", name="droplet_spec_max")
        self.boom_hgt_max = pd.Series([], dtype="object", name="boom_hgt_max")
self.pest_incorp_depth_max = pd.Series([], dtype="object", name="pest_incorp_depth")
# physical, chemical, and fate properties of pesticide
self.foliar_diss_hlife = pd.Series([], dtype="float", name="foliar_diss_hlife")
self.aerobic_soil_meta_hlife = pd.Series([], dtype="float", name="aerobic_soil_meta_hlife")
self.frac_retained_mamm = pd.Series([], dtype="float", name="frac_retained_mamm")
self.frac_retained_birds = pd.Series([], dtype="float", name="frac_retained_birds")
self.log_kow = pd.Series([], dtype="float", name="log_kow")
self.koc = pd.Series([], dtype="float", name="koc")
self.solubility = pd.Series([], dtype="float", name="solubility")
self.henry_law_const = pd.Series([], dtype="float", name="henry_law_const")
# bio concentration factors (ug active ing/kg-ww) / (ug active ing/liter)
self.aq_plant_algae_bcf_mean = pd.Series([], dtype="float", name="aq_plant_algae_bcf_mean")
self.aq_plant_algae_bcf_upper = pd.Series([], dtype="float", name="aq_plant_algae_bcf_upper")
self.inv_bcf_mean = pd.Series([], dtype="float", name="inv_bcf_mean")
self.inv_bcf_upper = pd.Series([], dtype="float", name="inv_bcf_upper")
self.fish_bcf_mean = pd.Series([], dtype="float", name="fish_bcf_mean")
self.fish_bcf_upper = pd.Series([], dtype="float", name="fish_bcf_upper")
# bounding water concentrations (ug active ing/liter)
self.water_conc_1 = pd.Series([], dtype="float", name="water_conc_1") # lower bound
self.water_conc_2 = pd.Series([], dtype="float", name="water_conc_2") # upper bound
# health value inputs
# naming convention (based on listing from OPP TED Excel spreadsheet 'inputs' worksheet):
# dbt: dose based toxicity
# cbt: concentration-based toxicity
# arbt: application rate-based toxicity
# 1inmill_mort: 1/million mortality (note initial character is numeral 1, not letter l)
# 1inten_mort: 10% mortality (note initial character is numeral 1, not letter l)
# others are self explanatory
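        # Illustrative reading of this convention: 'dbt_mamm_1inmill_mort' below is the dose-based
        # toxicity for mammals at a 1-in-a-million mortality level (mg-pest/kg-bw), and its '_wgt'
        # counterpart holds the body weight (grams) of the test animal that value was derived from.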
# dose based toxicity(dbt): mammals (mg-pest/kg-bw) & weight of test animal (grams)
self.dbt_mamm_1inmill_mort = pd.Series([], dtype="float", name="dbt_mamm_1inmill_mort")
self.dbt_mamm_1inten_mort = pd.Series([], dtype="float", name="dbt_mamm_1inten_mort")
self.dbt_mamm_low_ld50 = pd.Series([], dtype="float", name="dbt_mamm_low_ld50")
        self.dbt_mamm_rat_oral_ld50 = pd.Series([], dtype="float", name="dbt_mamm_rat_oral_ld50")
self.dbt_mamm_rat_derm_ld50 = pd.Series([], dtype="float", name="dbt_mamm_rat_derm_ld50")
self.dbt_mamm_rat_inhal_ld50 = pd.Series([], dtype="float", name="dbt_mamm_rat_inhal_ld50")
self.dbt_mamm_sub_direct = pd.Series([], dtype="float", name="dbt_mamm_sub_direct")
self.dbt_mamm_sub_indirect = pd.Series([], dtype="float", name="dbt_mamm_sub_indirect")
self.dbt_mamm_1inmill_mort_wgt = pd.Series([], dtype="float", name="dbt_mamm_1inmill_mort_wgt")
self.dbt_mamm_1inten_mort_wgt = pd.Series([], dtype="float", name="dbt_mamm_1inten_mort_wgt")
self.dbt_mamm_low_ld50_wgt = pd.Series([], dtype="float", name="dbt_mamm_low_ld50_wgt")
        self.dbt_mamm_rat_oral_ld50_wgt = pd.Series([], dtype="float", name="dbt_mamm_rat_oral_ld50_wgt")
self.dbt_mamm_rat_derm_ld50_wgt = pd.Series([], dtype="float", name="dbt_mamm_rat_derm_ld50_wgt")
self.dbt_mamm_rat_inhal_ld50_wgt = pd.Series([], dtype="float", name="dbt_mamm_rat_inhal_ld50_wgt")
self.dbt_mamm_sub_direct_wgt = pd.Series([], dtype="float", name="dbt_mamm_sub_direct_wgt")
self.dbt_mamm_sub_indirect_wgt = pd.Series([], dtype="float", name="dbt_mamm_sub_indirect_wgt")
# dose based toxicity(dbt): birds (mg-pest/kg-bw) & weight of test animal (grams)
self.dbt_bird_1inmill_mort = pd.Series([], dtype="float", name="dbt_bird_1inmill_mort")
self.dbt_bird_1inten_mort = pd.Series([], dtype="float", name="dbt_bird_1inten_mort")
self.dbt_bird_low_ld50 = pd.Series([], dtype="float", name="dbt_bird_low_ld50")
self.dbt_bird_hc05 = pd.Series([], dtype="float", name="dbt_bird_hc05")
self.dbt_bird_hc50 = pd.Series([], dtype="float", name="dbt_bird_hc50")
self.dbt_bird_hc95 = pd.Series([], dtype="float", name="dbt_bird_hc95")
self.dbt_bird_sub_direct = pd.Series([], dtype="float", name="dbt_bird_sub_direct")
self.dbt_bird_sub_indirect = pd.Series([], dtype="float", name="dbt_bird_sub_indirect")
self.mineau_sca_fact = pd.Series([], dtype="float", name="mineau_sca_fact")
self.dbt_bird_1inmill_mort_wgt = pd.Series([], dtype="float", name="dbt_bird_1inmill_mort_wgt")
self.dbt_bird_1inten_mort_wgt = pd.Series([], dtype="float", name="dbt_bird_1inten_mort_wgt")
self.dbt_bird_low_ld50_wgt = pd.Series([], dtype="float", name="dbt_bird_low_ld50_wgt")
self.dbt_bird_hc05_wgt = pd.Series([], dtype="float", name="dbt_bird_hc05_wgt")
self.dbt_bird_hc50_wgt = pd.Series([], dtype="float", name="dbt_bird_hc50_wgt")
self.dbt_bird_hc95_wgt = pd.Series([], dtype="float", name="dbt_bird_hc95_wgt")
self.dbt_bird_sub_direct_wgt = pd.Series([], dtype="float", name="dbt_bird_sub_direct_wgt")
self.dbt_bird_sub_indirect_wgt = pd.Series([], dtype="float", name="dbt_bird_sub_indirect_wgt")
self.mineau_sca_fact_wgt = pd.Series([], dtype="float", name="mineau_sca_fact_wgt")
# dose based toxicity(dbt): reptiles, terrestrial-phase amphibians (mg-pest/kg-bw) & weight of test animal (grams)
self.dbt_reptile_1inmill_mort = pd.Series([], dtype="float", name="dbt_reptile_1inmill_mort")
self.dbt_reptile_1inten_mort = pd.Series([], dtype="float", name="dbt_reptile_1inten_mort")
self.dbt_reptile_low_ld50 = pd.Series([], dtype="float", name="dbt_reptile_low_ld50")
self.dbt_reptile_sub_direct = pd.Series([], dtype="float", name="dbt_reptile_sub_direct")
self.dbt_reptile_sub_indirect = pd.Series([], dtype="float", name="dbt_reptile_sub_indirect")
self.dbt_reptile_1inmill_mort_wgt = pd.Series([], dtype="float", name="dbt_reptile_1inmill_mort_wgt")
self.dbt_reptile_1inten_mort_wgt = pd.Series([], dtype="float", name="dbt_reptile_1inten_mort_wgt")
self.dbt_reptile_low_ld50_wgt = pd.Series([], dtype="float", name="dbt_reptile_low_ld50_wgt")
self.dbt_reptile_sub_direct_wgt = pd.Series([], dtype="float", name="dbt_reptile_sub_direct_wgt")
self.dbt_reptile_sub_indirect_wgt = pd.Series([], dtype="float", name="dbt_reptile_sub_indirect_wgt")
# concentration-based toxicity (cbt) : mammals (mg-pest/kg-diet food)
self.cbt_mamm_1inmill_mort = pd.Series([], dtype="float", name="cbt_mamm_1inmill_mort")
self.cbt_mamm_1inten_mort = pd.Series([], dtype="float", name="cbt_mamm_1inten_mort")
self.cbt_mamm_low_lc50 = pd.Series([], dtype="float", name="cbt_mamm_low_lc50")
self.cbt_mamm_sub_direct = pd.Series([], dtype="float", name="cbt_mamm_sub_direct")
self.cbt_mamm_grow_noec = pd.Series([], dtype="float", name="cbt_mamm_grow_noec")
self.cbt_mamm_grow_loec = pd.Series([], dtype="float", name="cbt_mamm_grow_loec")
self.cbt_mamm_repro_noec = pd.Series([], dtype="float", name="cbt_mamm_repro_noec")
self.cbt_mamm_repro_loec = pd.Series([], dtype="float", name="cbt_mamm_repro_loec")
self.cbt_mamm_behav_noec = pd.Series([], dtype="float", name="cbt_mamm_behav_noec")
self.cbt_mamm_behav_loec = pd.Series([], dtype="float", name="cbt_mamm_behav_loec")
self.cbt_mamm_sensory_noec = pd.Series([], dtype="float", name="cbt_mamm_sensory_noec")
self.cbt_mamm_sensory_loec = pd.Series([], dtype="float", name="cbt_mamm_sensory_loec")
self.cbt_mamm_sub_indirect = pd.Series([], dtype="float", name="cbt_mamm_sub_indirect")
# concentration-based toxicity (cbt) : birds (mg-pest/kg-diet food)
self.cbt_bird_1inmill_mort = pd.Series([], dtype="float", name="cbt_bird_1inmill_mort")
self.cbt_bird_1inten_mort = pd.Series([], dtype="float", name="cbt_bird_1inten_mort")
self.cbt_bird_low_lc50 = pd.Series([], dtype="float", name="cbt_bird_low_lc50")
self.cbt_bird_sub_direct = pd.Series([], dtype="float", name="cbt_bird_sub_direct")
self.cbt_bird_grow_noec = pd.Series([], dtype="float", name="cbt_bird_grow_noec")
self.cbt_bird_grow_loec = pd.Series([], dtype="float", name="cbt_bird_grow_loec")
self.cbt_bird_repro_noec = pd.Series([], dtype="float", name="cbt_bird_repro_noec")
self.cbt_bird_repro_loec = pd.Series([], dtype="float", name="cbt_bird_repro_loec")
self.cbt_bird_behav_noec = pd.Series([], dtype="float", name="cbt_bird_behav_noec")
self.cbt_bird_behav_loec = pd.Series([], dtype="float", name="cbt_bird_behav_loec")
self.cbt_bird_sensory_noec = pd.Series([], dtype="float", name="cbt_bird_sensory_noec")
self.cbt_bird_sensory_loec = pd.Series([], dtype="float", name="cbt_bird_sensory_loec")
self.cbt_bird_sub_indirect = pd.Series([], dtype="float", name="cbt_bird_sub_indirect")
# concentration-based toxicity (cbt) : reptiles, terrestrial-phase amphibians (mg-pest/kg-diet food)
self.cbt_reptile_1inmill_mort = pd.Series([], dtype="float", name="cbt_reptile_1inmill_mort")
self.cbt_reptile_1inten_mort = pd.Series([], dtype="float", name="cbt_reptile_1inten_mort")
self.cbt_reptile_low_lc50 = pd.Series([], dtype="float", name="cbt_reptile_low_lc50")
self.cbt_reptile_sub_direct = pd.Series([], dtype="float", name="cbt_reptile_sub_direct")
self.cbt_reptile_grow_noec = pd.Series([], dtype="float", name="cbt_reptile_grow_noec")
self.cbt_reptile_grow_loec = pd.Series([], dtype="float", name="cbt_reptile_grow_loec")
self.cbt_reptile_repro_noec = pd.Series([], dtype="float", name="cbt_reptile_repro_noec")
self.cbt_reptile_repro_loec = pd.Series([], dtype="float", name="cbt_reptile_repro_loec")
self.cbt_reptile_behav_noec = pd.Series([], dtype="float", name="cbt_reptile_behav_noec")
self.cbt_reptile_behav_loec = pd.Series([], dtype="float", name="cbt_reptile_behav_loec")
self.cbt_reptile_sensory_noec = pd.Series([], dtype="float", name="cbt_reptile_sensory_noec")
self.cbt_reptile_sensory_loec = pd.Series([], dtype="float", name="cbt_reptile_sensory_loec")
self.cbt_reptile_sub_indirect = pd.Series([], dtype="float", name="cbt_reptile_sub_indirect")
# concentration-based toxicity (cbt) : invertebrates body weight (mg-pest/kg-bw(ww))
self.cbt_inv_bw_1inmill_mort = pd.Series([], dtype="float", name="cbt_inv_bw_1inmill_mort")
self.cbt_inv_bw_1inten_mort = pd.Series([], dtype="float", name="cbt_inv_bw_1inten_mort")
self.cbt_inv_bw_low_lc50 = pd.Series([], dtype="float", name="cbt_inv_bw_low_lc50")
self.cbt_inv_bw_sub_direct = pd.Series([], dtype="float", name="cbt_inv_bw_sub_direct")
self.cbt_inv_bw_grow_noec = pd.Series([], dtype="float", name="cbt_inv_bw_grow_noec")
self.cbt_inv_bw_grow_loec = pd.Series([], dtype="float", name="cbt_inv_bw_grow_loec")
self.cbt_inv_bw_repro_noec = pd.Series([], dtype="float", name="cbt_inv_bw_repro_noec")
self.cbt_inv_bw_repro_loec = pd.Series([], dtype="float", name="cbt_inv_bw_repro_loec")
self.cbt_inv_bw_behav_noec = pd.Series([], dtype="float", name="cbt_inv_bw_behav_noec")
self.cbt_inv_bw_behav_loec = | pd.Series([], dtype="float", name="cbt_inv_bw_behav_loec") | pandas.Series |
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 29 08:35:09 2019
@author: user
"""
# build first input with noational aggregation
# import build_input_national_aggr
print('####################')
print('BUILDING INPUT DATA FOR DISAGGREGATION OF SWITZERLAND INTO ARCHETYPES')
print('####################')
import os
import itertools
import hashlib
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import grimsel.auxiliary.sqlutils.aux_sql_func as aql
import datetime
import seaborn as sns
from grimsel.auxiliary.aux_general import print_full
from grimsel.auxiliary.aux_general import translate_id
import config_local as conf
from grimsel.auxiliary.aux_general import expand_rows
base_dir = conf.BASE_DIR
data_path = conf.PATH_CSV
data_path_prv = conf.PATH_CSV + '_national_aggr'
seed = 2
np.random.seed(seed)
db = conf.DATABASE
sc = conf.SCHEMA
#db = 'grimsel_1'
#sc = 'lp_input_ee_dsm'
def append_new_rows(df, tb):
list_col = list(aql.get_sql_cols(tb, sc, db).keys())
aql.write_sql(df[list_col], db=db, sc=sc, tb=tb, if_exists='append')
def del_new_rows(ind, tb, df):
del_list = df[ind].drop_duplicates()
for i in ind:
del_list[i] = '%s = '%i + del_list[i].astype(str)
del_str = ' OR '.join(del_list.apply(lambda x: '(' + ' AND '.join(x) + ')', axis=1))
exec_strg = '''
DELETE FROM {sc}.{tb}
WHERE {del_str}
'''.format(tb=tb, sc=sc, del_str=del_str)
aql.exec_sql(exec_strg, db=db)
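# Illustrative example (hypothetical values): with ind = ['nd_id', 'ca_id'] and df rows
# (5, 0) and (6, 0), del_new_rows issues
#   DELETE FROM {sc}.{tb} WHERE (nd_id = 5 AND ca_id = 0) OR (nd_id = 6 AND ca_id = 0)
# i.e. one parenthesised AND-group per unique key combination, OR-ed together.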
#def replace_table(df, tb):
#
## list_col = list(aql.get_sql_cols(tb, sc, db).keys())
#
# aql.write_sql(df, db=db, sc=sc, tb=tb, if_exists='replace')
def append_new_cols(df, tb):
#
list_col = list(aql.get_sql_cols(tb, sc, db).keys())
col_new = dict.fromkeys((set(df.columns.tolist()) - set(list_col)))
for key, value in col_new.items():
col_new[key] = 'DOUBLE PRECISION'
# col_new = dict.fromkeys((set(list_col[0].columns.tolist()) - set(list_col)),1)
aql.add_column(df_src=df,tb_tgt=[sc,tb],col_new=col_new,on_cols=list_col, db=db)
# exec_strg = '''
# AlTER
# DELETE FROM {sc}.{tb}
# WHERE {del_str}
# '''.format(tb=tb, sc=sc, del_str=del_str)
# aql.exec_sql(exec_strg, db=db)
#
# aql.write_sql(df[list_col], db=db, sc=sc, tb=tb, if_exists='append')
#
#aql.exec_sql('''
# ALTER TABLE lp_input_archetypes.profdmnd
# DROP CONSTRAINT profdmnd_pkey,
# DROP CONSTRAINT profdmnd_dmnd_pf_id_fkey;
# ''', db=db)
#%%
dfprop_era_arch = pd.read_csv(base_dir+'/archetype_disaggr/PV/prop_era_arch.csv', sep = ';')
#dfpv_arch = pd.read_csv(os.path.join(base_dir,'PV/surf_prod_arch_pv.csv'),sep=';')
#dfpv_arch = pd.read_csv(os.path.join(base_dir,'PV/surf_prod_arch_pv_prop_0.csv'),sep=';')
dfpv_arch = pd.read_csv(base_dir+'/archetype_disaggr/PV/surf_prod_arch_pv_prop_new.csv',sep=';')
# set nd_id to that potential
#dfpv_arch['pv_power_pot'] = dfpv_arch['el_prod']/(1000*dfkev['flh'].mean())
dfpv_arch = dfpv_arch.groupby(dfpv_arch.nd_id_new).sum()
#dfpv_arch['nd_id_new'] = dfpv_arch.nd_id
#dfpv_arch.loc[:,dfpv_arch.nd_id_new.str.contains('OTH')] == 'OTH_TOT'
#dfpv_arch['cap_pv'] = 1666*(dfpv_arch['pv_power_pot']/dfpv_arch['pv_power_pot'].sum()) # 1666 MW SFOE 2016
dfpv_arch['cap_pv'] = 1666*(dfpv_arch['pv_power_tot_est']/dfpv_arch['pv_power_tot_est'].sum()) # 1666 MW SFOE 2016
dfpv_arch['cap_st_pwr'] = 0
#
#dfpv_arch_CH0 = dfpv_arch.loc['CH0']
#dfpv_arch = dfpv_arch.drop(['CH0'], axis = 0)
dfpv_arch = dfpv_arch.reset_index()
# %%
dfload_arch = pd.read_csv(base_dir+'/archetype_disaggr/demand/dmnd_archetypes_0.csv').query(
'nd_id not in %s'%(['CH0'])).reset_index(drop=True)
dfload_arch['DateTime'] = dfload_arch['DateTime'].astype('datetime64[ns]')
dfload_arch_res = pd.read_csv(base_dir+'/archetype_disaggr/demand/dmnd_archetypes_0.csv').query(
'nd_id.str.contains("SFH") or nd_id.str.contains("MFH")',engine='python').reset_index(drop=True)
dfload_arch_res['DateTime'] = dfload_arch_res['DateTime'].astype('datetime64[ns]')
dfload_arch_notres = pd.read_csv(base_dir+'/archetype_disaggr/demand/dmnd_archetypes_0.csv').query(
'nd_id.str.contains("OCO") or nd_id.str.contains("IND")',engine='python').reset_index(drop=True)
dfload_arch_notres['DateTime'] = dfload_arch_notres['DateTime'].astype('datetime64[ns]')
dfload_arch_CH0 = pd.read_csv(base_dir+'/archetype_disaggr/demand/dmnd_archetypes_0.csv').query(
'nd_id in %s'%(['CH0'])).reset_index(drop=True)
dfload_arch_CH0['DateTime'] = dfload_arch_CH0['DateTime'].astype('datetime64[ns]')
# dfload_arch = aql.read_sql('grimsel_1', 'profiles_raw','dmnd_archetypes_0',filt=[('nd_id', ['CH0'],'!=')])
# dfload_arch_res= aql.read_sql('grimsel_1', 'profiles_raw','dmnd_archetypes_0',filt=[('nd_id', ['SFH%','MFH%'],'LIKE')])
# dfload_arch_notres= aql.read_sql('grimsel_1', 'profiles_raw','dmnd_archetypes_0',filt=[('nd_id', ['OCO%','IND%'],'LIKE')])
# dfload_arch_CH0_1 = aql.read_sql('grimsel_1', 'profiles_raw','dmnd_archetypes_0',filt=[('nd_id', ['CH0'])])
#dfload_arch = aql.read_sql('grimsel_1', 'profiles_raw','dmnd_archetypes')
dfload_dict ={}
dfload_dict_new = {}
df = dfload_arch_res.copy()
df['nd_id_new'] = 0
df['erg_tot_new'] = 0
for i in df.nd_id.unique():
dfload_dict[i] = df.loc[df.nd_id == i]
for l in (0,1,2,3):
df_1 = dfload_dict[i].copy()
df_1['erg_tot_new'] = df_1.loc[:,'erg_tot'] * dfprop_era_arch.loc[dfprop_era_arch.nd_el.str.contains(i+'_'+str(l)),'prop'].reset_index(drop=True).loc[0]
df_1['nd_id_new'] = i+'_'+str(l)
dfload_dict_new[i+'_'+str(l)] = df_1
dfload_arch_res_new = dfload_arch_notres.head(0)
for j in dfload_dict_new:
dfload_arch_res_new = dfload_arch_res_new.append(dfload_dict_new[j],ignore_index=True)
dfload_arch_notres['nd_id_new'] = dfload_arch_notres[['nd_id']]
dfload_arch_notres['erg_tot_new'] = dfload_arch_notres[['erg_tot']]
dfload_arch = dfload_arch_res_new.append(dfload_arch_notres,ignore_index=True)
dfload_arch = dfload_arch.set_index('DateTime')
dfload_arch.index = pd.to_datetime(dfload_arch.index)
dfload_arch_CH0 = dfload_arch_CH0.set_index('DateTime')
dfload_arch = dfload_arch.drop(columns=['nd_id','erg_tot']).rename(columns={'nd_id_new':'nd_id','erg_tot_new':'erg_tot'})
# %%
np.random.seed(3)
dferg_arch = dfload_arch.groupby('nd_id')['erg_tot'].sum()
dferg_arch = dferg_arch.reset_index()
dferg_arch['nd_id_new'] = dferg_arch.nd_id
dict_nd = dferg_arch.set_index('nd_id')['nd_id_new'].to_dict()
# %%
df_solar_canton_raw = pd.read_csv(base_dir+'/archetype_disaggr/PV/swiss_location_solar.csv')[['value', 'hy', 'canton','DateTime']]
df_solar_canton_raw['DateTime'] = df_solar_canton_raw['DateTime'].astype('datetime64[ns]')
# df_solar_canton_raw_test = aql.read_sql(db, 'profiles_raw', 'swiss_location_solar',
# keep=['value', 'hy', 'canton','DateTime'])
df_solar_canton_raw_1 = df_solar_canton_raw.pivot_table(index='DateTime',columns='canton', values='value')
df_solar_canton_1h = df_solar_canton_raw_1.resample('1h').sum()/4
df_solar_canton_1h['avg_all'] = df_solar_canton_1h.mean(axis=1)
df_solar_canton_1h['DateTime'] = df_solar_canton_1h.index
df_solar_canton_1h = df_solar_canton_1h.reset_index(drop=True)
df_solar_canton_1h['hy'] = df_solar_canton_1h.index
df_solar_canton_raw_1h = pd.melt(df_solar_canton_1h, id_vars=['DateTime','hy'], var_name='canton', value_name='value')
df_solar_canton_1h.index = df_solar_canton_1h['DateTime']
df_solar_canton_1h = df_solar_canton_1h.drop(columns=['DateTime','hy'])
cols = df_solar_canton_1h.columns.tolist()
cols = cols[-1:] + cols[:-1]
df_solar_canton_1h = df_solar_canton_1h[cols]
#list_ct = df_solar_canton_raw.canton.unique().tolist()
list_ct = df_solar_canton_1h.columns.tolist()
# %% ~~~~~~~~~~~~~~~~~~ DEF_NODE
#
#df_def_node_0 = aql.read_sql(db, sc, 'def_node', filt=[('nd', ['SFH%'], ' NOT LIKE ')])
#df_nd_add = pd.DataFrame(pd.concat([dferg_filt.nd_id_new.rename('nd'),
# ], axis=0)).reset_index(drop=True)
color_nd = {'IND_RUR': '#472503',
'IND_SUB': '#041FA3',
'IND_URB': '#484A4B',
'MFH_RUR_0': '#924C04',
'MFH_SUB_0': '#0A81EE',
'MFH_URB_0': '#BDC3C5',
'MFH_RUR_1': '#924C04',
'MFH_SUB_1': '#0A81EE',
'MFH_URB_1': '#BDC3C5',
'MFH_RUR_2': '#924C04',
'MFH_SUB_2': '#0A81EE',
'MFH_URB_2': '#BDC3C5',
'MFH_RUR_3': '#924C04',
'MFH_SUB_3': '#0A81EE',
'MFH_URB_3': '#BDC3C5',
'OCO_RUR': '#6D3904',
'OCO_SUB': '#0A31EE',
'OCO_URB': '#818789',
'SFH_RUR_0': '#BD6104',
'SFH_SUB_0': '#0EBADF',
'SFH_URB_0': '#A9A4D8',
'SFH_RUR_1': '#BD6104',
'SFH_SUB_1': '#0EBADF',
'SFH_URB_1': '#A9A4D8',
'SFH_RUR_2': '#BD6104',
'SFH_SUB_2': '#0EBADF',
'SFH_URB_2': '#A9A4D8',
'SFH_RUR_3': '#BD6104',
'SFH_SUB_3': '#0EBADF',
'SFH_URB_3': '#A9A4D8',
}
col_nd_df = pd.DataFrame.from_dict(color_nd, orient='index').reset_index().rename(columns={'index': 'nd',0:'color'})
df_def_node_0 = pd.read_csv(data_path_prv + '/def_node.csv')
# df_def_node_0 = aql.read_sql(db, sc, 'def_node')
df_nd_add = pd.DataFrame(pd.concat([dferg_arch.nd_id_new.rename('nd'),
], axis=0)).reset_index(drop=True)
# reduce number
#df_nd_add = df_nd_add
nd_id_max = df_def_node_0.loc[~df_def_node_0.nd.isin(df_nd_add.nd)].nd_id.max()
df_nd_add['nd_id'] = np.arange(0, len(df_nd_add)) + nd_id_max + 1
#df_nd_add['color'] = 'g'
df_nd_add = pd.merge(df_nd_add,col_nd_df, on = 'nd')
df_def_node = df_nd_add.reindex(columns=df_def_node_0.columns.tolist()).fillna(0)
dict_nd_id = df_nd_add.set_index('nd')['nd_id'].to_dict()
dict_nd_id = {nd_old: dict_nd_id[nd] for nd_old, nd in dict_nd.items()
if nd in dict_nd_id}
# %% set nd_id number to the corresponding nd_id new
dfpv_arch = dfpv_arch.set_index(dfpv_arch['nd_id_new'])
for key, value in dict_nd_id.items():
dfpv_arch.loc[key,'nd_id'] = value
dferg_arch = dferg_arch.set_index(dfpv_arch['nd_id_new'])
for key, value in dict_nd_id.items():
dferg_arch.loc[key,'nd_id'] = value
# %% ~~~~~~~~~~~~~~~~~~~~~~~ DEF_PP_TYPE
df_def_pp_type_0 = pd.read_csv(data_path_prv + '/def_pp_type.csv')
# df_def_pp_type_0 = aql.read_sql(db, sc, 'def_pp_type')
df_def_pp_type = df_def_pp_type_0.copy().head(0)
for npt, pt, cat, color in ((0, 'STO_LI_SFH', 'NEW_STORAGE_LI_SFH', '#7B09CC'),
(1, 'STO_LI_MFH', 'NEW_STORAGE_LI_MFH', '#59F909'),
(2, 'STO_LI_OCO', 'NEW_STORAGE_LI_OCO', '#28A503'),
(3, 'STO_LI_IND', 'NEW_STORAGE_LI_IND', '#1A6703'),
(4, 'PHO_SFH', 'PHOTO_SFH', '#D9F209'),
(5, 'PHO_MFH', 'PHOTO_MFH', '#F2D109'),
(6, 'PHO_OCO', 'PHOTO_OCO', '#F27E09'),
(7, 'PHO_IND', 'PHOTO_IND', '#F22C09'),):
df_def_pp_type.loc[npt] = (npt, pt, cat, color)
df_def_pp_type['pt_id'] = np.arange(0, len(df_def_pp_type)) + df_def_pp_type_0.pt_id.max() + 1
# %% ~~~~~~~~~~~~~~~~~~~~~~ DEF_FUEL
# all there
df_def_fuel = pd.read_csv(data_path_prv + '/def_fuel.csv')
# df_def_fuel_test = aql.read_sql(db, sc, 'def_fuel')
# %% ~~~~~~~~~~~~~~~~~~~~~~~ DEF_PLANT
df_def_plant_0 = pd.read_csv(data_path_prv + '/def_plant.csv')
# df_def_plant_test = aql.read_sql(db, sc, 'def_plant')
dict_pp_id_all = df_def_plant_0.set_index('pp')['pp_id'].to_dict()
df_pp_add_0 = pd.DataFrame(df_nd_add.nd).rename(columns={'nd': 'nd_id'})
df_pp_add_1 = df_pp_add_0.nd_id.str.slice(stop=3)
df_pp_add = pd.DataFrame()
for sfx, fl_id, pt_id, set_1 in [('_PHO', 'photovoltaics', 'PHO_', ['set_def_pr','set_def_add']),
('_STO_LI', 'new_storage', 'STO_LI_', ['set_def_st','set_def_add']),
]:
new_pp_id = df_def_plant_0.pp_id.max() + 1
data = dict(pp=df_pp_add_0 + sfx,
fl_id=fl_id, pt_id=pt_id + df_pp_add_1 , pp_id=np.arange(new_pp_id, new_pp_id + len(df_pp_add_0)),
**{st: 1 if st in set_1 else 0 for st in [c for c in df_def_plant_0.columns if 'set' in c]})
df_pp_add = df_pp_add.append(df_pp_add_0.assign(**data), sort=True)
df_pp_add.pp_id = np.arange(0, len(df_pp_add)) + df_pp_add.pp_id.min()
df_def_plant = df_pp_add[df_def_plant_0.columns].reset_index(drop=True)
for df, idx in [(df_def_fuel, 'fl'), (df_def_pp_type, 'pt'), (df_def_node, 'nd')]:
df_def_plant, _ = translate_id(df_def_plant, df, idx)
# selecting random profiles from canton list
#np.random.seed(4)
dict_pp_id = df_pp_add.set_index('pp')['pp_id'].to_dict()
df_pp_add_pho = df_pp_add.loc[df_pp_add.fl_id == 'photovoltaics']
dict_pp_id_pho = df_pp_add_pho.set_index('pp')['pp_id'].to_dict()
# solar profile dictionary by node
dict_ct = {pp: list_ct[npp%len(list_ct)]
for npp, pp in enumerate(df_pp_add.loc[df_pp_add.fl_id == 'photovoltaics',
'nd_id'].tolist())}
dict_ct = {pp: list_ct[0]
for npp, pp in enumerate(df_pp_add.loc[df_pp_add.fl_id == 'photovoltaics',
'nd_id'].tolist())}
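# NOTE: the second dict_ct comprehension overwrites the first, so every PV plant ends up mapped to
# list_ct[0] ('avg_all' after the column reordering above), i.e. the Swiss-average solar profile is
# used for all nodes rather than a randomly assigned per-canton profile.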
# %% ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ DEF_PROFILE
df_def_profile_0 = pd.read_csv(data_path_prv + '/def_profile.csv')
# df_def_profile_test = aql.read_sql(db, sc, 'def_profile')
df_def_profile_sup = pd.DataFrame({'primary_nd': df_solar_canton_1h.columns}) + '_PHO'
df_def_profile_sup['pf'] = 'supply_' + df_def_profile_sup.primary_nd
df_def_profile_sup['pf_id'] = df_def_profile_sup.index.rename('pf_id') + df_def_profile_0.pf_id.max() + 1
df_def_profile_sup = df_def_profile_sup[df_def_profile_0.columns]
df_def_profile_sup.drop(df_def_profile_sup.tail(23).index,inplace=True) # to keep only average for now
# Demand profiles
df_def_profile_dmnd = df_def_node.nd.copy().rename('primary_nd').reset_index()
df_def_profile_dmnd['pf'] = 'demand_EL_' + df_def_profile_dmnd.primary_nd
df_def_profile_dmnd['pf_id'] = df_def_profile_dmnd.index.rename('pf_id') + df_def_profile_sup.pf_id.max() + 1
df_def_profile_dmnd = df_def_profile_dmnd[df_def_profile_0.columns]
df_def_profile = pd.concat([df_def_profile_sup, df_def_profile_dmnd], axis=0)
# df_def_profile_prc], axis=0)
df_def_profile = df_def_profile.reset_index(drop=True)
#df_def_profile = pd.concat([df_def_profile_sup, df_def_profile_dmnd], axis=0)
df_def_profile
# %% ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ NODE_ENCAR
df_node_encar_0 = pd.read_csv(data_path_prv + '/node_encar.csv')
# df_node_encar_0 = aql.read_sql(db, sc, 'node_encar')
df_node_encar_0_CH0 = df_node_encar_0.copy().loc[(df_node_encar_0.nd_id == 1)]
factor_CH0_dmnd = dfload_arch_CH0.erg_tot.sum()/df_node_encar_0.loc[(df_node_encar_0.nd_id == 1)].dmnd_sum
factor_CH0_dmnd = factor_CH0_dmnd.reset_index(drop=True)
df = df_node_encar_0_CH0.filter(like='dmnd_sum')*factor_CH0_dmnd.loc[0]
df_node_encar_0_CH0.update(df)
#exec_str = '''UPDATE sc.node_encar SET
# SET sc.dmnd_sum = df_node_encar_0_CH0.dmnd_sum
# WHERE nd_id = 1
#
# '''
#aql.exec_sql(exec_str=exec_str,db=db)
#df_ndca_add = (dferg_filt.loc[dferg_filt.nd_id_new.isin(df_nd_add.nd), ['nd_id_new', 'erg_tot_filled']]
# .rename(columns={'erg_tot_filled': 'dmnd_sum', 'nd_id_new': 'nd_id'}))
df_ndca_add = (dferg_arch.loc[dferg_arch.nd_id_new.isin(df_nd_add.nd), ['nd_id_new', 'erg_tot']]
.rename(columns={'erg_tot': 'dmnd_sum', 'nd_id_new': 'nd_id'}))
#TODO maybe add here some grid losses
data = dict(vc_dmnd_flex=0.1, ca_id=0, grid_losses=0.0413336227316051, grid_losses_absolute=0)
df_node_encar = df_ndca_add.assign(**data).reindex(columns=df_node_encar_0.columns)
list_dmnd = [c for c in df_node_encar if 'dmnd_sum' in c]
df_node_encar = df_node_encar.assign(**{c: df_node_encar.dmnd_sum
for c in list_dmnd})
df_node_encar = | pd.merge(df_node_encar, df_def_profile_dmnd, left_on='nd_id', right_on='primary_nd', how='inner') | pandas.merge |
from django.core.files import temp
from django.shortcuts import render
from django.conf import settings
from django.http import HttpResponse
from django.core.files.storage import FileSystemStorage
from django.http import FileResponse
from django.views.static import serve
import xlsxwriter
import pdfkit
import csv
import numpy
#import required libraries
import pandas as pd
import pyexcel
import xlrd
from matplotlib import pylab
from matplotlib import collections as mc
from pylab import *
from pylev3 import Levenshtein
from matplotlib.ticker import PercentFormatter
from matplotlib import pyplot
import matplotlib.pyplot as plt
import PIL, PIL.Image
import os
from io import BytesIO  # io.BytesIO is available on both Python 2.6+ and Python 3
'''from google.colab import drive
drive.mount('/content/drive')'''
# Create your views here.
def welcome(request):
return HttpResponse("Welcome")
def ourResponse(request):
return HttpResponse("OUR RESPONSE")
def takeInput(request):
return render(request,'input.html')
def similarity(seq1, seq2):
l1 , l2 = len(seq1), len(seq2)
ldist = Levenshtein.wf(seq1, seq2)
return (1 - ldist/max(l1, l2))*100
def df_gdomain_counter(df):
df_count = df["ProteinID"].value_counts()
return df_count
def match(x, y, mm):
mismatch = 0
for i in range(len(x)):
if (x[i] == 'X' or x[i] == y[i]):
pass
else:
mismatch += 1
if (mismatch <= mm):
return True
else:
return False
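# Worked example: match("DXKG", "DTKG", 0) is True (the 'X' acts as a wildcard), while
# match("DXKG", "DTRG", 0) is False because the single real mismatch exceeds mm=0.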
def shuffler(word):
word_to_scramble = list(word)
numpy.random.shuffle(word_to_scramble)
# O=seq= ''.join(seq_temp)
new_word = ''.join(word_to_scramble)
return new_word
def list_of_7mer_X(sevenmer):
x_data = []
for r1 in range(7):
x = list(sevenmer)
x[r1] = "X"
x = ''.join(x)
x_data.append(x)
return x_data
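# Worked example: list_of_7mer_X("SAKLDSE") returns the 7 single-wildcard variants
# ['XAKLDSE', 'SXKLDSE', 'SAXLDSE', 'SAKXDSE', 'SAKLXSE', 'SAKLDXE', 'SAKLDSX'].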
def performAlgo(request):
myfile = request.FILES['document']
print(myfile.name)
fs = FileSystemStorage()
'''fs.save(myfile.name, myfile)'''
workbook = xlsxwriter.Workbook('media/new.xlsx')
family = request.POST.get("input01")
outpath = "media/new.xlsx"
df1 = pd.read_excel(myfile)
df2 = df1
for i in range((df1.shape[0] - 1)):
A = df1.loc[i, "Sequence"]
B = df1.loc[(i + 1), "Sequence"]
percent_similarity = similarity(A, B)
if (percent_similarity >= 90):
df2 = df2.drop(df2[df2.Sequence == B].index)
df2.to_excel(outpath, index=False)
NumProteins = df2.shape[0]
def H(protein_id, protein, x1, x2, x3, x4, mm1, mm2, mm3, mm4, min13, min34, min45, max13, max34, max45):
pL1 = []
pL2 = []
pL3 = []
pL4 = []
L1 = []
L2 = []
L3 = []
L4 = []
for i in range(len(protein) - len(x1)):
if (match(x1, protein[i:i + len(x1)], mm1) == True):
# global L1
pL1 = pL1 + [i]
L1 = L1 + [protein[i:i + len(x1)]]
# print "L1 = ", pL1,L1
for j in range(len(protein) - len(x2)):
if (match(x2, protein[j:j + len(x2)], mm2) == True):
# global L2
pL2 = pL2 + [j]
L2 = L2 + [protein[j:j + len(x2)]]
# print "L2 = ", pL2,L2
for k in range(len(protein) - len(x3)):
if (match(x3, protein[k:k + len(x3)], mm3) == True):
# global L3
pL3 = pL3 + [k]
L3 = L3 + [protein[k:k + len(x3)]]
# print "L3 = ", pL3,L3
for l in range(len(protein) - len(x4)):
if (match(x4, protein[l:l + len(x4)], mm4) == True):
# global L3
pL4 = pL4 + [l]
L4 = L4 + [protein[l:l + len(x4)]]
candidates = []
for i in range(len(pL1)):
for j in range(len(pL2)):
for k in range(len(pL3)):
for l in range(len(pL4)):
if (min13 <= pL2[j] - pL1[i] <= max13 and min34 <= pL3[k] - pL2[j] <= max34 and min45 <=
pL4[l] - pL3[k] <= max45):
# if 80 <=pL2[j]-pL1[i] <= 120 and 40 <=pL3[k]- pL2[j] <= 80 and 20 <=pL4[l]- pL3[k] <= 80
a = L1[i]
a_pos = pL1[i]
b = L2[j]
b_pos = pL2[j]
c = L3[k]
c_pos = pL3[k]
d = L4[l]
d_pos = pL4[l]
candidates.append((protein_id, a, a_pos, b, b_pos, c, c_pos, d, d_pos))
return candidates
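    # H returns one tuple per candidate G-domain whose inter-box spacings fall inside the allowed
    # windows; a hypothetical hit looks like
    #   ('P12345', 'GDSGVGK', 15, 'DTAGQ', 95, 'NKCD', 150, 'SAKSNYK', 185)
    # i.e. (ProteinID, G1-box, position, G3-box, position, G4-box, position, G5-box, position).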
abc = []
l1 = []
inpath = "media/new.xlsx"
mismatch1 = int(request.POST.get("mismatch1"))
mismatch2 = int(request.POST.get("mismatch2"))
mismatch3 = int(request.POST.get("mismatch3"))
mismatch4 = int(request.POST.get("mismatch4"))
mismatch41 = mismatch4
x1 = request.POST.get("x1")
x2 = request.POST.get("x2")
x3 = request.POST.get("x3")
x4 = request.POST.get("x4")
Min_G1_G3 = int(request.POST.get("Min_G1_G3"))
Max_G1_G3 = int(request.POST.get("Max_G1_G3"))
Min_G3_G4 = int(request.POST.get("Min_G3_G4"))
Max_G3_G4 = int(request.POST.get("Max_G3_G4"))
Min_G4_G5 = int(request.POST.get("Min_G4_G5"))
Max_G4_G5 = int(request.POST.get("Max_G4_G5"))
workbook = xlsxwriter.Workbook('media/output_wo_bias.xlsx')
outpath = "media/output_wo_bias.xlsx"
df1 = pd.read_excel(inpath)
df2 = df1.set_index("Entry", drop=False)
protein = df2.loc[:, "Sequence"]
protein_id = df2.loc[:, "Entry"]
protein_id
for i in range(len(protein)):
l = H(protein_id[i], protein[i], x1, x2, x3, x4, mismatch1, mismatch2, mismatch3, mismatch4, Min_G1_G3,
Min_G3_G4, Min_G4_G5, Max_G1_G3, Max_G3_G4, Max_G4_G5)
l1.append(l)
abc = [item for sublist in l1 for item in sublist]
gdomains = pd.DataFrame(abc,
columns=['ProteinID', 'G1-box', 'Position', 'G3-box', 'Position.1', 'G4-box', 'Position.2',
'G5-box', 'Position.3'])
gdomains = gdomains[gdomains['ProteinID'].astype(bool)]
gdomains.head()
gdomains.to_excel(outpath, index=False)
abc = []
l1 = []
workbook = xlsxwriter.Workbook('media/SA_nomismatch.xlsx')
outpath = "media/SA_nomismatch.xlsx"
str1 = "XXX"
x41 = str1 + x4 + "X"
mismatch41 = 0
df1 = pd.read_excel(inpath)
df2 = df1.set_index("Entry", drop=False)
protein = df2.loc[:, "Sequence"]
protein_id = df2.loc[:, "Entry"]
#protein_id
for i in range(len(protein)):
l = H(protein_id[i], protein[i], x1, x2, x3, x41, mismatch1, mismatch2, mismatch3, mismatch41, Min_G1_G3,
Min_G3_G4, Min_G4_G5, Max_G1_G3, Max_G3_G4, Max_G4_G5)
l1.append(l)
abc = [item for sublist in l1 for item in sublist]
gdomains = pd.DataFrame(abc,
columns=['ProteinID', 'G1-box', 'Position', 'G3-box', 'Position', 'G4-box', 'Position',
'G5-box', 'Position'])
gdomains = gdomains[gdomains['ProteinID'].astype(bool)]
gdomains.head()
gdomains.to_excel(outpath, index=False)
abc = []
l1 = []
workbook = xlsxwriter.Workbook('media/SA_mismatch.xlsx')
outpath = "media/SA_mismatch.xlsx"
df1 = pd.read_excel(inpath)
df2 = df1.set_index("Entry", drop=False)
protein = df2.loc[:, "Sequence"]
protein_id = df2.loc[:, "Entry"]
protein_id
for i in range(len(protein)):
l = H(protein_id[i], protein[i], x1, x2, x3, x41, mismatch1, mismatch2, mismatch3, mismatch4, Min_G1_G3,
Min_G3_G4, Min_G4_G5, Max_G1_G3, Max_G3_G4, Max_G4_G5)
l1.append(l)
abc = [item for sublist in l1 for item in sublist]
gdomains = pd.DataFrame(abc,
columns=['ProteinID', 'G1-box', 'Position', 'G3-box', 'Position', 'G4-box', 'Position',
'G5-box', 'Position'])
gdomains = gdomains[gdomains['ProteinID'].astype(bool)]
gdomains.head()
gdomains.to_excel(outpath, index=False)
abc = []
l1 = []
workbook = xlsxwriter.Workbook('media/A_nomismatch.xlsx')
outpath = "media/A_nomismatch.xlsx"
y = x4[1:]
z = y[:-1]
x42 = str1 + z + str1
df1 = pd.read_excel(inpath)
df2 = df1.set_index("Entry", drop=False)
protein = df2.loc[:, "Sequence"]
protein_id = df2.loc[:, "Entry"]
protein_id
for i in range(len(protein)):
l = H(protein_id[i], protein[i], x1, x2, x3, x42, mismatch1, mismatch2, mismatch3, mismatch41, Min_G1_G3,
Min_G3_G4, Min_G4_G5, Max_G1_G3, Max_G3_G4, Max_G4_G5)
l1.append(l)
abc = [item for sublist in l1 for item in sublist]
gdomains = pd.DataFrame(abc,
columns=['ProteinID', 'G1-box', 'Position', 'G3-box', 'Position', 'G4-box', 'Position',
'G5-box', 'Position'])
gdomains = gdomains[gdomains['ProteinID'].astype(bool)]
gdomains.head()
gdomains.to_excel(outpath, index=False)
inpath_SA_mm = "media/SA_mismatch.xlsx"
workbook = xlsxwriter.Workbook('media/SA_mm_7mer_X_dict.xlsx')
outpath1_SA_mm = "media/SA_mm_7mer_X_dict.xlsx"
workbook = xlsxwriter.Workbook('media/SA_mm_7mer_X_dict_count.xlsx')
outpath2_SA_mm = "media/SA_mm_7mer_X_dict_count.xlsx"
str2 = [["Rab", 470], ["Rac", 128], ["Ran", 29], ["Ras", 190], ["Roc", 19], ["Arf", 140], ["AlG1", 44],
["Era", 188], ["FeoB", 18], ["Hflx", 26], ["GB1", 116], ["EngB", 401], ["Dynamin", 115], ["IRG", 10],
["Obg", 659], ["Septin", 86], ["SRP", 99], ["Translational", 2869], ["tRme", 454], ["EngA", 424]]
#for i in str2:
# if (i[0] == family):
# #total = i[1]
total = NumProteins
data = pd.read_excel(inpath_SA_mm)
unique_7mers = data['G5-box'].unique()
temp = data
id_set = {}
for j in range(temp.shape[0]):
seq = temp.loc[j, "G5-box"]
ID = temp.loc[j, "ProteinID"]
x_data = list_of_7mer_X(seq)
for x in x_data:
if (x not in id_set):
id_set[x] = set()
id_set[x].add(ID)
else:
id_set[x].add(ID)
id_set.items()
with open(outpath1_SA_mm, 'w') as f:
[f.write('{0},{1}\n'.format(key, value)) for key, value in id_set.items()]
with open(outpath2_SA_mm, 'w') as f:
[f.write('{0},{1}\n'.format(key, [len(value), round((100 * len(value) / total), 2)])) for key, value in
id_set.items()]
inpath_A_nomm = "media/A_nomismatch.xlsx"
workbook = xlsxwriter.Workbook('media/A_nomm_7mer_X_dict.xlsx')
outpath1_A_nomm = "media/A_nomm_7mer_X_dict.xlsx"
workbook = xlsxwriter.Workbook('media/A_nomm_7mer_X_dict_count.xlsx')
outpath2_A_nomm = "media/A_nomm_7mer_X_dict_count.xlsx"
data1 = pd.read_excel(inpath_A_nomm)
unique_7mers = data1['G5-box'].unique()
temp = data1
id_set1 = {}
for j in range(temp.shape[0]):
seq = temp.loc[j, "G5-box"]
ID = temp.loc[j, "ProteinID"]
x_data = list_of_7mer_X(seq)
for x in x_data:
if (x not in id_set1):
id_set1[x] = set()
id_set1[x].add(ID)
else:
id_set1[x].add(ID)
id_set1.items()
with open(outpath1_A_nomm, 'w') as f:
[f.write('{0},{1}\n'.format(key, value)) for key, value in id_set1.items()]
with open(outpath2_A_nomm, 'w') as f:
[f.write('{0},{1}\n'.format(key, [len(value), round((100 * len(value) / total), 2)])) for key, value in
id_set1.items()]
inpath_SA_nomm = "media/SA_nomismatch.xlsx"
workbook = xlsxwriter.Workbook('media/SA_nomm_7mer_X_dict.xlsx')
outpath1_SA_nomm = "media/SA_nomm_7mer_X_dict.xlsx"
workbook = xlsxwriter.Workbook('media/SA_nomm_7mer_X_dict_count.xlsx')
outpath2_SA_nomm = "media/SA_nomm_7mer_X_dict_count.xlsx"
data2 = pd.read_excel(inpath_SA_nomm)
unique_7mers = data2['G5-box'].unique()
temp = data2
id_set2 = {}
for j in range(temp.shape[0]):
seq = temp.loc[j, "G5-box"]
ID = temp.loc[j, "ProteinID"]
x_data = list_of_7mer_X(seq)
for x in x_data:
if (x not in id_set2):
id_set2[x] = set()
id_set2[x].add(ID)
else:
id_set2[x].add(ID)
id_set2.items()
with open(outpath1_SA_nomm, 'w') as f:
[f.write('{0},{1}\n'.format(key, value)) for key, value in id_set2.items()]
with open(outpath2_SA_nomm, 'w') as f:
[f.write('{0},{1}\n'.format(key, [len(value), round((100 * len(value) / total), 2)])) for key, value in
id_set2.items()]
workbook = xlsxwriter.Workbook('media/7mer_X_dict.xlsx')
outpath1 = "media/7mer_X_dict.xlsx"
workbook = xlsxwriter.Workbook('media/7mer_X_count_dict.xlsx')
outpath2 = "media/7mer_X_count_dict.xlsx"
SA_nomm = pd.read_excel(inpath_SA_nomm)
A_nomm = pd.read_excel(inpath_A_nomm)
SA_mm = pd.read_excel(inpath_SA_mm)
table = [SA_nomm[['ProteinID', 'G5-box', 'Position']], A_nomm[['ProteinID', 'G5-box', 'Position']],
SA_mm[['ProteinID', 'G5-box', 'Position']]]
# to be used when SA with no mismatch doesn't give any result.
# table= [A_nomm[['Entry', 'G5_box', 'Value']], SA_mm[['Entry', 'G5_box', 'Value']]]
data3 = pd.concat(table)
data3 = data3.reset_index(drop=True)
unique_7mers = data3['G5-box'].unique()
temp = data3
id_set3 = {}
for j in range(temp.shape[0]):
seq = temp.loc[j, "G5-box"]
ID = temp.loc[j, "ProteinID"]
x_data = list_of_7mer_X(seq)
for x in x_data:
if (x not in id_set3):
id_set3[x] = set()
id_set3[x].add(ID)
else:
id_set3[x].add(ID)
id_set3.items()
with open(outpath1, 'w') as f:
[f.write('{0},{1}\n'.format(key, value)) for key, value in id_set3.items()]
with open(outpath2, 'w') as f:
[f.write('{0},{1}\n'.format(key, [len(value), round((100 * len(value) / total), 2)])) for key, value in
id_set3.items()]
def list_of_7mer_2X(sevenmer):
x_data = []
for r1 in range(7):
for r2 in range(7):
if (r1 != r2):
x = list(sevenmer)
x[r1] = "X"
x[r2] = "X"
x = ''.join(x)
x_data.append(x)
return x_data
workbook = xlsxwriter.Workbook('media/SA_mm_7mer_2X_dict.xlsx')
outpath1_SA_mm = "media/SA_mm_7mer_2X_dict.xlsx"
workbook = xlsxwriter.Workbook('media/SA_mm_7mer_2X_dict_count.xlsx')
outpath2_SA_mm = "media/SA_mm_7mer_2X_dict_count.xlsx"
data = pd.read_excel(inpath_SA_mm)
unique_7mers = data['G5-box'].unique()
temp = data
id_set = {}
for j in range(temp.shape[0]):
seq = temp.loc[j, "G5-box"]
ID = temp.loc[j, "ProteinID"]
x_data = list_of_7mer_2X(seq)
for x in x_data:
if (x not in id_set):
id_set[x] = set()
id_set[x].add(ID)
else:
id_set[x].add(ID)
id_set.items()
with open(outpath1_SA_mm, 'w') as f:
[f.write('{0},{1}\n'.format(key, value)) for key, value in id_set.items()]
with open(outpath2_SA_mm, 'w') as f:
[f.write('{0},{1}\n'.format(key, [len(value), round((100 * len(value) / total), 2)])) for key, value in
id_set.items()]
workbook = xlsxwriter.Workbook('media/A_nomm_7mer_2X_dict.xlsx')
outpath1_A_nomm = "media/A_nomm_7mer_2X_dict.xlsx"
workbook = xlsxwriter.Workbook('media/A_nomm_7mer_2X_dict_count.xlsx')
outpath2_A_nomm = "media/A_nomm_7mer_2X_dict_count.xlsx"
data = pd.read_excel(inpath_A_nomm)
unique_7mers = data['G5-box'].unique()
temp = data
id_set1 = {}
for j in range(temp.shape[0]):
seq = temp.loc[j, "G5-box"]
ID = temp.loc[j, "ProteinID"]
x_data = list_of_7mer_2X(seq)
for x in x_data:
if (x not in id_set1):
id_set1[x] = set()
id_set1[x].add(ID)
else:
id_set1[x].add(ID)
id_set1.items()
with open(outpath1_A_nomm, 'w') as f:
[f.write('{0},{1}\n'.format(key, value)) for key, value in id_set1.items()]
with open(outpath2_A_nomm, 'w') as f:
[f.write('{0},{1}\n'.format(key, [len(value), round((100 * len(value) / total), 2)])) for key, value in
id_set1.items()]
workbook = xlsxwriter.Workbook('media/SA_nomm_7mer_2X_dict.xlsx')
outpath1_SA_nomm = "media/SA_nomm_7mer_2X_dict.xlsx"
workbook = xlsxwriter.Workbook('media/SA_nomm_7mer_2X_dict_count.xlsx')
outpath2_SA_nomm = "media/SA_nomm_7mer_2X_dict_count.xlsx"
data = pd.read_excel(inpath_SA_nomm)
unique_7mers = data['G5-box'].unique()
temp = data
id_set2 = {}
for j in range(temp.shape[0]):
seq = temp.loc[j, "G5-box"]
ID = temp.loc[j, "ProteinID"]
x_data = list_of_7mer_2X(seq)
for x in x_data:
if (x not in id_set2):
id_set2[x] = set()
id_set2[x].add(ID)
else:
id_set2[x].add(ID)
id_set2.items()
with open(outpath1_SA_nomm, 'w') as f:
[f.write('{0},{1}\n'.format(key, value)) for key, value in id_set2.items()]
with open(outpath2_SA_nomm, 'w') as f:
[f.write('{0},{1}\n'.format(key, [len(value), round((100 * len(value) / total), 2)])) for key, value in
id_set2.items()]
workbook = xlsxwriter.Workbook('media/7mer_2X_dict.xlsx')
outpath1 = "media/7mer_2X_dict.xlsx"
workbook = xlsxwriter.Workbook('media/7mer_2X_count_dict.xlsx')
outpath2 = "media/7mer_2X_count_dict.xlsx"
SA_nomm = pd.read_excel(inpath_SA_nomm)
A_nomm = pd.read_excel(inpath_A_nomm)
SA_mm = pd.read_excel(inpath_SA_mm)
table = [SA_nomm[['ProteinID', 'G5-box', 'Position']], A_nomm[['ProteinID', 'G5-box', 'Position']],
SA_mm[['ProteinID', 'G5-box', 'Position']]]
# table= [A_nomm[['Entry', 'G5_box', 'Value']], SA_mm[['Entry', 'G5_box', 'Value']]]
data = pd.concat(table)
data = data.reset_index(drop=True)
unique_7mers = data['G5-box'].unique()
temp = data
id_set3 = {}
for j in range(temp.shape[0]):
seq = temp.loc[j, "G5-box"]
ID = temp.loc[j, "ProteinID"]
x_data = list_of_7mer_2X(seq)
for x in x_data:
if (x not in id_set3):
id_set3[x] = set()
id_set3[x].add(ID)
else:
id_set3[x].add(ID)
id_set3.items()
with open(outpath1, 'w') as f:
[f.write('{0},{1}\n'.format(key, value)) for key, value in id_set3.items()]
with open(outpath2, 'w') as f:
[f.write('{0},{1}\n'.format(key, [len(value), round((100 * len(value) / total), 2)])) for key, value in
id_set3.items()]
def list_of_7mer_3X(sevenmer):
x_data = []
for r1 in range(7):
for r2 in range(7):
for r3 in range(7):
if (r1 != r2 and r1 != r3 and r2 != r3):
x = list(sevenmer)
x[r1] = "X"
x[r2] = "X"
x[r3] = "X"
x = ''.join(x)
x_data.append(x)
return x_data
workbook = xlsxwriter.Workbook('media/SA_mm_7mer_3X_dict.xlsx')
outpath1_SA_mm = "media/SA_mm_7mer_3X_dict.xlsx"
workbook = xlsxwriter.Workbook('media/SA_mm_7mer_3X_dict_count.xlsx')
outpath2_SA_mm = "media/SA_mm_7mer_3X_dict_count.xlsx"
data = pd.read_excel(inpath_SA_mm)
unique_7mers = data['G5-box'].unique()
temp = data
id_set = {}
for j in range(temp.shape[0]):
seq = temp.loc[j, "G5-box"]
ID = temp.loc[j, "ProteinID"]
x_data = list_of_7mer_3X(seq)
for x in x_data:
if (x not in id_set):
id_set[x] = set()
id_set[x].add(ID)
else:
id_set[x].add(ID)
id_set.items()
with open(outpath1_SA_mm, 'w') as f:
[f.write('{0},{1}\n'.format(key, value)) for key, value in id_set.items()]
with open(outpath2_SA_mm, 'w') as f:
[f.write('{0},{1}\n'.format(key, [len(value), round((100 * len(value) / total), 2)])) for key, value in
id_set.items()]
workbook = xlsxwriter.Workbook('media/A_nomm_7mer_3X_dict.xlsx')
outpath1_A_nomm = "media/A_nomm_7mer_3X_dict.xlsx"
workbook = xlsxwriter.Workbook('media/A_nomm_7mer_3X_dict_count.xlsx')
outpath2_A_nomm = "media/A_nomm_7mer_3X_dict_count.xlsx"
data = pd.read_excel(inpath_A_nomm)
unique_7mers = data['G5-box'].unique()
temp = data
id_set1 = {}
for j in range(temp.shape[0]):
seq = temp.loc[j, "G5-box"]
ID = temp.loc[j, "ProteinID"]
x_data = list_of_7mer_3X(seq)
for x in x_data:
if (x not in id_set1):
id_set1[x] = set()
id_set1[x].add(ID)
else:
id_set1[x].add(ID)
id_set1.items()
with open(outpath1_A_nomm, 'w') as f:
[f.write('{0},{1}\n'.format(key, value)) for key, value in id_set1.items()]
with open(outpath2_A_nomm, 'w') as f:
[f.write('{0},{1}\n'.format(key, [len(value), round((100 * len(value) / total), 2)])) for key, value in
id_set1.items()]
workbook = xlsxwriter.Workbook('media/SA_nomm_7mer_3X_dict.xlsx')
outpath1_SA_nomm = "media/SA_nomm_7mer_3X_dict.xlsx"
workbook = xlsxwriter.Workbook('media/SA_nomm_7mer_3X_dict_count.xlsx')
outpath2_SA_nomm = "media/SA_nomm_7mer_3X_dict_count.xlsx"
data = pd.read_excel(inpath_SA_nomm)
unique_7mers = data['G5-box'].unique()
temp = data
id_set2 = {}
for j in range(temp.shape[0]):
seq = temp.loc[j, "G5-box"]
ID = temp.loc[j, "ProteinID"]
x_data = list_of_7mer_3X(seq)
for x in x_data:
if (x not in id_set2):
id_set2[x] = set()
id_set2[x].add(ID)
else:
id_set2[x].add(ID)
id_set2.items()
with open(outpath1_SA_nomm, 'w') as f:
[f.write('{0},{1}\n'.format(key, value)) for key, value in id_set2.items()]
with open(outpath2_SA_nomm, 'w') as f:
[f.write('{0},{1}\n'.format(key, [len(value), round((100 * len(value) / total), 2)])) for key, value in
id_set2.items()]
workbook = xlsxwriter.Workbook('media/7mer_3X_dict.xlsx')
outpath1 = "media/7mer_3X_dict.xlsx"
workbook = xlsxwriter.Workbook('media/7mer_3X_count_dict.xlsx')
outpath2 = "media/7mer_3X_count_dict.xlsx"
SA_nomm = pd.read_excel(inpath_SA_nomm)
A_nomm = pd.read_excel(inpath_A_nomm)
SA_mm = pd.read_excel(inpath_SA_mm)
table = [SA_nomm[['ProteinID', 'G5-box', 'Position']], A_nomm[['ProteinID', 'G5-box', 'Position']],
SA_mm[['ProteinID', 'G5-box', 'Position']]]
# table= [A_nomm[['Entry', 'G5_box', 'Value']], SA_mm[['Entry', 'G5_box', 'Value']]]
data = pd.concat(table)
data = data.reset_index(drop=True)
unique_7mers = data['G5-box'].unique()
temp = data
id_set3 = {}
for j in range(temp.shape[0]):
seq = temp.loc[j, "G5-box"]
ID = temp.loc[j, "ProteinID"]
x_data = list_of_7mer_3X(seq)
for x in x_data:
if (x not in id_set3):
id_set3[x] = set()
id_set3[x].add(ID)
else:
id_set3[x].add(ID)
id_set3.items()
with open(outpath1, 'w') as f:
[f.write('{0},{1}\n'.format(key, value)) for key, value in id_set3.items()]
with open(outpath2, 'w') as f:
[f.write('{0},{1}\n'.format(key, [len(value), round((100 * len(value) / total), 2)])) for key, value in
id_set3.items()]
def list_of_7mer_4X(sevenmer):
x_data = []
for r1 in range(7):
for r2 in range(7):
for r3 in range(7):
for r4 in range(7):
if (r1 != r2 and r1 != r3 and r1 != r4 and r2 != r3 and r2 != r4 and r3 != r4):
x = list(sevenmer)
x[r1] = "X"
x[r2] = "X"
x[r3] = "X"
x[r4] = "X"
x = ''.join(x)
x_data.append(x)
return x_data
workbook = xlsxwriter.Workbook('media/SA_mm_7mer_4X_dict.xlsx')
outpath1_SA_mm = "media/SA_mm_7mer_4X_dict.xlsx"
workbook = xlsxwriter.Workbook("media/SA_mm_7mer_4X_dict_count.xlsx")
outpath2_SA_mm = "media/SA_mm_7mer_4X_dict_count.xlsx"
data = pd.read_excel(inpath_SA_mm)
unique_7mers = data['G5-box'].unique()
temp = data
id_set = {}
for j in range(temp.shape[0]):
seq = temp.loc[j, "G5-box"]
ID = temp.loc[j, "ProteinID"]
x_data = list_of_7mer_4X(seq)
for x in x_data:
if (x not in id_set):
id_set[x] = set()
id_set[x].add(ID)
else:
id_set[x].add(ID)
id_set.items()
with open(outpath1_SA_mm, 'w') as f:
[f.write('{0},{1}\n'.format(key, value)) for key, value in id_set.items()]
with open(outpath2_SA_mm, 'w') as f:
[f.write('{0},{1}\n'.format(key, [len(value), round((100 * len(value) / total), 2)])) for key, value in
id_set.items()]
workbook = xlsxwriter.Workbook('media/A_nomm_7mer_4X_dict.xlsx')
outpath1_A_nomm = "media/A_nomm_7mer_4X_dict.xlsx"
workbook = xlsxwriter.Workbook('media/A_nomm_7mer_4X_dict_count.xlsx')
outpath2_A_nomm = "media/A_nomm_7mer_4X_dict_count.xlsx"
data = pd.read_excel(inpath_A_nomm)
unique_7mers = data['G5-box'].unique()
temp = data
id_set1 = {}
for j in range(temp.shape[0]):
seq = temp.loc[j, "G5-box"]
ID = temp.loc[j, "ProteinID"]
x_data = list_of_7mer_4X(seq)
for x in x_data:
if (x not in id_set1):
id_set1[x] = set()
id_set1[x].add(ID)
else:
id_set1[x].add(ID)
id_set1.items()
with open(outpath1_A_nomm, 'w') as f:
[f.write('{0},{1}\n'.format(key, value)) for key, value in id_set1.items()]
with open(outpath2_A_nomm, 'w') as f:
[f.write('{0},{1}\n'.format(key, [len(value), round((100 * len(value) / total), 2)])) for key, value in
id_set1.items()]
workbook = xlsxwriter.Workbook('media/SA_nomm_7mer_4X_dict.xlsx')
outpath1_SA_nomm = "media/SA_nomm_7mer_4X_dict.xlsx"
workbook = xlsxwriter.Workbook('media/SA_nomm_7mer_4X_dict_count.xlsx')
outpath2_SA_nomm = "media/SA_nomm_7mer_4X_dict_count.xlsx"
data = pd.read_excel(inpath_SA_nomm)
unique_7mers = data['G5-box'].unique()
temp = data
id_set2 = {}
for j in range(temp.shape[0]):
seq = temp.loc[j, "G5-box"]
ID = temp.loc[j, "ProteinID"]
x_data = list_of_7mer_4X(seq)
for x in x_data:
if (x not in id_set2):
id_set2[x] = set()
id_set2[x].add(ID)
else:
id_set2[x].add(ID)
id_set2.items()
with open(outpath1_SA_nomm, 'w') as f:
[f.write('{0},{1}\n'.format(key, value)) for key, value in id_set2.items()]
with open(outpath2_SA_nomm, 'w') as f:
[f.write('{0},{1}\n'.format(key, [len(value), round((100 * len(value) / total), 2)])) for key, value in
id_set2.items()]
workbook = xlsxwriter.Workbook('media/7mer_4X_dict.xlsx')
outpath1 = "media/7mer_4X_dict.xlsx"
workbook = xlsxwriter.Workbook('media/7mer_4X_count_dict.xlsx')
outpath2 = "media/7mer_4X_count_dict.xlsx"
SA_nomm = pd.read_excel(inpath_SA_nomm)
A_nomm = pd.read_excel(inpath_A_nomm)
SA_mm = pd.read_excel(inpath_SA_mm)
table = [SA_nomm[['ProteinID', 'G5-box', 'Position']], A_nomm[['ProteinID', 'G5-box', 'Position']],
SA_mm[['ProteinID', 'G5-box', 'Position']]]
# table= [A_nomm[['Entry', 'G5_box', 'Value']], SA_mm[['Entry', 'G5_box', 'Value']]]
data = pd.concat(table)
data = data.reset_index(drop=True)
unique_7mers = data['G5-box'].unique()
temp = data
id_set3 = {}
for j in range(temp.shape[0]):
seq = temp.loc[j, "G5-box"]
ID = temp.loc[j, "ProteinID"]
x_data = list_of_7mer_4X(seq)
for x in x_data:
if (x not in id_set3):
id_set3[x] = set()
id_set3[x].add(ID)
else:
id_set3[x].add(ID)
id_set3.items()
with open(outpath1, 'w') as f:
[f.write('{0},{1}\n'.format(key, value)) for key, value in id_set3.items()]
with open(outpath2, 'w') as f:
[f.write('{0},{1}\n'.format(key, [len(value), round((100 * len(value) / total), 2)])) for key, value in
id_set3.items()]
    with open(outpath2, 'r') as f:
reader = csv.reader(f)
next(reader)
answer = max(int(column[1].replace(',', '').replace('[', '').replace(']', '')) for column in reader)
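    # 'answer' is the highest protein count among all 4X-masked 7-mers; the loop below picks a
    # 7-mer with that count and strips leading/trailing 'X's to recover a concrete G5 motif (x43).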
for each in id_set3.items():
if (len(each[1]) == answer):
str3 = each[0]
if (str3.startswith("X")):
str3 = str3[1:]
if (str3.endswith("X")):
x43 = str3[:-1]
elif (str3.startswith("X")):
x43 = str3[1:]
else:
x43 = str3
elif (str3.endswith("X")):
str3 = str3[:-1]
if (str3.startswith("X")):
x43 = str3[1:]
elif (str3.endswith("X")):
x43 = str3[:-1]
else:
x43 = str3
else:
x43 = str3
abc = []
l1 = []
inpath = "media/new.xlsx"
workbook = xlsxwriter.Workbook('media/output_new.xlsx')
outpath = "media/output_new.xlsx"
df1 = pd.read_excel(inpath)
df2 = df1.set_index("Entry", drop=False)
protein = df2.loc[:, "Sequence"]
protein_id = df2.loc[:, "Entry"]
protein_id
for i in range(len(protein)):
l = H(protein_id[i], protein[i], x1, x2, x3, x43, mismatch1, mismatch2, mismatch3, mismatch4, Min_G1_G3,
Min_G3_G4, Min_G4_G5, Max_G1_G3, Max_G3_G4, Max_G4_G5)
l1.append(l)
abc = [item for sublist in l1 for item in sublist]
gdomains = pd.DataFrame(abc,
columns=['ProteinID', 'G1-box', 'Position', 'G3-box', 'Position.1', 'G4-box', 'Position.2',
'G5-box', 'Position.3'])
gdomains = gdomains[gdomains['ProteinID'].astype(bool)]
gdomains.head()
gdomains.to_excel(outpath, index=False)
df = pd.read_excel("media/output_new.xlsx")
df.to_html("media/output_new.html")
df = pd.read_excel("media/output_wo_bias.xlsx")
df.to_html("media/output_wo_bias.html")
counter = {}
inpath = "media/new.xlsx"
inpath_before = "media/output_new.xlsx"
workbook = xlsxwriter.Workbook('media/neg.xlsx')
outpath = "media/neg.xlsx"
df1 = pd.read_excel(inpath)
df2 = df1.set_index("Entry", drop=False)
protein = df2.loc[:, "Sequence"]
protein_id = df2.loc[:, "Entry"]
protein_id
for j in range(50):
abc = []
l1 = []
for i in range(len(protein)):
s = shuffler(protein[i])
l = H(protein_id[i], s, x1, x2, x3, x43, mismatch1, mismatch2, mismatch3, mismatch4, Min_G1_G3,
Min_G3_G4, Min_G4_G5, Max_G1_G3, Max_G3_G4, Max_G4_G5)
l1.append(l)
abc = [item for sublist in l1 for item in sublist]
gdomains = pd.DataFrame(abc, columns=['ProteinID', 'G1-box', 'Position', 'G3-box', 'Position', 'G4-box',
'Position', 'G5-box', 'Position'])
counter[j] = df_gdomain_counter(gdomains)
outpath1 = "media/after.csv"
outpath2 = "media/before.csv"
xyz = pd.DataFrame(pd.concat(counter.values()))
xyz['Protein'] = xyz.index
xyz_mean_after = xyz.groupby(['Protein']).sum() / 50
xyz_mean_after['Protein'] = xyz_mean_after.index
xyz_mean_after.to_csv(outpath1)
xyz_before = | pd.read_excel(inpath_before) | pandas.read_excel |
import pickle
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.dates import DateFormatter
import seaborn as sns
from gensim.models.ldamulticore import LdaMulticore
import numpy as np
# load the model from disk
filename = 'models/trained_lda.sav'
ldamodel = LdaMulticore.load(filename)
filename = 'models/trained_lda_corpus.sav'
corpus = pickle.load(open(filename, 'rb'))
#load the files
data_files = ["data/pubmed_articles_cancer_01_smaller.csv", "data/pubmed_articles_cancer_02_smaller.csv",
"data/pubmed_articles_cancer_03_smaller.csv","data/pubmed_articles_cancer_04_smaller.csv"]
input_data = pd.DataFrame()
print("load the files")
for file in data_files:
df_load = pd.read_csv(file,skip_blank_lines=True)
input_data = input_data.append(df_load)
input_data.abstract = input_data.abstract.astype('str')
dates_df = input_data.copy()
dates_df = dates_df[["pubmed_id", "created_date"]]
print("get weights")
# https://stackoverflow.com/questions/62174945/gensim-extract-100-most-representative-documents-for-each-topic
topic_probs = ldamodel.get_document_topics(corpus) #get the list of topic probabilities by doc
topic_dict = [dict(x) for x in topic_probs] #convert to dictionary to convert to data frame
df = pd.DataFrame(topic_dict).fillna(0) #convert to data frame, fill topics < 0.01 as 0
df = df.reindex(sorted(df.columns), axis=1)
columns_names = ["infection risk", "thyroid cancer", "safety and efficacy", "leukemia chemotherapy", "surgical intervention",
"lymph nodes detection", "pain management", "cervical cancer", "bladder cancer", "risk prediction",
"adjuvant therapy", "healthy habits", "hematologic toxicity", "surgical complications", "tumor angiogenesis",
"Intraoperative Radiation Therapy", "radiotherapy", "stem cell transplantation", "glioma", "behavioral intervention",
"prostate cancer"]
df.columns = columns_names
print("create new df")
# Topics per decade
Tot = 6
Cols = 3
# Compute Rows required
Rows = Tot // Cols
Rows += Tot % Cols
# Create a Position index
Position = range(1,Tot + 1)
for column in df.columns:
df[column] = np.where(df[column].values <= 0.05, 0, 1)
transformed_df = dates_df.join(df)
transformed_df.reset_index(drop=True, inplace=True)
#EDA: Very little data before 1975
transformed_df['created_date'] = | pd.to_datetime(transformed_df['created_date'], format="%m/%d/%Y %H:%M") | pandas.to_datetime |
""" A script package helping you make plots for paper.
"""
import os, sys, json
import warnings
import numpy as np
from numpy.lib.arraysetops import isin
import pandas as pd
import itertools
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
from contextlib import redirect_stderr
from os import devnull
from exptools.launching.variant import flatten_variant4hparams
# NOTE: this could limit the number of curves of your plot
color_defaults = [
'#1f77b4', # muted blue
'#ff7f0e', # safety orange
'#2ca02c', # cooked asparagus green
'#d62728', # brick red
'#9467bd', # muted purple
'#8c564b', # chestnut brown
'#e377c2', # raspberry yogurt pink
'#7f7f7f', # middle gray
'#bcbd22', # curry yellow-green
'#17becf', # blue-teal
]
class PaperCurvePlotter:
def __init__(self, exp_paths,
log_filename= "progress.csv",
param_filename= "variant_config.json",
):
"""
@ Args:
exp_paths: list[str] absolute paths
n_fig_a_row: int How many figure you want to put in a row
fig_name_shorten_level: int to make the figure title shorter, the label
will ignore the first some.
"""
self.exp_paths = exp_paths
self.log_filename = log_filename
self.param_filename = param_filename
self.color_map = dict()
self.df = pd.DataFrame()
self.interpret_exp_paths()
def interpret_exp_paths(self):
""" Go through all paths, and log them into a pandas data frame arranged by params
"""
# extract all valid log directory
for exp_path in self.exp_paths:
for path, subdirs, subfiles in os.walk(exp_path):
if self.log_filename in subfiles and self.param_filename in subfiles:
# get a valid directory
with open(os.path.join(path, self.param_filename)) as f:
param = json.load(f)
# to make all dictionary in the same level, which is easier to process
param = flatten_variant4hparams(param)
abspath = os.path.abspath(path)
param["experiment_log_path"] = abspath
self.df = self.df.append(param, ignore_index= True)
if self.df.empty:
            warnings.warn("You got an empty database, please check your exp_paths")
def make_plots(self, args_in_figures, args_in_series, x_key, y_key,
xlabel= None, ylabel= None, margins= (1, 1, 1, 1),
x_lim: tuple= (-5*1e6, 50*1e6),
y_lim: tuple=(0.0, 1e3),
sci_lim: tuple= None,
n_fig_a_row= 4,
one_fig_size= (5, 5),
fontsize= 18,
fig_name_shorten_level= 0,
show_legend= False,
):
""" The main entrance ploting all experiments by design. The two arguments are specifying
what experiments you want to plot. Both of them are dictionaries, whose keys are param keys
and values are a list of values you want to plot. If providing None, we will dig out all
options in this given key.
@ Args:
args_in_figures: dict{str: list} plot all experiments that meet the specifications
(seperated in each figure)
args_in_series: dict{str: list} plot all experiments that meet the specifications
(seperated in each curve)
margins: a tuple of 4 margin in inches, in the order of (top, bottom, left, right)
sci_lim: tuple(int, int) demote the scientific notation of x and y, (exponent of 10)
"""
self.n_fig_a_row = n_fig_a_row
self.one_fig_size = one_fig_size
self.fontsize = fontsize
self.x_lim = x_lim
self.y_lim = y_lim
self.fig_name_shorten_level = fig_name_shorten_level
self.show_legend = show_legend
self.marked_labels = []
n_figures = 1
figure_keys, figure_all_options = [], []
for key, options in args_in_figures.items():
# options: list
            # find all args that are assigned as None, and separate them
if options is None:
options = [*(self.df[key].unique())]
n_figures *= len(options)
figure_all_options.append(options)
figure_keys.append(key)
# record mapping from label name to options, which makes a 2 level nested dict
self.series_label_mapping = dict()
series_keys, series_all_options = [], []
for key, options in args_in_series.items():
if options is None:
options = [*(self.df[key].unique())]
series_all_options.append(options)
series_keys.append(key)
for i, options in enumerate(itertools.product(*series_all_options)):
label = ""
optionset = dict()
for key, option in zip(series_keys, options):
label += str(key) + ":" + str(option) + ";"
optionset[key] = option
self.series_label_mapping[label] = optionset
self.color_map[label] = color_defaults[i]
self.fig, axs = self.create_figure(n_figures)
# plot each figure one by one
for ax, fig_optionset in zip(
axs.flat, itertools.product(*figure_all_options)
):
# get all rows that meet the figure options
fig_df = self.df
fig_name = ""
for fig_key, fig_opt in zip(figure_keys, fig_optionset):
fig_df = fig_df.loc[fig_df[fig_key] == fig_opt]
fig_key = fig_key.split(".", self.fig_name_shorten_level)[-1]
fig_name += fig_key + ":" + str(fig_opt) + ";"
# plot each curve
for label, series_optionset in self.series_label_mapping.items():
# get all rows that meet the series options
ser_df = fig_df
for ser_key, ser_opt in series_optionset.items():
ser_df = ser_df.loc[ser_df[ser_key] == ser_opt]
self.plot_exp(ax,
ser_df["experiment_log_path"], x_key, y_key,
label= label,
)
            # wrap up this figure
ax.grid(color= "gray", linewidth= 0.5)
ax.tick_params(axis='both', labelsize=14)
ax.set_title(fig_name, fontsize=16)
ax.set_xlim(self.x_lim)
ax.ticklabel_format(style='sci', axis='x', scilimits=sci_lim)
self.finish_plot(xlabel, ylabel, margins)
def finish_plot(self, xlabel, ylabel, margins):
if xlabel is not None: plt.xlabel(xlabel, fontsize= self.fontsize)
if ylabel is not None: plt.ylabel(ylabel, fontsize= self.fontsize)
plt.xlim(self.x_lim)
plt.ylim(self.y_lim)
plt.subplots_adjust(
top = 1 - (margins[0] / self.fig.get_size_inches()[0]),
bottom= margins[1] / self.fig.get_size_inches()[1],
left= margins[2] / self.fig.get_size_inches()[0],
)
if self.show_legend:
plt.subplots_adjust(right= 1 - ((margins[3] + 2) / self.fig.get_size_inches()[0]))
plt.legend(
loc='upper left',
bbox_to_anchor= (1, 1),
handles= [mlines.Line2D([],[], color= v, label= k) for k, v in self.color_map.items()]
)
else:
plt.subplots_adjust(
right= 1 - (margins[3] / self.fig.get_size_inches()[0])
)
save_name = ylabel + "_to_" + xlabel + "_plots.png"
save_filename = os.path.join(os.getcwd(), save_name)
print("image saved at {}".format(save_filename))
plt.savefig(save_filename)
def create_figure(self, n_figures):
legend_margin = 5 if self.show_legend else 0
if n_figures > 1:
n_cols = min(n_figures, self.n_fig_a_row)
n_rows = (n_figures-1) // self.n_fig_a_row + 1
fig, axs = plt.subplots(n_rows, n_cols,
sharex= True,
figsize= (n_cols*self.one_fig_size[0] + legend_margin, n_rows*self.one_fig_size[1] + 1)
)
fig.add_subplot(111, frameon=False)
# hide tick and tick label of the big axes
plt.tick_params(axis='both', which='both', bottom=False, top=False,
left=False, right=False, labelcolor='none')
else:
fig, ax = plt.subplots(1, 1,
figsize= (self.one_fig_size[0] + legend_margin, self.one_fig_size[1] + 1)
)
axs = np.array(ax)
return fig, axs
def plot_exp(self, ax, paths: list, x: str= None, y= "eprew",
label= ""):
""" plot data based on given experiment logs, curve will be compute mean for all logs
@ Args:
paths: list[str] a list of absolute path
x, y: str telling which curve you want to plot (assuming lots of curves in a .csv)
"""
all_runs = []
nframes = None
for path in paths:
with open(os.path.join(path, self.log_filename), "r") as f:
try:
df = | pd.read_csv(f) | pandas.read_csv |
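# A hedged usage sketch of the plotter above. The experiment path, the hyper-parameter
# names ("env_name", "algo") and the x_key column are hypothetical -- they must match
# whatever your variant_config.json and progress.csv actually contain.
if __name__ == "__main__":
    plotter = PaperCurvePlotter(exp_paths=["/path/to/experiment/logs"])
    plotter.make_plots(
        args_in_figures={"env_name": None},   # None -> one sub-figure per value found in the logs
        args_in_series={"algo": None},        # None -> one curve per value found in the logs
        x_key="CumSteps", y_key="eprew",      # column names assumed to exist in progress.csv
        xlabel="environment steps", ylabel="episode return",
    )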
from urllib.request import Request, urlopen
import time
from bs4 import BeautifulSoup
import requests
import json
import pandas as pd
from pandas.io.json import json_normalize
import sys
import os
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))))
import camping_server2.config as config
import xmltodict
class KoreaTourApi:
def __init__(self):
self.secretKey = config.Config.PUBLIC_API_KEY
def festivalAPI(self, eventStartDate):
"""
        Festival search > set the festival start date
        eventStartDate: enter in YYYYMMDD format (ex: 20210601)
"""
url = 'http://api.visitkorea.or.kr/openapi/service/rest/KorService/searchFestival?'
param = 'ServiceKey=' + self.secretKey + '&MobileOS=ETC&MobileApp=AppTest&numOfRows=3000&arrange=A&listYN=Y'
detail_param = f'&eventStartDate={eventStartDate}'
request = Request(url + param + detail_param)
request.get_method = lambda: 'GET'
response = urlopen(request)
rescode = response.getcode()
if rescode == 200:
responseData = response.read()
rD = xmltodict.parse(responseData)
rDJ = json.dumps(rD)
rDD = json.loads(rDJ)
print(rDD)
festival_api_df = json_normalize(rDD['response']['body']['items']['item'])
return festival_api_df
# festival_api_df.to_csv(config.Config.PATH + "festival_api_info.csv", encoding='utf-8-sig')
def tourspotAPI(self, i, contentTypeId, radius=1000):
"""
        Search tourist attractions around my location
        i: search around the campsite in the i-th row of camp_api_df
        contentTypeId: pick the number from the contentType_dict below
        - contentType_dict= {'festival': 15, 'tourspot': 12, 'shopping': 38, 'restaurant': 39, }
        radius: search within this radius in meters around the given longitude/latitude (default = 1000m)
"""
camp_api_df = pd.read_csv(config.Config.PATH + "/camp_api_info.csv", encoding='utf-8-sig')
mapX = camp_api_df['mapX'].iloc[i]
mapY = camp_api_df['mapY'].iloc[i]
url = 'http://api.visitkorea.or.kr/openapi/service/rest/KorService/locationBasedList?'
param = 'ServiceKey='+self.secretKey+'&MobileOS=ETC&MobileApp=AppTest&numOfRows=3000'
        detail_param = f'&contentTypeId={contentTypeId}&mapX={mapX}&mapY={mapY}&radius={radius}&listYN=Y'  # leading '&' is needed to join onto param
request = Request(url+param+detail_param)
request.get_method = lambda: 'GET'
response = urlopen(request)
rescode = response.getcode()
if rescode == 200:
responseData = response.read()
# rDD_list = {}
rD = xmltodict.parse(responseData)
rDJ = json.dumps(rD)
rDD = json.loads(rDJ)
# rDD_list.append(rDD)
print(rDD)
tourspot_api_df = json_normalize(rDD['response']['body']['items']['item'])
return tourspot_api_df
# tourspot_api_df.to_csv(config.Config.PATH + "tourspot_api_info.csv", encoding='utf-8-sig')
    # Area-based tourist attraction search
def tourlistAPI(self, num):
item_list = ["addr1", "addr2", "areacode", "booktour", "cat1", "cat2", "cat3", "contentid", "contenttypeid", "createdtime",
"firstimage", "firstimage2", "mapx", "mapy", "mlevel", "readcount", "sigungucode", "tel", "title", "zipcode"]
data = | pd.DataFrame() | pandas.DataFrame |
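# A hedged usage sketch (requires a valid public-data API key in config, network access
# to the KorService endpoints, and camp_api_info.csv on disk; the start date and the
# campsite row index below are illustrative only).
if __name__ == "__main__":
    api = KoreaTourApi()
    festival_df = api.festivalAPI("20210601")        # festivals starting from this date
    spot_df = api.tourspotAPI(0, contentTypeId=12)   # attractions within 1000m of the first campsite
    if festival_df is not None:
        print(festival_df.head())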
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 30 15:49:39 2020
@author: Jyanqa
"""
import plotly.graph_objects as go
import Setting as s
#import plotly.express as px
import pandas as pd
#from plotly import io
from statsmodels.base.model import GenericLikelihoodModel
from scipy import stats
import numpy as np
#import statsmodels.api as sm
import scipy.stats as st
class LR(GenericLikelihoodModel):
def loglike(self, params):
scale = params[-1]
weights = params[:-1]
y_hat = np.sum(weights * self.exog[:, :-1], axis=-1)
return np.sum(stats.norm.logpdf(self.endog, loc=y_hat, scale=scale))
def boot(y,x):
model = LR(y, x)
res = model.fit()
    resboot = res.bootstrap(s.noboot)  # bootstrap s.noboot times
mean = resboot[0]
std = resboot[1]
z_score = st.norm.ppf(1 - (1 - s.confi)/2)
upCI = mean + (z_score*(std/s.noboot))
lowCI = mean - (z_score*(std/s.noboot))
bootres = | pd.DataFrame({'Average Coefs':mean, 'Upper CI Coefs': upCI, 'Lower CI Coefs':lowCI}) | pandas.DataFrame |
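# A hedged usage sketch on synthetic data. LR.loglike treats the last parameter as the
# error scale and ignores the last exog column, so a dummy trailing column is appended;
# `Setting` is assumed to define noboot and confi, and boot() is assumed to return the
# CI frame built above.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    n = 200
    x1 = rng.normal(size=n)
    y = 1.5 + 2.0 * x1 + rng.normal(scale=0.5, size=n)
    X = np.column_stack([np.ones(n), x1, np.ones(n)])  # intercept, regressor, dummy scale slot
    print(boot(y, X))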
import datetime
from flask import Flask,redirect, url_for, request
import matplotlib.pyplot as plt
import pandas as pd
import anomalies.config as config
def plot_results():
year = request.form["year"]
from_month = request.form["from_month"]
to_month = request.form["to_month"]
currency = request.form["currency"]
print('anomalies are detecting...')
print('year: ' + str(year))
print('from_month: ' + str(from_month))
print('to_month: ' + str(to_month))
start_date = request.form["year"] + "-" + request.form["from_month"] + "-01"
#end_date = request.form["year"] + "-" + str(int(request.form["to_month"]) + 1) + "-01"
if (int(request.form["to_month"]) == 12):
end_date = str(int(request.form["year"]) + 1) + "-" + str(1) + "-01"
else:
end_date = request.form["year"] + "-" + str(int(request.form["to_month"]) + 1) + "-01"
quotes = pd.read_csv("static/data/"+currency+"/DAT_MT_"+currency+"_M1_" + str(year) + ".csv")
quotes['Time'] = quotes[['Date', 'Time']].apply(lambda x: ' '.join(x), axis=1)
quotes['Time'] = quotes['Time'].apply(lambda x: | pd.to_datetime(x) | pandas.to_datetime |
import numpy as np
import pandas as pd
import os
import re
import xml.etree.ElementTree as ET
from pylatex import Document, Section, Subsection, Command, LargeText
from pylatex.utils import italic, NoEscape, bold
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
import seaborn as sns
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
from tjbioarticles.keywords import Keywords
from tjbioarticles.pubmed_extraction import PubMedExtraction
class ArticlesData(object):
    '''The ArticlesData object collects the article information in a useful
    DataFrame and allows some output analysis'''
def __init__(self, output_dir, xml_dir, latex_dir, email):
self.output_dir = output_dir
self.xml_dir = xml_dir
self.latex_dir = latex_dir
self.country_dict = pd.read_excel(r'input_data/country_dict.xlsx')
self.email = email
self.data_analysis = pd.DataFrame()
self.create_data_structs()
return
def load_data(self, data_excel):
self.data_analysis = pd.read_excel(data_excel)
return
def load_xml(self, filename):
mytree = ET.parse(filename)
self.xml_data = mytree.getroot()
return
def export_data(self, output_excel):
final_data = self.data_analysis.drop('abstract', axis=1)
final_data.to_excel(output_excel, index=False)
return
def insert_values_from_excel(self, input_excel):
data = pd.read_excel(r'input_data/country_dict.xlsx')
self.data_analysis = self.data_analysis.append(data,
ignore_index=True)
return
def insert_values_from_PubMed_keywords(self, keywords, file_keysearch,
file_articles_found):
pub_data = PubMedExtraction(self.email)
pub_data.find_id_from_key(keywords)
self.collect_pubmed_data(pub_data, keywords)
keywords.save_keys_output(file_keysearch, file_articles_found)
return
def insert_values_from_PubMed_title(self, title):
pub_data = PubMedExtraction()
pub_data.find_id_from_title(title)
self.collect_pubmed_data(pub_data)
return
def create_data_structs(self):
        '''The create_data_structs function generates the data_analysis
        columns.'''
self.data_analysis['id'] = ''
self.data_analysis['filename'] = ''
self.data_analysis['year'] = ''
self.data_analysis['first_author'] = ''
self.data_analysis['title'] = ''
self.data_analysis['journal'] = ''
self.data_analysis['pub_status'] = ''
self.data_analysis['authors'] = ''
self.data_analysis['authors_info'] = ''
self.data_analysis['country'] = ''
self.data_analysis['keywords_major'] = ''
self.data_analysis['keywords_minor'] = ''
self.data_analysis['search_key'] = ''
self.data_analysis['abstract'] = ''
self.abstract = {}
return
def collect_pubmed_data(self, pub_data, keywords):
for article_id in pub_data.idlist:
xml_data = pub_data.find_info_xml(article_id)
f = open(self.xml_dir + 'tmp.xml', "wb")
f.write(xml_data)
f.close()
mytree = ET.parse(self.xml_dir + 'tmp.xml')
self.xml_data = mytree.getroot()
data = | pd.DataFrame([article_id], columns=['id']) | pandas.DataFrame |
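# A hedged usage sketch. The directories, e-mail address and Excel file names are
# hypothetical, __init__ also expects input_data/country_dict.xlsx to exist, and the
# loaded sheet is assumed to contain the analysis columns (including 'abstract').
if __name__ == "__main__":
    articles = ArticlesData(output_dir="output/", xml_dir="xml/",
                            latex_dir="latex/", email="user@example.com")
    articles.load_data("articles.xlsx")
    articles.export_data("articles_clean.xlsx")   # writes everything except the abstracts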
## Code for automated data processing for ML
## Author: <NAME>
## Date: 2016-01-01
## importing libraries
import numpy as np
import pandas as pd
import sys
from sklearn.preprocessing import LabelEncoder
## class for data
class Data:
## initialization function
def __init__(self, train, test):
self.train_raw = train
self.test_raw = test
self.train = pd.DataFrame.copy(train)
self.test = pd.DataFrame.copy(test)
## function for extracting id column
def extract_ids(self):
"""Extracts id column if present, else generates default.
"""
# checking if first column is id
first_column = self.train.columns[0].lower()
id_flag = 0
if "id" in first_column or "no" in first_column or "number" in first_column:
while True:
id_input = str(input("Is %s the observation id/number?(y/n): " % (str(self.train.columns[0]))).lower())
if id_input.lower() not in ["y","n"]:
print("Please enter y or n")
else:
print("")
break
break
if id_input == "y":
id_flag = 1
id_column = self.train.columns[0]
self.train_ids = np.array(self.train[id_column])
self.train.drop(id_column, axis=1, inplace=True)
print("Column %s extracted as id from train data" % (id_column))
try:
self.test_ids = np.array(self.test[id_column])
self.test.drop(id_column, axis=1, inplace=True)
print("Column %s extracted as id from test data" % (id_column))
except:
self.test_ids = np.arange(len(self.test)) + 1
print("Column %s not found in test data, created default ids" % (id_column))
# asking for id column
if id_flag == 0:
while True:
id_column = str(input("Please enter column name of id (or type none if no such column exists): "))
if id_column.lower() != "none" and id_column not in self.train.columns.values:
print("Column %s not found in train data" % (id_column))
else:
print("")
break
break
if id_column != "none":
self.train_ids = np.array(self.train[id_column])
self.train.drop(id_column, axis=1, inplace=True)
print("Column %s extracted as id from train data" % (id_column))
try:
self.test_ids = np.array(self.test[id_column])
self.test.drop(id_column, axis=1, inplace=True)
print("Column %s extracted as id from test data" % (id_column))
except:
self.test_ids = np.arange(len(self.test)) + 1
print("Column %s not found in test data, created default ids" %(id_column))
else:
self.train_ids = np.arange(len(self.train)) + 1
self.test_ids = np.arange(len(self.test)) + 1
print("Created default ids for train data")
print("Created default ids for test data")
print("")
## function for extracting target variable
def extract_target(self):
"""Extracts target variable.
"""
target_flag = 0
# checking if target variable is present in train data
for colname in self.train.columns.values:
if colname.lower() in ["response","result","target"]:
while True:
target_input = str(input("Is %s the target variable?(y/n): " % (colname)))
if target_input not in ["y","n"]:
print("Please enter y or n")
else:
print("")
break
break
if target_input == "y":
target_flag = 1
self.target = np.array(self.train[colname])
self.train.drop(colname, axis=1, inplace=True)
if colname in self.test.columns.values:
self.test.drop(colname, axis=1, inplace=True)
print("Column %s extracted as target variable from data" % (colname))
# asking for target variable
if target_flag == 0:
while True:
target_column = str(input("Please enter column name of target variable (or q to quit): "))
if target_column == "q":
sys.exit()
if target_column not in self.train.columns.values:
print("Column %s not found in train data" % (target_column))
else:
print("")
break
break
self.target = np.array(self.train[target_column])
self.train.drop(target_column, axis=1, inplace=True)
if target_column in self.test.columns.values:
self.test.drop(target_column, axis=1, inplace=True)
print("Column %s extracted as target variable from data" % (target_column))
## function for checking columns
def check_column_names(self):
"""Checks if all columns are present and removes ones that aren"t.
"""
train_cols = []
test_cols = []
# extracting columns present in train but not in test
for colname in self.train.columns:
if colname not in self.test.columns:
train_cols.append(colname)
# extracting columns present in test but not in train
for colname in self.test.columns:
if colname not in self.train.columns:
test_cols.append(colname)
# removing columns from train
if len(train_cols) > 0:
for i in train_cols:
del self.train[i]
print("Column %s not found in test data, removed from train data" % (i))
# removing columns from test
if len(test_cols) > 0:
for i in test_cols:
del self.test[i]
print("Column %s not found in train data, removed from test data" % (i))
self.test = self.test[self.train.columns]
print("")
## function for removing constant columns
def remove_constant_variables(self):
"""Removes all columns with constant value.
"""
# creating panel
panel = | pd.concat([self.train, self.test], ignore_index=True) | pandas.concat |
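# A hedged usage sketch of the preprocessing pipeline above. The CSV paths are
# hypothetical, and extract_ids/extract_target prompt interactively on stdin.
if __name__ == "__main__":
    train_df = pd.read_csv("train.csv")
    test_df = pd.read_csv("test.csv")
    data = Data(train_df, test_df)
    data.extract_ids()                  # asks which column (if any) is the row id
    data.extract_target()               # asks which column is the target
    data.check_column_names()           # drops columns missing from either split
    data.remove_constant_variables()    # drops columns with a single value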
# -*- coding: utf-8 -*-
#
#################
# This script takes as an input the ice core data
# from MacFarling Meure et al. (2006), imported in csv format from
# the original .xlsx file, and returns a csv file with
# tidy inferred atmospheric concentrations since year 1 of the current era.
#
# Last updated: Apr 2021
# Author: <NAME>
#
#################
import numpy as np
import pandas as pd
######### Get clean ice core data #########
data_ = pd.DataFrame(pd.read_csv('../processed/law2006_by_year_clean.csv', header=0))
# Aggregate data
proc_data = | pd.DataFrame() | pandas.DataFrame |
from random import randint
import pandas as pd
import pytest
from janitor.timeseries import _get_missing_timestamps, fill_missing_timestamps
# Random data for testing
@pytest.fixture
def timeseries_dataframe() -> pd.DataFrame:
"""
Returns a time series dataframe
"""
ts_index = | pd.date_range("1/1/2019", periods=1000, freq="1H") | pandas.date_range |
import numpy as np
import pandas as pd
import pandas._testing as tm
def test_mutate_groups():
# GH3380
df = pd.DataFrame(
{
"cat1": ["a"] * 8 + ["b"] * 6,
"cat2": ["c"] * 2
+ ["d"] * 2
+ ["e"] * 2
+ ["f"] * 2
+ ["c"] * 2
+ ["d"] * 2
+ ["e"] * 2,
"cat3": [f"g{x}" for x in range(1, 15)],
"val": np.random.randint(100, size=14),
}
)
def f_copy(x):
x = x.copy()
x["rank"] = x.val.rank(method="min")
return x.groupby("cat2")["rank"].min()
def f_no_copy(x):
x["rank"] = x.val.rank(method="min")
return x.groupby("cat2")["rank"].min()
grpby_copy = df.groupby("cat1").apply(f_copy)
grpby_no_copy = df.groupby("cat1").apply(f_no_copy)
tm.assert_series_equal(grpby_copy, grpby_no_copy)
def test_no_mutate_but_looks_like():
# GH 8467
# first show's mutation indicator
# second does not, but should yield the same results
df = pd.DataFrame({"key": [1, 1, 1, 2, 2, 2, 3, 3, 3], "value": range(9)})
result1 = df.groupby("key", group_keys=True).apply(lambda x: x[:].key)
result2 = df.groupby("key", group_keys=True).apply(lambda x: x.key)
tm.assert_series_equal(result1, result2)
def test_apply_function_with_indexing():
# GH: 33058
df = pd.DataFrame(
{"col1": ["A", "A", "A", "B", "B", "B"], "col2": [1, 2, 3, 4, 5, 6]}
)
def fn(x):
x.col2[x.index[-1]] = 0
return x.col2
result = df.groupby(["col1"], as_index=False).apply(fn)
expected = pd.Series(
[1, 2, 0, 4, 5, 0],
index=pd.MultiIndex.from_tuples(
[(0, 0), (0, 1), (0, 2), (1, 3), (1, 4), (1, 5)]
),
name="col2",
)
tm.assert_series_equal(result, expected)
def test_apply_mutate_columns_multiindex():
# GH 12652
df = pd.DataFrame(
{
("C", "julian"): [1, 2, 3],
("B", "geoffrey"): [1, 2, 3],
("A", "julian"): [1, 2, 3],
("B", "julian"): [1, 2, 3],
("A", "geoffrey"): [1, 2, 3],
("C", "geoffrey"): [1, 2, 3],
},
columns=pd.MultiIndex.from_tuples(
[
("A", "julian"),
("A", "geoffrey"),
("B", "julian"),
("B", "geoffrey"),
("C", "julian"),
("C", "geoffrey"),
]
),
)
def add_column(grouped):
name = grouped.columns[0][1]
grouped["sum", name] = grouped.sum(axis=1)
return grouped
result = df.groupby(level=1, axis=1).apply(add_column)
expected = pd.DataFrame(
[
[1, 1, 1, 3, 1, 1, 1, 3],
[2, 2, 2, 6, 2, 2, 2, 6],
[
3,
3,
3,
9,
3,
3,
3,
9,
],
],
columns=pd.MultiIndex.from_tuples(
[
("geoffrey", "A", "geoffrey"),
("geoffrey", "B", "geoffrey"),
("geoffrey", "C", "geoffrey"),
("geoffrey", "sum", "geoffrey"),
("julian", "A", "julian"),
("julian", "B", "julian"),
("julian", "C", "julian"),
("julian", "sum", "julian"),
]
),
)
| tm.assert_frame_equal(result, expected) | pandas._testing.assert_frame_equal |
# Copyright 2019 The Glow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from glow import glow
import pandas as pd
import numpy as np
from pyspark import SparkContext
from pyspark.sql import DataFrame, Row, SparkSession, SQLContext
from typeguard import check_argument_types, check_return_type
from typing import Any, Dict, List
from nptyping import Float, NDArray
from .ridge_reduction import RidgeReduction
from .ridge_regression import RidgeRegression
from .logistic_ridge_regression import LogisticRidgeRegression
__all__ = [
'get_sample_ids', 'block_variants_and_samples', 'reshape_for_gwas', 'estimate_loco_offsets'
]
def _get_contigs_from_loco_df(df: pd.DataFrame) -> pd.Series:
return df.index.get_level_values(1).unique()
def __validate_sample_ids(sample_ids: List[str]):
""""
Validates that a set of sample IDs are valid (non-empty and unique).
"""
assert check_argument_types()
if any(not s for s in sample_ids):
raise Exception("Cannot have empty sample IDs.")
if len(sample_ids) != len(set(sample_ids)):
raise Exception("Cannot have duplicated sample IDs.")
def __get_index_map(sample_ids: List[str], sample_block_count: int,
sql_ctx: SQLContext) -> Dict[str, List[str]]:
"""
Creates an index mapping from sample blocks to a list of corresponding sample IDs. Uses the same sample-blocking
logic as the blocked GT matrix transformer.
Requires that:
- Each variant row has the same number of values
- The number of values per row matches the number of sample IDs
Args:
sample_ids : The list of sample ID strings
sample_block_count : The number of sample blocks
Returns:
index mapping from sample block IDs to a list of sample IDs
"""
assert check_argument_types()
sample_id_df = sql_ctx.createDataFrame([Row(values=sample_ids)])
make_sample_blocks_fn = SparkContext._jvm.io.projectglow.transformers.blockvariantsandsamples.VariantSampleBlockMaker.makeSampleBlocks
output_jdf = make_sample_blocks_fn(sample_id_df._jdf, sample_block_count)
output_df = DataFrame(output_jdf, sql_ctx)
index_map = {r.sample_block: r.values for r in output_df.collect()}
assert check_return_type(index_map)
return index_map
def get_sample_ids(data: DataFrame) -> List[str]:
"""
Extracts sample IDs from a variant DataFrame, such as one read from PLINK files.
Requires that the sample IDs:
- Are in ``genotype.sampleId``
- Are the same across all the variant rows
- Are a list of strings
- Are non-empty
- Are unique
Args:
data : The variant DataFrame containing sample IDs
Returns:
list of sample ID strings
"""
assert check_argument_types()
distinct_sample_id_sets = data.selectExpr("genotypes.sampleId as sampleIds").distinct()
if distinct_sample_id_sets.count() != 1:
raise Exception("Each row must have the same set of sample IDs.")
sample_ids = distinct_sample_id_sets.head().sampleIds
__validate_sample_ids(sample_ids)
assert check_return_type(sample_ids)
return sample_ids
def block_variants_and_samples(variant_df: DataFrame, sample_ids: List[str],
variants_per_block: int,
sample_block_count: int) -> (DataFrame, Dict[str, List[str]]):
"""
Creates a blocked GT matrix and index mapping from sample blocks to a list of corresponding sample IDs. Uses the
same sample-blocking logic as the blocked GT matrix transformer.
Requires that:
- Each variant row has the same number of values
- The number of values per row matches the number of sample IDs
Args:
variant_df : The variant DataFrame
sample_ids : The list of sample ID strings
variants_per_block : The number of variants per block
sample_block_count : The number of sample blocks
Returns:
tuple of (blocked GT matrix, index mapping)
"""
assert check_argument_types()
first_row = variant_df.selectExpr("size(values) as numValues").take(1)
if not first_row:
raise Exception("DataFrame has no values.")
num_values = first_row[0].numValues
if num_values != len(sample_ids):
raise Exception(
f"Number of values does not match between DataFrame ({num_values}) and sample ID list ({len(sample_ids)})."
)
__validate_sample_ids(sample_ids)
blocked_gt = glow.transform("block_variants_and_samples",
variant_df,
variants_per_block=variants_per_block,
sample_block_count=sample_block_count)
index_map = __get_index_map(sample_ids, sample_block_count, variant_df.sql_ctx)
output = blocked_gt, index_map
assert check_return_type(output)
return output
def reshape_for_gwas(spark: SparkSession, label_df: pd.DataFrame) -> DataFrame:
"""
Reshapes a Pandas DataFrame into a Spark DataFrame with a convenient format for Glow's GWAS
functions. This function can handle labels that are either per-sample or per-sample and
per-contig, like those generated by GloWGR's transform_loco function.
Examples:
.. invisible-code-block:
import pandas as pd
>>> label_df = pd.DataFrame({'label1': [1, 2], 'label2': [3, 4]}, index=['sample1', 'sample2'])
>>> reshaped = reshape_for_gwas(spark, label_df)
>>> reshaped.head()
Row(label='label1', values=[1, 2])
>>> loco_label_df = pd.DataFrame({'label1': [1, 2], 'label2': [3, 4]},
... index=pd.MultiIndex.from_tuples([('sample1', 'chr1'), ('sample1', 'chr2')]))
>>> reshaped = reshape_for_gwas(spark, loco_label_df)
>>> reshaped.head()
Row(contigName='chr1', label='label1', values=[1])
Requires that:
- The input label DataFrame is indexed by sample id or by (sample id, contig name)
Args:
spark : A Spark session
label_df : A pandas DataFrame containing labels. The Data Frame should either be indexed by
sample id or multi indexed by (sample id, contig name). Each column is interpreted as a
label.
Returns:
A Spark DataFrame with a convenient format for Glow regression functions. Each row contains
the label name, the contig name if provided in the input DataFrame, and an array containing
the label value for each sample.
"""
assert check_argument_types()
if label_df.index.nlevels == 1: # Indexed by sample id
transposed_df = label_df.T
column_names = ['label', 'values']
elif label_df.index.nlevels == 2: # Indexed by sample id and contig name
# stacking sorts the new column index, so we remember the original sample
# ordering in case it's not sorted
def transpose_one(contig):
transposed = label_df.xs(contig, level=1).T
return transposed
contigs = _get_contigs_from_loco_df(label_df)
transposed_df = pd.concat([transpose_one(contig) for contig in contigs],
keys=contigs,
names=['contig', 'label'])
column_names = ['contigName', 'label', 'values']
else:
raise ValueError('label_df must be indexed by sample id or by (sample id, contig name)')
# Can only create a Spark DataFrame from pandas with ndarray columns in Spark 3+
if int(spark.version.split('.')[0]) < 3:
values = transposed_df.to_numpy().tolist()
else:
values = list(transposed_df.to_numpy())
transposed_df['values_array'] = values
return spark.createDataFrame(transposed_df[['values_array']].reset_index(), column_names)
def estimate_loco_offsets(block_df: DataFrame,
label_df: pd.DataFrame,
sample_blocks: Dict[str, List[str]],
cov_df: pd.DataFrame = | pd.DataFrame({}) | pandas.DataFrame |
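# A hedged sketch of how these helpers compose. It assumes an active SparkSession with
# Glow registered and a VCF at the illustrative path; the block sizes are arbitrary.
def _example_gwas_prep(spark: SparkSession, label_df: pd.DataFrame):
    variant_df = spark.read.format("vcf").load("genotypes.vcf")
    sample_ids = get_sample_ids(variant_df)
    blocked_gt, index_map = block_variants_and_samples(
        variant_df, sample_ids, variants_per_block=1000, sample_block_count=10)
    labels = reshape_for_gwas(spark, label_df)
    return blocked_gt, index_map, labels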
from datetime import datetime
import numpy as np
import pytest
from pandas import Series, _testing as tm
def test_title():
values = Series(["FOO", "BAR", np.nan, "Blah", "blurg"])
result = values.str.title()
exp = Series(["Foo", "Bar", np.nan, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", np.nan, "bar", True, datetime.today(), "blah", None, 1, 2.0])
mixed = mixed.str.title()
exp = Series(["Foo", np.nan, "Bar", np.nan, np.nan, "Blah", np.nan, np.nan, np.nan])
tm.assert_almost_equal(mixed, exp)
def test_lower_upper():
values = Series(["om", np.nan, "nom", "nom"])
result = values.str.upper()
exp = Series(["OM", np.nan, "NOM", "NOM"])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
# mixed
mixed = Series(["a", np.nan, "b", True, datetime.today(), "foo", None, 1, 2.0])
mixed = mixed.str.upper()
rs = Series(mixed).str.lower()
xp = Series(["a", np.nan, "b", np.nan, np.nan, "foo", np.nan, np.nan, np.nan])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
def test_capitalize():
values = Series(["FOO", "BAR", np.nan, "Blah", "blurg"])
result = values.str.capitalize()
exp = | Series(["Foo", "Bar", np.nan, "Blah", "Blurg"]) | pandas.Series |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Batch processing for GRD products
"""
import os
import json
import itertools
import logging
import pandas as pd
from pathlib import Path
from godale._concurrent import Executor
from ost import Sentinel1Scene
from ost.s1 import grd_to_ard
from ost.helpers import raster as ras
from ost.generic import ts_extent
from ost.generic import ts_ls_mask
from ost.generic import ard_to_ts
from ost.generic import timescan
from ost.generic import mosaic
logger = logging.getLogger(__name__)
def _create_processing_dict(inventory_df):
"""Function that creates a dictionary to handle GRD batch processing
    This helper function takes the inventory dataframe and creates
    a dictionary with the track as key and, for each acquisition date on
    that track, the list of scene identifiers to process as the value.
:param inventory_df:
:return:
"""
# initialize empty dictionary
dict_scenes = {}
# get relative orbits and loop through each
track_list = inventory_df["relativeorbit"].unique()
for track in track_list:
# get acquisition dates and loop through each
acquisition_dates = inventory_df["acquisitiondate"][
inventory_df["relativeorbit"] == track
].unique()
# loop through dates
for i, acquisition_date in enumerate(acquisition_dates):
# get the scene ids per acquisition_date and write into a list
single_id = inventory_df["identifier"][
(inventory_df["relativeorbit"] == track)
& (inventory_df["acquisitiondate"] == acquisition_date)
].tolist()
# add this list to the dictionary and associate the track number
# as dict key
dict_scenes[f"{track}_{i+1}"] = single_id
return dict_scenes
def create_processed_df(inventory_df, list_of_scenes, outfile, out_ls, error):
df = pd.DataFrame(columns=["identifier", "outfile", "out_ls", "error"])
for scene in list_of_scenes:
temp_df = pd.DataFrame()
# get scene_id
temp_df["identifier"] = inventory_df.identifier[
inventory_df.identifier == scene
].values
# fill outfiles/error
temp_df["outfile"] = outfile
temp_df["out_ls"] = out_ls
temp_df["error"] = error
# append to final df and delete temp_df for next loop
df = pd.concat([df, temp_df])
del temp_df
return df
def grd_to_ard_batch(inventory_df, config_file):
# load relevant config parameters
with open(config_file, "r") as file:
config_dict = json.load(file)
download_dir = Path(config_dict["download_dir"])
data_mount = Path(config_dict["data_mount"])
# where all frames are grouped into acquisitions
processing_dict = _create_processing_dict(inventory_df)
processing_df = | pd.DataFrame(columns=["identifier", "outfile", "out_ls", "error"]) | pandas.DataFrame |
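# A small illustration of how _create_processing_dict groups an inventory frame;
# the column names follow the function above, the values below are made up.
if __name__ == "__main__":
    demo_inventory = pd.DataFrame({
        "relativeorbit": [8, 8, 8, 110],
        "acquisitiondate": ["20200101", "20200101", "20200113", "20200102"],
        "identifier": ["S1A_scene_a", "S1A_scene_b", "S1A_scene_c", "S1B_scene_d"],
    })
    # -> {'8_1': ['S1A_scene_a', 'S1A_scene_b'], '8_2': ['S1A_scene_c'], '110_1': ['S1B_scene_d']}
    print(_create_processing_dict(demo_inventory))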
import numpy as np
import pandas as pd
import os
from ogcore.constants import VAR_LABELS, DEFAULT_START_YEAR
from ogcore import wealth, tax
from ogcore.utils import save_return_table, Inequality
cur_path = os.path.split(os.path.abspath(__file__))[0]
def macro_table(
base_tpi,
base_params,
reform_tpi=None,
reform_params=None,
var_list=["Y", "C", "K", "L", "r", "w"],
output_type="pct_diff",
num_years=10,
include_SS=True,
include_overall=True,
start_year=DEFAULT_START_YEAR,
table_format=None,
path=None,
):
"""
Create a table of macro aggregates.
Args:
base_tpi (dictionary): TPI output from baseline run
base_params (OG-Core Specifications class): baseline parameters
object
reform_tpi (dictionary): TPI output from reform run
reform_params (OG-Core Specifications class): reform parameters
object
var_list (list): names of variable to use in table
output_type (string): type of plot, can be:
'pct_diff': plots percentage difference between baselien
and reform ((reform-base)/base)
'diff': plots difference between baseline and reform (reform-base)
'levels': variables in model units
num_years (integer): number of years to include in table
include_SS (bool): whether to include the steady-state results
in the table
include_overall (bool): whether to include results over the
entire budget window as a column in the table
start_year (integer): year to start table
table_format (string): format to return table in: 'csv', 'tex',
'excel', 'json', if None, a DataFrame is returned
path (string): path to save table to
Returns:
table (various): table in DataFrame or string format or `None`
if saved to disk
"""
assert isinstance(start_year, (int, np.integer))
assert isinstance(num_years, (int, np.integer))
# Make sure both runs cover same time period
if reform_tpi is not None:
assert base_params.start_year == reform_params.start_year
year_vec = np.arange(start_year, start_year + num_years)
start_index = start_year - base_params.start_year
# Check that reform included if doing pct_diff or diff plot
if output_type == "pct_diff" or output_type == "diff":
assert reform_tpi is not None
year_list = year_vec.tolist()
if include_overall:
year_list.append(str(year_vec[0]) + "-" + str(year_vec[-1]))
if include_SS:
year_list.append("SS")
table_dict = {"Year": year_list}
for i, v in enumerate(var_list):
if output_type == "pct_diff":
# multiple by 100 so in percentage points
results = ((reform_tpi[v] - base_tpi[v]) / base_tpi[v]) * 100
results_years = results[start_index : start_index + num_years]
results_overall = (
(
reform_tpi[v][start_index : start_index + num_years].sum()
- base_tpi[v][start_index : start_index + num_years].sum()
)
/ base_tpi[v][start_index : start_index + num_years].sum()
) * 100
results_SS = results[-1]
results_for_table = results_years
if include_overall:
results_for_table = np.append(
results_for_table, results_overall
)
if include_SS:
results_for_table = np.append(results_for_table, results_SS)
table_dict[VAR_LABELS[v]] = results_for_table
elif output_type == "diff":
results = reform_tpi[v] - base_tpi[v]
results_years = results[start_index : start_index + num_years]
results_overall = (
reform_tpi[v][start_index : start_index + num_years].sum()
- base_tpi[v][start_index : start_index + num_years].sum()
)
results_SS = results[-1]
results_for_table = results_years
if include_overall:
results_for_table = np.append(
results_for_table, results_overall
)
if include_SS:
results_for_table = np.append(results_for_table, results_SS)
table_dict[VAR_LABELS[v]] = results_for_table
else:
results_years = base_tpi[v][start_index : start_index + num_years]
results_overall = results_years.sum()
results_SS = base_tpi[v][-1]
results_for_table = results_years
if include_overall:
results_for_table = np.append(
results_for_table, results_overall
)
if include_SS:
results_for_table = np.append(results_for_table, results_SS)
table_dict[VAR_LABELS[v] + " Baseline"] = results_for_table
if reform_tpi is not None:
results_years = reform_tpi[v][
start_index : start_index + num_years
]
results_overall = results_years.sum()
results_SS = reform_tpi[v][-1]
results_for_table = results_years
if include_overall:
results_for_table = np.append(
results_for_table, results_overall
)
if include_SS:
results_for_table = np.append(
results_for_table, results_SS
)
table_dict[VAR_LABELS[v] + " Reform"] = results_for_table
# Make df with dict so can use pandas functions
table_df = (
pd.DataFrame.from_dict(table_dict, orient="columns")
.set_index("Year")
.transpose()
)
table_df.reset_index(inplace=True)
table_df.rename(columns={"index": "Variable"}, inplace=True)
table = save_return_table(table_df, table_format, path)
return table
def macro_table_SS(
base_ss,
reform_ss,
var_list=["Yss", "Css", "Kss", "Lss", "rss", "wss"],
table_format=None,
path=None,
):
"""
Create a table of macro aggregates from the steady-state solutions.
Args:
base_ss (dictionary): SS output from baseline run
reform_ss (dictionary): SS output from reform run
var_list (list): names of variable to use in table
table_format (string): format to return table in: 'csv', 'tex',
'excel', 'json', if None, a DataFrame is returned
path (string): path to save table to
Returns:
table (various): table in DataFrame or string format or `None`
if saved to disk
"""
table_dict = {
"Variable": [],
"Baseline": [],
"Reform": [],
"% Change (or pp diff)": [],
}
for i, v in enumerate(var_list):
table_dict["Variable"].append(VAR_LABELS[v])
table_dict["Baseline"].append(base_ss[v])
table_dict["Reform"].append(reform_ss[v])
if v != "D/Y":
diff = ((reform_ss[v] - base_ss[v]) / base_ss[v]) * 100
else:
diff = (
reform_ss["Dss"] / reform_ss["Yss"]
- base_ss["Dss"] / base_ss["Yss"]
)
table_dict["% Change (or pp diff)"].append(diff)
# Make df with dict so can use pandas functions
table_df = pd.DataFrame.from_dict(
table_dict, orient="columns"
).set_index("Variable")
table = save_return_table(table_df, table_format, path, precision=3)
return table
def ineq_table(
base_ss,
base_params,
reform_ss=None,
reform_params=None,
var_list=["cssmat"],
table_format=None,
path=None,
):
"""
Creates table with various inequality measures in the model
steady-state.
Args:
base_ss (dictionary): SS output from baseline run
base_params (OG-Core Specifications class): baseline parameters
object
reform_ss (dictionary): SS output from reform run
reform_params (OG-Core Specifications class): reform parameters
object
var_list (list): names of variable to use in table
table_format (string): format to return table in: 'csv', 'tex',
'excel', 'json', if None, a DataFrame is returned
path (string): path to save table to
Returns:
table (various): table in DataFrame or string format or `None`
if saved to disk
"""
table_dict = {
"Steady-State Variable": [],
"Inequality Measure": [],
"Baseline": [],
}
if reform_ss:
table_dict["Reform"] = []
table_dict["% Change"] = []
for i, v in enumerate(var_list):
base_ineq = Inequality(
base_ss[v],
base_params.omega_SS,
base_params.lambdas,
base_params.S,
base_params.J,
)
if reform_ss:
reform_ineq = Inequality(
reform_ss[v],
reform_params.omega_SS,
reform_params.lambdas,
reform_params.S,
reform_params.J,
)
table_dict["Steady-State Variable"].extend(
[VAR_LABELS[v], "", "", "", ""]
)
table_dict["Inequality Measure"].extend(
[
"Gini Coefficient",
"Var of Logs",
"90/10 Ratio",
"Top 10% Share",
"Top 1% Share",
]
)
base_values = np.array(
[
base_ineq.gini(),
base_ineq.var_of_logs(),
base_ineq.ratio_pct1_pct2(0.90, 0.10),
base_ineq.top_share(0.1),
base_ineq.top_share(0.01),
]
)
table_dict["Baseline"].extend(list(base_values))
if reform_ss:
reform_values = np.array(
[
reform_ineq.gini(),
reform_ineq.var_of_logs(),
reform_ineq.ratio_pct1_pct2(0.90, 0.10),
reform_ineq.top_share(0.1),
reform_ineq.top_share(0.01),
]
)
table_dict["Reform"].extend(list(reform_values))
table_dict["% Change"].extend(
list(((reform_values - base_values) / base_values) * 100)
)
# Make df with dict so can use pandas functions
table_df = pd.DataFrame.from_dict(table_dict)
table = save_return_table(table_df, table_format, path, precision=3)
return table
def gini_table(
base_ss,
base_params,
reform_ss=None,
reform_params=None,
var_list=["cssmat"],
table_format=None,
path=None,
):
"""
Creates table with measures of the Gini coefficient: overall,
across lifetime earnings group, and across age.
Args:
base_ss (dictionary): SS output from baseline run
base_params (OG-Core Specifications class): baseline parameters
object
reform_ss (dictionary): SS output from reform run
reform_params (OG-Core Specifications class): reform parameters
object
var_list (list): names of variable to use in table
table_format (string): format to return table in: 'csv', 'tex',
'excel', 'json', if None, a DataFrame is returned
path (string): path to save table to
Returns:
table (various): table in DataFrame or string format or `None`
if saved to disk
"""
table_dict = {"Steady-State Variable": [], "Gini Type": [], "Baseline": []}
if reform_ss:
table_dict["Reform"] = []
table_dict["% Change"] = []
for i, v in enumerate(var_list):
base_ineq = Inequality(
base_ss[v],
base_params.omega_SS,
base_params.lambdas,
base_params.S,
base_params.J,
)
if reform_ss:
reform_ineq = Inequality(
reform_ss[v],
reform_params.omega_SS,
reform_params.lambdas,
reform_params.S,
reform_params.J,
)
table_dict["Steady-State Variable"].extend([VAR_LABELS[v], "", ""])
table_dict["Gini Type"].extend(
["Overall", "Lifetime Income Group, $j$", "Age , $s$"]
)
base_values = np.array(
[
base_ineq.gini(),
base_ineq.gini(type="ability"),
base_ineq.gini(type="age"),
]
)
table_dict["Baseline"].extend(list(base_values))
if reform_ss:
reform_values = np.array(
[
reform_ineq.gini(),
reform_ineq.gini(type="ability"),
reform_ineq.gini(type="age"),
]
)
table_dict["Reform"].extend(list(reform_values))
table_dict["% Change"].extend(
list(((reform_values - base_values) / base_values) * 100)
)
# Make df with dict so can use pandas functions
table_df = | pd.DataFrame.from_dict(table_dict) | pandas.DataFrame.from_dict |
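# A hedged sketch of macro_table with tiny synthetic inputs. A real call passes OG-Core
# TPI output dictionaries and Specifications objects; here only the start_year attribute
# is mimicked with a SimpleNamespace and the series values are made up.
if __name__ == "__main__":
    from types import SimpleNamespace

    T = 15
    base_tpi = {"Y": np.linspace(100.0, 120.0, T), "C": np.linspace(70.0, 80.0, T)}
    reform_tpi = {"Y": base_tpi["Y"] * 1.01, "C": base_tpi["C"] * 1.02}
    params = SimpleNamespace(start_year=DEFAULT_START_YEAR)
    tbl = macro_table(base_tpi, params, reform_tpi=reform_tpi, reform_params=params,
                      var_list=["Y", "C"], output_type="pct_diff", num_years=5)
    print(tbl)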
from datetime import datetime
import numpy as np
from pandas.tseries.frequencies import get_freq_code as _gfc
from pandas.tseries.index import DatetimeIndex, Int64Index
from pandas.tseries.tools import parse_time_string
import pandas.tseries.frequencies as _freq_mod
import pandas.core.common as com
import pandas.core.datetools as datetools
from pandas._tseries import Timestamp
import pandas._tseries as lib
#---------------
# Period logic
def to_period(arg, freq=None):
""" Attempts to convert arg to timestamp """
if arg is None:
return arg
if type(arg) == float:
raise TypeError("Cannot convert a float to period")
return Period(arg, freq=freq)
class Period(object):
def __init__(self, value=None, freq=None,
year=None, month=1, quarter=None, day=1,
hour=0, minute=0, second=0):
"""
Represents an period of time
Parameters
----------
value : Period or basestring, default None
The time period represented (e.g., '4Q2005')
freq : str, default None
e.g., 'B' for businessday, ('T', 5) or '5T' for 5 minutes
year : int, default None
month : int, default 1
quarter : int, default None
day : int, default 1
hour : int, default 0
minute : int, default 0
second : int, default 0
"""
# freq points to a tuple (base, mult); base is one of the defined
# periods such as A, Q, etc. Every five minutes would be, e.g.,
# ('T', 5) but may be passed in as a string like '5T'
self.freq = None
# ordinal is the period offset from the gregorian proleptic epoch
self.ordinal = None
if value is None:
if freq is None:
raise ValueError("If value is None, freq cannot be None")
if year is None:
raise ValueError("If value is None, year cannot be None")
if quarter is not None:
month = (quarter - 1) * 3 + 1
base, mult = _gfc(freq)
self.ordinal = lib.period_ordinal(year, month, day, hour, minute,
second, base, mult)
elif isinstance(value, Period):
other = value
if freq is None or _gfc(freq) == _gfc(other.freq):
self.ordinal = other.ordinal
freq = other.freq
else:
converted = other.asfreq(freq)
self.ordinal = converted.ordinal
elif isinstance(value, basestring):
value = value.upper()
dt, parsed, reso = parse_time_string(value)
if freq is None:
if reso == 'year':
freq = 'A'
elif reso == 'quarter':
freq = 'Q'
elif reso == 'month':
freq = 'M'
elif reso == 'day':
freq = 'D'
elif reso == 'hour':
freq = 'H'
elif reso == 'minute':
freq = 'T'
elif reso == 'second':
freq = 'S'
else:
raise ValueError("Could not infer frequency for period")
elif isinstance(value, datetime):
dt = value
if freq is None:
raise ValueError('Must supply freq for datetime value')
elif isinstance(value, (int, long)):
if value <= 0:
raise ValueError("Value must be positive")
self.ordinal = value
if freq is None:
raise ValueError('Must supply freq for ordinal value')
else:
msg = "Value must be Period, string, integer, or datetime"
raise ValueError(msg)
base, mult = _gfc(freq)
if self.ordinal is None:
self.ordinal = lib.period_ordinal(dt.year, dt.month, dt.day, dt.hour,
dt.minute, dt.second, base, mult)
self.freq = _freq_mod._get_freq_str(base, mult)
def __eq__(self, other):
if isinstance(other, Period):
return (self.ordinal == other.ordinal
and _gfc(self.freq) == _gfc(other.freq))
return False
def __add__(self, other):
if isinstance(other, (int, long)):
return Period(self.ordinal + other, self.freq)
raise ValueError("Cannot add with non-integer value")
def __sub__(self, other):
if isinstance(other, (int, long)):
return Period(self.ordinal - other, self.freq)
if isinstance(other, Period):
if other.freq != self.freq:
raise ValueError("Cannot do arithmetic with "
"non-conforming periods")
return self.ordinal - other.ordinal
raise ValueError("Cannot sub with non-integer value")
def asfreq(self, freq=None, how='E'):
"""
Parameters
----------
freq :
how :
Returns
-------
resampled : Period
"""
how = _validate_end_alias(how)
base1, mult1 = _gfc(self.freq)
base2, mult2 = _gfc(freq)
new_ordinal = lib.period_asfreq(self.ordinal, base1, mult1,
base2, mult2, how)
return Period(new_ordinal, (base2, mult2))
def start_time(self):
return self.to_timestamp(which_end='S')
def end_time(self):
return self.to_timestamp(which_end='E')
def to_timestamp(self, which_end='S'):
"""
Return the Timestamp at the start/end of the period
Parameters
----------
which_end: str, default 'S' (start)
'S', 'E'. Can be aliased as case insensitive
'Start', 'Finish', 'Begin', 'End'
Returns
-------
Timestamp
"""
which_end = _validate_end_alias(which_end)
new_val = self.asfreq('S', which_end)
base, mult = _gfc(new_val.freq)
return Timestamp(lib.period_ordinal_to_dt64(new_val.ordinal, base, mult))
@property
def year(self):
base, mult = _gfc(self.freq)
return lib.get_period_year(self.ordinal, base, mult)
@property
def month(self):
base, mult = _gfc(self.freq)
return lib.get_period_month(self.ordinal, base, mult)
@property
def qyear(self):
base, mult = _gfc(self.freq)
return lib.get_period_qyear(self.ordinal, base, mult)
@property
def quarter(self):
base, mult = _gfc(self.freq)
return lib.get_period_quarter(self.ordinal, base, mult)
@property
def day(self):
base, mult = _gfc(self.freq)
return lib.get_period_day(self.ordinal, base, mult)
@property
def week(self):
base, mult = _gfc(self.freq)
return lib.get_period_week(self.ordinal, base, mult)
@property
def weekday(self):
base, mult = _gfc(self.freq)
return lib.get_period_weekday(self.ordinal, base, mult)
@property
def day_of_week(self):
base, mult = _gfc(self.freq)
return lib.get_period_dow(self.ordinal, base, mult)
@property
def day_of_year(self):
base, mult = _gfc(self.freq)
return lib.get_period_doy(self.ordinal, base, mult)
@property
def hour(self):
base, mult = _gfc(self.freq)
return lib.get_period_hour(self.ordinal, base, mult)
@property
def minute(self):
base, mult = _gfc(self.freq)
return lib.get_period_minute(self.ordinal, base, mult)
@property
def second(self):
base, mult = _gfc(self.freq)
return lib.get_period_second(self.ordinal, base, mult)
@classmethod
def now(cls, freq=None):
return Period(datetime.now(), freq=freq)
def __repr__(self):
base, mult = _gfc(self.freq)
formatted = lib.period_ordinal_to_string(self.ordinal, base, mult)
freqstr = _freq_mod._reverse_period_code_map[base]
if mult == 1:
return "Period('%s', '%s')" % (formatted, freqstr)
return ("Period('%s', '%d%s')" % (formatted, mult, freqstr))
def __str__(self):
base, mult = _gfc(self.freq)
formatted = lib.period_ordinal_to_string(self.ordinal, base, mult)
return ("%s" % formatted)
def strftime(self, fmt):
"""
Returns the string representation of the :class:`Period`, depending
on the selected :keyword:`format`. :keyword:`format` must be a string
containing one or several directives. The method recognizes the same
directives as the :func:`time.strftime` function of the standard Python
distribution, as well as the specific additional directives ``%f``,
``%F``, ``%q``. (formatting & docs originally from scikits.timeries)
+-----------+--------------------------------+-------+
| Directive | Meaning | Notes |
+===========+================================+=======+
| ``%a`` | Locale's abbreviated weekday | |
| | name. | |
+-----------+--------------------------------+-------+
| ``%A`` | Locale's full weekday name. | |
+-----------+--------------------------------+-------+
| ``%b`` | Locale's abbreviated month | |
| | name. | |
+-----------+--------------------------------+-------+
| ``%B`` | Locale's full month name. | |
+-----------+--------------------------------+-------+
| ``%c`` | Locale's appropriate date and | |
| | time representation. | |
+-----------+--------------------------------+-------+
| ``%d`` | Day of the month as a decimal | |
| | number [01,31]. | |
+-----------+--------------------------------+-------+
| ``%f`` | 'Fiscal' year without a | \(1) |
| | century as a decimal number | |
| | [00,99] | |
+-----------+--------------------------------+-------+
| ``%F`` | 'Fiscal' year with a century | \(2) |
| | as a decimal number | |
+-----------+--------------------------------+-------+
| ``%H`` | Hour (24-hour clock) as a | |
| | decimal number [00,23]. | |
+-----------+--------------------------------+-------+
| ``%I`` | Hour (12-hour clock) as a | |
| | decimal number [01,12]. | |
+-----------+--------------------------------+-------+
| ``%j`` | Day of the year as a decimal | |
| | number [001,366]. | |
+-----------+--------------------------------+-------+
| ``%m`` | Month as a decimal number | |
| | [01,12]. | |
+-----------+--------------------------------+-------+
| ``%M`` | Minute as a decimal number | |
| | [00,59]. | |
+-----------+--------------------------------+-------+
| ``%p`` | Locale's equivalent of either | \(3) |
| | AM or PM. | |
+-----------+--------------------------------+-------+
| ``%q`` | Quarter as a decimal number | |
| | [01,04] | |
+-----------+--------------------------------+-------+
| ``%S`` | Second as a decimal number | \(4) |
| | [00,61]. | |
+-----------+--------------------------------+-------+
| ``%U`` | Week number of the year | \(5) |
| | (Sunday as the first day of | |
| | the week) as a decimal number | |
| | [00,53]. All days in a new | |
| | year preceding the first | |
| | Sunday are considered to be in | |
| | week 0. | |
+-----------+--------------------------------+-------+
| ``%w`` | Weekday as a decimal number | |
| | [0(Sunday),6]. | |
+-----------+--------------------------------+-------+
| ``%W`` | Week number of the year | \(5) |
| | (Monday as the first day of | |
| | the week) as a decimal number | |
| | [00,53]. All days in a new | |
| | year preceding the first | |
| | Monday are considered to be in | |
| | week 0. | |
+-----------+--------------------------------+-------+
| ``%x`` | Locale's appropriate date | |
| | representation. | |
+-----------+--------------------------------+-------+
| ``%X`` | Locale's appropriate time | |
| | representation. | |
+-----------+--------------------------------+-------+
| ``%y`` | Year without century as a | |
| | decimal number [00,99]. | |
+-----------+--------------------------------+-------+
| ``%Y`` | Year with century as a decimal | |
| | number. | |
+-----------+--------------------------------+-------+
| ``%Z`` | Time zone name (no characters | |
| | if no time zone exists). | |
+-----------+--------------------------------+-------+
| ``%%`` | A literal ``'%'`` character. | |
+-----------+--------------------------------+-------+
.. note::
(1)
The ``%f`` directive is the same as ``%y`` if the frequency is
not quarterly.
Otherwise, it corresponds to the 'fiscal' year, as defined by
the :attr:`qyear` attribute.
(2)
The ``%F`` directive is the same as ``%Y`` if the frequency is
not quarterly.
Otherwise, it corresponds to the 'fiscal' year, as defined by
the :attr:`qyear` attribute.
(3)
The ``%p`` directive only affects the output hour field
if the ``%I`` directive is used to parse the hour.
(4)
The range really is ``0`` to ``61``; this accounts for leap
seconds and the (very rare) double leap seconds.
(5)
The ``%U`` and ``%W`` directives are only used in calculations
when the day of the week and the year are specified.
.. rubric:: Examples
>>> a = Period(freq='Q@JUL', year=2006, quarter=1)
>>> a.strftime('%F-Q%q')
'2006-Q1'
>>> # Output the last month in the quarter of this date
>>> a.strftime('%b-%Y')
'Oct-2005'
>>>
>>> a = Period(freq='D', year=2001, month=1, day=1)
>>> a.strftime('%d-%b-%Y')
'01-Jan-2006'
>>> a.strftime('%b. %d, %Y was a %A')
'Jan. 01, 2001 was a Monday'
"""
base, mult = _gfc(self.freq)
if fmt is not None:
return lib.period_strftime(self.ordinal, base, mult, fmt)
else:
return lib.period_ordinal_to_string(self.ordinal, base, mult)
def _period_unbox(key, check=None):
'''
Period-like => int64
'''
if not isinstance(key, Period):
key = Period(key, freq=check)
elif check is not None:
if key.freq != check:
raise ValueError("%s is wrong freq" % key)
return np.int64(key.ordinal)
def _period_unbox_array(arr, check=None):
if arr is None:
return arr
unboxer = np.frompyfunc(lambda x: _period_unbox(x, check=check), 1, 1)
return unboxer(arr)
def _period_box(val, freq):
return Period(val, freq=freq)
def _period_box_array(arr, freq):
if arr is None:
return arr
if not isinstance(arr, np.ndarray):
return arr
boxfunc = lambda x: _period_box(x, freq)
boxer = np.frompyfunc(boxfunc, 1, 1)
return boxer(arr)
def dt64arr_to_periodarr(data, freq):
if data is None:
return data
if isinstance(freq, basestring):
base, mult = _gfc(freq)
else:
base, mult = freq
return lib.dt64arr_to_periodarr(data.view('i8'), base, mult)
# --- Period index sketch
class PeriodIndex(Int64Index):
"""
Immutable ndarray holding ordinal values indicating regular periods in
time such as particular years, quarters, months, etc. A value of 1 is the
period containing the Gregorian proleptic datetime Jan 1, 0001 00:00:00.
This ordinal representation is from the scikits.timeseries project.
For instance,
# construct period for day 1/1/1 and get the first second
i = Period(year=1,month=1,day=1,freq='D').asfreq('S', 'S')
i.ordinal
===> 1
Index keys are boxed to Period objects which carries the metadata (eg,
frequency information).
Parameters
----------
data : array-like (1-dimensional), optional
Optional period-like data to construct index with
dtype : NumPy dtype (default: i8)
copy : bool
Make a copy of input ndarray
freq : string or period object, optional
One of pandas period strings or corresponding objects
start : starting value, period-like, optional
If data is None, used as the start point in generating regular
period data.
periods : int, optional, > 0
Number of periods to generate, if generating index. Takes precedence
over end argument
end : end value, period-like, optional
If periods is none, generated index will extend to first conforming
period on or just past end argument
"""
def __new__(cls, data=None,
freq=None, start=None, end=None, periods=None,
copy=False, name=None):
if isinstance(freq, Period):
freq = freq.freq
else:
freq = datetools.get_standard_freq(freq)
if data is None:
if start is None and end is None:
raise ValueError('Must specify start, end, or data')
start = to_period(start, freq)
end = to_period(end, freq)
is_start_intv = isinstance(start, Period)
is_end_intv = isinstance(end, Period)
if (start is not None and not is_start_intv):
raise ValueError('Failed to convert %s to period' % start)
if (end is not None and not is_end_intv):
raise ValueError('Failed to convert %s to period' % end)
if is_start_intv and is_end_intv and (start.freq != end.freq):
raise ValueError('Start and end must have same freq')
if freq is None:
if is_start_intv:
freq = start.freq
elif is_end_intv:
freq = end.freq
else:
raise ValueError('Could not infer freq from start/end')
if periods is not None:
if start is None:
data = np.arange(end.ordinal - periods + 1,
end.ordinal + 1,
dtype=np.int64)
else:
data = np.arange(start.ordinal, start.ordinal + periods,
dtype=np.int64)
else:
if start is None or end is None:
msg = 'Must specify both start and end if periods is None'
raise ValueError(msg)
data = np.arange(start.ordinal, end.ordinal+1, dtype=np.int64)
subarr = data.view(cls)
subarr.name = name
subarr.freq = freq
return subarr
if not isinstance(data, np.ndarray):
if np.isscalar(data):
raise ValueError('PeriodIndex() must be called with a '
'collection of some kind, %s was passed'
% repr(data))
if isinstance(data, Period):
data = [data]
# other iterable of some kind
if not isinstance(data, (list, tuple)):
data = list(data)
try:
data = np.array(data, dtype='i8')
except:
data = np.array(data, dtype='O')
if freq is None:
raise ValueError('freq cannot be none')
data = _period_unbox_array(data, check=freq)
else:
if isinstance(data, PeriodIndex):
if freq is None or freq == data.freq:
freq = data.freq
data = data.values
else:
base1, mult1 = _gfc(data.freq)  # i.e. pandas.tseries.frequencies.get_freq_code
import pandas as pd
import numpy as np
import csv
import os
from collections import Counter
import pytz
from datetime import datetime
from dateutil.relativedelta import relativedelta
import data_paths
from transit_non_transit_comparision.before_and_after_final_tpu import pos_percent_minus_neg_percent
import utils
# statistics
import statsmodels.formula.api as smf
# Hong Kong and Shanghai share the same time zone.
# Hence, we transform the UTC timestamps in our dataset into Shanghai time.
time_zone_hk = pytz.timezone('Asia/Shanghai')
october_1_start = datetime(2016, 10, 1, 0, 0, 0, tzinfo=time_zone_hk)
october_31_end = datetime(2016, 10, 31, 23, 59, 59, tzinfo=time_zone_hk)
december_1_start = datetime(2016, 12, 1, 0, 0, 0, tzinfo=time_zone_hk)
december_31_end = datetime(2016, 12, 31, 23, 59, 59, tzinfo=time_zone_hk)
class StudyArea(object):
def __init__(self, area_name, open_month):
assert open_month in ['Oct', 'Dec'], "The open month should be either 'Oct' or 'Dec'."
self.area_name = area_name
if open_month == 'Oct':
self.open_start_date = october_1_start
self.open_end_date = october_31_end
else:
self.open_start_date = december_1_start
self.open_end_date = december_31_end
def transform_string_time_to_datetime(string):
"""
:param string: the string which records the time of the posted tweets
:return: a timezone-aware datetime object from which the year, month, and day can be accessed easily
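Example (illustrative; the timestamp is made up but follows the
'%Y-%m-%d %H:%M:%S+08:00' format this function expects):
>>> t = transform_string_time_to_datetime('2016-10-05 12:30:00+08:00')
>>> (t.year, t.month, t.day)
(2016, 10, 5)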
"""
assert isinstance(string, str)
datetime_object = datetime.strptime(string, '%Y-%m-%d %H:%M:%S+08:00')
final_time_object = datetime_object.replace(tzinfo=time_zone_hk)
return final_time_object
def add_post_variable(string, opening_start_date, opening_end_date, check_window=0):
"""
Add the value of the POST variable in the DID analysis.
In this case, we assume that the introduction of MTR stations immediately changes the tweet sentiment and tweet
activity
:param string: the time string
:param opening_start_date: the opening date of the studied station
:param opening_end_date: the end date of the studied station's opening month
:param check_window: the month window size used to check the temporal effect of the studied station
:return: the post variable based on the time of one tweet
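Example (illustrative, using the October 2016 opening window defined above
with a 3-month check window):
>>> add_post_variable('2016-08-15 12:00:00+08:00', october_1_start, october_31_end, check_window=3)
0
>>> add_post_variable('2016-11-20 12:00:00+08:00', october_1_start, october_31_end, check_window=3)
1
>>> add_post_variable('2017-06-01 12:00:00+08:00', october_1_start, october_31_end, check_window=3)
'not considered'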
"""
time_object = transform_string_time_to_datetime(string)
if check_window == np.inf:
if time_object > opening_end_date:
return 1
elif time_object < opening_start_date:
return 0
else:
return 'not considered'
else:
left_time_range = opening_start_date - relativedelta(months=check_window)
right_time_range = opening_end_date + relativedelta(months=check_window)
if left_time_range <= time_object < opening_start_date:
return 0
elif opening_end_date < time_object <= right_time_range:
return 1
else:
return 'not considered'
def add_post_variable_lag_effect(string, opening_start_date, opening_end_date, lag_effect_month=0):
"""
Add the value of the POST variable in the DID analysis (Consider the lag effect)
In this case, we believe that the operation of MTR stations does not immediately change the tweet sentiment and
tweet activity. The lag effect exists.
:param string: the time string
:param opening_start_date: the opening date of the studied station
:param opening_end_date: the end date of the studied station's opening month
:param lag_effect_month: the number of lag effect months
:return: the post variable based on the time of one tweet
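Example (illustrative, October 2016 opening window with a 6-month lag; the
pre-period covers the 12 months before the opening, and the post-period only
starts once the lag has elapsed):
>>> add_post_variable_lag_effect('2016-05-01 08:00:00+08:00', october_1_start, october_31_end, lag_effect_month=6)
0
>>> add_post_variable_lag_effect('2017-06-01 08:00:00+08:00', october_1_start, october_31_end, lag_effect_month=6)
1
>>> add_post_variable_lag_effect('2016-12-15 08:00:00+08:00', october_1_start, october_31_end, lag_effect_month=6)
'not considered'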
"""
time_object = transform_string_time_to_datetime(string)
if lag_effect_month == np.inf:
if time_object > opening_end_date:
return 1
elif time_object < opening_start_date:
return 0
else:
return 'not considered'
else:
left_time_range = opening_start_date - relativedelta(months=12)
right_time_range = opening_end_date + relativedelta(months=lag_effect_month + 12)
opening_end_date = opening_end_date + relativedelta(months=lag_effect_month)
if left_time_range <= time_object < opening_start_date:
return 0
elif opening_end_date < time_object <= right_time_range:
return 1
else:
return 'not considered'
def get_population_one_area_combined(dataframe: pd.DataFrame, census_dict: dict):
"""
Get the population data based on treatment and control setting
:param dataframe: a pandas dataframe containing the data for DID analysis
:param census_dict: dictionary saving the population and median income information
:return: dataframe containing the population
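The census_dict is assumed to look like (values purely illustrative):
{'treatment': [treatment_population, treatment_median_income],
'control': [control_population, control_median_income]}
Only the population entry (index 0) is used in this function.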
"""
assert 'T_i_t' in dataframe, 'The dataframe should have treatment and control indicator'
dataframe['Population_log'] = dataframe.apply(
lambda row: np.log(census_dict['treatment'][0]) if row['T_i_t'] == 1 else np.log(census_dict['control'][0]),
axis=1)
return dataframe
def get_population_one_area_seperate(dataframe: pd.DataFrame, census_dict: dict):
"""
Get the population data for each TPU separately, looked up via the 'SmallTPU' column
:param dataframe: a pandas dataframe containing the data for DID analysis
:param census_dict: dictionary saving the population and median income information
:return: dataframe containing the population
"""
assert 'T_i_t' in dataframe, 'The dataframe should have treatment and control indicator'
dataframe['Population_log'] = dataframe.apply(lambda row: np.log(census_dict[row['SmallTPU']][0]), axis=1)
return dataframe
def get_median_income_one_area_combined(dataframe: pd.DataFrame, census_dict: dict):
"""
Get the median income data based on treatment and control setting
:param dataframe: a pandas dataframe containing the data for DID analysis
:param census_dict: dictionary saving the population and median income information
:return: dataframe containing the median income
"""
assert 'T_i_t' in dataframe, 'The dataframe should have treatment and control indicator'
dataframe['Median_Income_log'] = dataframe.apply(
lambda row: np.log(census_dict['treatment'][1]) if row['T_i_t'] == 1 else np.log(census_dict['control'][1]),
axis=1)
return dataframe
def get_median_income_one_area_seperate(dataframe: pd.DataFrame, census_dict: dict):
"""
Get the median income data for each TPU separately, looked up via the 'SmallTPU' column
:param dataframe: a pandas dataframe containing the data for DID analysis
:param census_dict: dictionary saving the population and median income information
:return: dataframe containing the median income
"""
assert 'T_i_t' in dataframe, 'The dataframe should have treatment and control indicator'
dataframe['Median_Income_log'] = dataframe.apply(lambda row: np.log(census_dict[row['SmallTPU']][1]), axis=1)
return dataframe
def get_population_three_areas_combined(dataframe: pd.DataFrame, census_dict: dict):
"""
Get the population data based on treatment and control setting
:param dataframe: a pandas dataframe containing the data for DID analysis
:param census_dict: dictionary saving the population and median income information
:return: dataframe containing the population
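The census_dict is assumed to map each area name to a pair of
(population, median_income) records, treatment first (values illustrative):
{'kwun_tong': [(treatment_pop, treatment_income), (control_pop, control_income)],
'south_horizons': [(treatment_pop, treatment_income), (control_pop, control_income)],
'ocean_park': [(treatment_pop, treatment_income), (control_pop, control_income)]}
Only the population entries ([0][0] and [1][0]) are used in this function.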
"""
assert 'T_i_t' in dataframe, 'The dataframe should have treatment and control indicator'
assert 'Area_name' in dataframe, "The dataframe should have one column saving the area name"
assert 'kwun_tong' in census_dict, 'The dictionary should contain whampoa & ho man tin data'
assert 'south_horizons' in census_dict, 'The dictionary should contain south horizons & lei tung data'
assert 'ocean_park' in census_dict, 'The dictionary should contain ocean park & wong chuk hang data'
result_population_log_list = []
dataframe_copy = dataframe.copy()
for index, row in dataframe_copy.iterrows():
if (row['T_i_t'] == 1) and (row['Area_name'] == 'kwun_tong'):
result_population_log_list.append(np.log(census_dict['kwun_tong'][0][0]))
elif (row['T_i_t'] == 0) and (row['Area_name'] == 'kwun_tong'):
result_population_log_list.append(np.log(census_dict['kwun_tong'][1][0]))
elif (row['T_i_t'] == 1) and (row['Area_name'] == 'south_horizons'):
result_population_log_list.append(np.log(census_dict['south_horizons'][0][0]))
elif (row['T_i_t'] == 0) and (row['Area_name'] == 'south_horizons'):
result_population_log_list.append(np.log(census_dict['south_horizons'][1][0]))
elif (row['T_i_t'] == 1) and (row['Area_name'] == 'ocean_park'):
result_population_log_list.append(np.log(census_dict['ocean_park'][0][0]))
elif (row['T_i_t'] == 0) and (row['Area_name'] == 'ocean_park'):
result_population_log_list.append(np.log(census_dict['ocean_park'][1][0]))
else:
raise ValueError('Something wrong with the area name...')
dataframe_copy['Population_log'] = result_population_log_list
return dataframe_copy
def get_median_income_three_areas_combined(dataframe: pd.DataFrame, census_dict: dict):
"""
Get the median income data based on treatment and control setting
:param dataframe: a pandas dataframe containing the data for DID analysis
:param census_dict: dictionary saving the population and median income information
:return: dataframe containing the median income
"""
assert 'T_i_t' in dataframe, 'The dataframe should have treatment and control indicator'
assert 'Area_name' in dataframe, "The dataframe should have one column saving the area name"
assert 'kwun_tong' in census_dict, 'The dictionary should contain whampoa & ho man tin data'
assert 'south_horizons' in census_dict, 'The dictionary should contain south horizons & lei tung data'
assert 'ocean_park' in census_dict, 'The dictionary should contain ocean park & wong chuk hang data'
result_median_income_log_list = []
dataframe_copy = dataframe.copy()
for index, row in dataframe_copy.iterrows():
if (row['T_i_t'] == 1) and (row['Area_name'] == 'kwun_tong'):
result_median_income_log_list.append(np.log(census_dict['kwun_tong'][0][1]))
elif (row['T_i_t'] == 0) and (row['Area_name'] == 'kwun_tong'):
result_median_income_log_list.append(np.log(census_dict['kwun_tong'][1][1]))
elif (row['T_i_t'] == 1) and (row['Area_name'] == 'south_horizons'):
result_median_income_log_list.append(np.log(census_dict['south_horizons'][0][1]))
elif (row['T_i_t'] == 0) and (row['Area_name'] == 'south_horizons'):
result_median_income_log_list.append(np.log(census_dict['south_horizons'][1][1]))
elif (row['T_i_t'] == 1) and (row['Area_name'] == 'ocean_park'):
result_median_income_log_list.append(np.log(census_dict['ocean_park'][0][1]))
elif (row['T_i_t'] == 0) and (row['Area_name'] == 'ocean_park'):
result_median_income_log_list.append(np.log(census_dict['ocean_park'][1][1]))
else:
raise ValueError('Something wrong with the area name...')
dataframe_copy['Median_Income_log'] = result_median_income_log_list
return dataframe_copy
def build_dataframe_based_on_set(datapath, tpu_set, selected_user_set):
"""
Build a combined dataframe of the tweets posted in the given TPU set
:param datapath: the datapath saving the tweets posted in each tpu
:param tpu_set: a python set saving the considered tpu names
:param selected_user_set: a set containing the id of users we are interested in
:return: a pandas dataframe saving the tweets posted in the considered tpus
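Example (illustrative sketch; the path and ids are made up, and each TPU
folder is expected to contain a '<tpu>_data.csv' file):
>>> tweets = build_dataframe_based_on_set('./tpu_tweets', {'213', '245'}, {123456789, 987654321})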
"""
tpu_name_list = []
dataframe_list = []
for tpu in tpu_set:
tpu_name_list.append(tpu)
dataframe = pd.read_csv(os.path.join(datapath, tpu, tpu + '_data.csv'), encoding='utf-8', dtype='str',
quoting=csv.QUOTE_NONNUMERIC)
dataframe['user_id_str'] = dataframe.apply(lambda row: np.int64(float(row['user_id_str'])), axis=1)
dataframe_select = dataframe.loc[dataframe['user_id_str'].isin(selected_user_set)]
dataframe_list.append(dataframe_select)
combined_dataframe = pd.concat(dataframe_list, axis=0)
return combined_dataframe
"""
Copyright 2019 <NAME>.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import datetime
import datetime as dt
import os
from typing import Union
import numpy as np
import pandas as pd
import pytest
import pytz
from gs_quant.target.common import XRef, PricingLocation, Currency as CurrEnum
from numpy.testing import assert_allclose
from pandas.testing import assert_series_equal
from pandas.tseries.offsets import CustomBusinessDay
from pytz import timezone
from testfixtures import Replacer
from testfixtures.mock import Mock
import gs_quant.timeseries.measures as tm
import gs_quant.timeseries.measures_rates as tm_rates
from gs_quant.api.gs.assets import GsTemporalXRef, GsAssetApi, GsIdType, IdList, GsAsset
from gs_quant.api.gs.data import GsDataApi, MarketDataResponseFrame
from gs_quant.api.gs.data import QueryType
from gs_quant.data.core import DataContext
from gs_quant.data.dataset import Dataset
from gs_quant.data.fields import Fields
from gs_quant.errors import MqError, MqValueError, MqTypeError
from gs_quant.markets.securities import AssetClass, Cross, Index, Currency, SecurityMaster, Stock, \
Swap, CommodityNaturalGasHub
from gs_quant.session import GsSession, Environment
from gs_quant.test.timeseries.utils import mock_request
from gs_quant.timeseries import Returns
from gs_quant.timeseries.measures import BenchmarkType
_index = [pd.Timestamp('2019-01-01')]
_test_datasets = ('TEST_DATASET',)
def mock_empty_market_data_response():
df = MarketDataResponseFrame()
df.dataset_ids = ()
return df
def map_identifiers_default_mocker(input_type: Union[GsIdType, str],
output_type: Union[GsIdType, str],
ids: IdList,
as_of: dt.datetime = None,
multimap: bool = False,
limit: int = None,
**kwargs
) -> dict:
if "USD-LIBOR-BBA" in ids:
return {"USD-LIBOR-BBA": "MAPDB7QNB2TZVQ0E"}
elif "EUR-EURIBOR-TELERATE" in ids:
return {"EUR-EURIBOR-TELERATE": "MAJNQPFGN1EBDHAE"}
elif "GBP-LIBOR-BBA" in ids:
return {"GBP-LIBOR-BBA": "MAFYB8Z4R1377A19"}
elif "JPY-LIBOR-BBA" in ids:
return {"JPY-LIBOR-BBA": "MABMVE27EM8YZK33"}
elif "EUR OIS" in ids:
return {"EUR OIS": "MARFAGXDQRWM07Y2"}
def map_identifiers_swap_rate_mocker(input_type: Union[GsIdType, str],
output_type: Union[GsIdType, str],
ids: IdList,
as_of: dt.datetime = None,
multimap: bool = False,
limit: int = None,
**kwargs
) -> dict:
if "USD-3m" in ids:
return {"USD-3m": "MAAXGV0GZTW4GFNC"}
elif "EUR-6m" in ids:
return {"EUR-6m": "MA5WM2QWRVMYKDK0"}
elif "KRW" in ids:
return {"KRW": 'MAJ6SEQH3GT0GA2Z'}
def map_identifiers_inflation_mocker(input_type: Union[GsIdType, str],
output_type: Union[GsIdType, str],
ids: IdList,
as_of: dt.datetime = None,
multimap: bool = False,
limit: int = None,
**kwargs
) -> dict:
if "CPI-UKRPI" in ids:
return {"CPI-UKRPI": "MAQ7ND0MBP2AVVQW"}
elif "CPI-CPXTEMU" in ids:
return {"CPI-CPXTEMU": "MAK1FHKH5P5GJSHH"}
def map_identifiers_cross_basis_mocker(input_type: Union[GsIdType, str],
output_type: Union[GsIdType, str],
ids: IdList,
as_of: dt.datetime = None,
multimap: bool = False,
limit: int = None,
**kwargs
) -> dict:
if "USD-3m/JPY-3m" in ids:
return {"USD-3m/JPY-3m": "MA99N6C1KF9078NM"}
elif "EUR-3m/USD-3m" in ids:
return {"EUR-3m/USD-3m": "MAXPKTXW2D4X6MFQ"}
elif "GBP-3m/USD-3m" in ids:
return {"GBP-3m/USD-3m": "MA8BZHQV3W32V63B"}
def get_data_policy_rate_expectation_mocker(
start: Union[dt.date, dt.datetime] = None,
end: Union[dt.date, dt.datetime] = None,
as_of: dt.datetime = None,
since: dt.datetime = None,
fields: Union[str, Fields] = None,
asset_id_type: str = None,
**kwargs) -> pd.DataFrame:
if 'meetingNumber' in kwargs:
if kwargs['meetingNumber'] == 0:
return mock_meeting_spot()
elif 'meeting_date' in kwargs:
if kwargs['meeting_date'] == dt.date(2019, 10, 24):
return mock_meeting_spot()
return mock_meeting_expectation()
def test_parse_meeting_date():
assert tm.parse_meeting_date(5) == ''
assert tm.parse_meeting_date('') == ''
assert tm.parse_meeting_date('test') == ''
assert tm.parse_meeting_date('2019-09-01') == dt.date(2019, 9, 1)
def test_currency_to_default_benchmark_rate(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=map_identifiers_default_mocker)
asset_id_list = ["MAZ7RWC904JYHYPS", "MAJNQPFGN1EBDHAE", "MA66CZBQJST05XKG", "MAK1FHKH5P5GJSHH", "MA4J1YB8XZP2BPT8",
"MA4B66MW5E27U8P32SB"]
correct_mapping = ["MAPDB7QNB2TZVQ0E", "MAJNQPFGN1EBDHAE", "MAFYB8Z4R1377A19", "MABMVE27EM8YZK33",
"MA4J1YB8XZP2BPT8", "MA4B66MW5E27U8P32SB"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.currency_to_default_benchmark_rate(asset_id_list[i])
assert correct_id == correct_mapping[i]
def test_currency_to_default_swap_rate_asset(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=map_identifiers_swap_rate_mocker)
asset_id_list = ['MAZ7RWC904JYHYPS', 'MAJNQPFGN1EBDHAE', 'MAJ6SEQH3GT0GA2Z']
correct_mapping = ['MAAXGV0GZTW4GFNC', 'MA5WM2QWRVMYKDK0', 'MAJ6SEQH3GT0GA2Z']
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.currency_to_default_swap_rate_asset(asset_id_list[i])
assert correct_id == correct_mapping[i]
def test_currency_to_inflation_benchmark_rate(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=map_identifiers_inflation_mocker)
asset_id_list = ["MA66CZBQJST05XKG", "MAK1FHKH5P5GJSHH", "MA4J1YB8XZP2BPT8"]
correct_mapping = ["MAQ7ND0MBP2AVVQW", "MAK1FHKH5P5GJSHH", "MA4J1YB8XZP2BPT8"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.currency_to_inflation_benchmark_rate(asset_id_list[i])
assert correct_id == correct_mapping[i]
# Test that the same id is returned when a TypeError is raised
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=TypeError('Test'))
assert tm.currency_to_inflation_benchmark_rate('MA66CZBQJST05XKG') == 'MA66CZBQJST05XKG'
def test_cross_to_basis(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=map_identifiers_cross_basis_mocker)
asset_id_list = ["MAYJPCVVF2RWXCES", "MA4B66MW5E27U8P32SB", "nobbid"]
correct_mapping = ["MA99N6C1KF9078NM", "MA4B66MW5E27U8P32SB", "nobbid"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.cross_to_basis(asset_id_list[i])
assert correct_id == correct_mapping[i]
# Test that the same id is returned when a TypeError is raised
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=TypeError('Test'))
assert tm.cross_to_basis('MAYJPCVVF2RWXCES') == 'MAYJPCVVF2RWXCES'
def test_currency_to_tdapi_swap_rate_asset(mocker):
replace = Replacer()
mocker.patch.object(GsSession.__class__, 'current',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(SecurityMaster, 'get_asset', side_effect=mock_request)
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
with tm.PricingContext(dt.date.today()):
asset = Currency('MA25DW5ZGC1BSC8Y', 'NOK')
bbid_mock.return_value = 'NOK'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
asset = Currency('MAZ7RWC904JYHYPS', 'USD')
bbid_mock.return_value = 'USD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAFRSWPAF5QPNTP2' == correct_id
bbid_mock.return_value = 'CHF'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAW25BGQJH9P6DPT' == correct_id
bbid_mock.return_value = 'EUR'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAA9MVX15AJNQCVG' == correct_id
bbid_mock.return_value = 'GBP'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MA6QCAP9B7ABS9HA' == correct_id
bbid_mock.return_value = 'JPY'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAEE219J5ZP0ZKRK' == correct_id
bbid_mock.return_value = 'SEK'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAETMVTPNP3199A5' == correct_id
bbid_mock.return_value = 'HKD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MABRNGY8XRFVC36N' == correct_id
bbid_mock.return_value = 'NZD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAH16NHE1HBN0FBZ' == correct_id
bbid_mock.return_value = 'AUD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAY8147CRK0ZP53B' == correct_id
bbid_mock.return_value = 'CAD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MANJ8SS88WJ6N28Q' == correct_id
bbid_mock.return_value = 'KRW'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAP55AXG5SQVS6C5' == correct_id
bbid_mock.return_value = 'INR'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MA20JHJXN1PD5HGE' == correct_id
bbid_mock.return_value = 'CNY'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MA4K1D8HH2R0RQY5' == correct_id
bbid_mock.return_value = 'SGD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MA5CQFHYBPH9E5BS' == correct_id
bbid_mock.return_value = 'DKK'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAF131NKWVRESFYA' == correct_id
asset = Currency('MA890', 'PLN')
bbid_mock.return_value = 'PLN'
assert 'MA890' == tm_rates._currency_to_tdapi_swap_rate_asset(asset)
replace.restore()
def test_currency_to_tdapi_basis_swap_rate_asset(mocker):
replace = Replacer()
mocker.patch.object(GsSession.__class__, 'current',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(SecurityMaster, 'get_asset', side_effect=mock_request)
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
with tm.PricingContext(dt.date.today()):
asset = Currency('MA890', 'NOK')
bbid_mock.return_value = 'NOK'
assert 'MA890' == tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
asset = Currency('MAZ7RWC904JYHYPS', 'USD')
bbid_mock.return_value = 'USD'
correct_id = tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
assert 'MAQB1PGEJFCET3GG' == correct_id
bbid_mock.return_value = 'EUR'
correct_id = tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
assert 'MAGRG2VT11GQ2RQ9' == correct_id
bbid_mock.return_value = 'GBP'
correct_id = tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
assert 'MAHCYNB3V75JC5Q8' == correct_id
bbid_mock.return_value = 'JPY'
correct_id = tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
assert 'MAXVRBEZCJVH0C4V' == correct_id
replace.restore()
def test_check_clearing_house():
assert tm_rates._ClearingHouse.LCH == tm_rates._check_clearing_house('lch')
assert tm_rates._ClearingHouse.CME == tm_rates._check_clearing_house(tm_rates._ClearingHouse.CME)
assert tm_rates._ClearingHouse.LCH == tm_rates._check_clearing_house(None)
invalid_ch = ['NYSE']
for ch in invalid_ch:
with pytest.raises(MqError):
tm_rates._check_clearing_house(ch)
def test_get_swap_csa_terms():
euribor_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EURIBOR.value]
usd_libor_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.LIBOR.value]
fed_funds_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.Fed_Funds.value]
estr_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EUROSTR.value]
assert dict(csaTerms='USD-1') == tm_rates._get_swap_csa_terms('USD', fed_funds_index)
assert dict(csaTerms='EUR-EuroSTR') == tm_rates._get_swap_csa_terms('EUR', estr_index)
assert {} == tm_rates._get_swap_csa_terms('EUR', euribor_index)
assert {} == tm_rates._get_swap_csa_terms('USD', usd_libor_index)
def test_get_basis_swap_csa_terms():
euribor_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EURIBOR.value]
usd_libor_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.LIBOR.value]
fed_funds_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.Fed_Funds.value]
sofr_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.SOFR.value]
estr_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EUROSTR.value]
eonia_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EONIA.value]
assert dict(csaTerms='USD-1') == tm_rates._get_basis_swap_csa_terms('USD', fed_funds_index, sofr_index)
assert dict(csaTerms='EUR-EuroSTR') == tm_rates._get_basis_swap_csa_terms('EUR', estr_index, eonia_index)
assert {} == tm_rates._get_basis_swap_csa_terms('EUR', eonia_index, euribor_index)
assert {} == tm_rates._get_basis_swap_csa_terms('USD', fed_funds_index, usd_libor_index)
def test_match_floating_tenors():
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['LIBOR'],
asset_parameters_payer_designated_maturity='3m',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['SOFR'],
asset_parameters_receiver_designated_maturity='1y')
assert '3m' == tm_rates._match_floating_tenors(swap_args)['asset_parameters_receiver_designated_maturity']
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['SOFR'],
asset_parameters_payer_designated_maturity='1y',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['LIBOR'],
asset_parameters_receiver_designated_maturity='3m')
assert '3m' == tm_rates._match_floating_tenors(swap_args)['asset_parameters_payer_designated_maturity']
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['GBP']['SONIA'],
asset_parameters_payer_designated_maturity='1y',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['GBP']['LIBOR'],
asset_parameters_receiver_designated_maturity='3m')
assert '3m' == tm_rates._match_floating_tenors(swap_args)['asset_parameters_payer_designated_maturity']
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['GBP']['LIBOR'],
asset_parameters_payer_designated_maturity='3m',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['GBP']['SONIA'],
asset_parameters_receiver_designated_maturity='1y')
assert '3m' == tm_rates._match_floating_tenors(swap_args)['asset_parameters_receiver_designated_maturity']
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['LIBOR'],
asset_parameters_payer_designated_maturity='3m',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['LIBOR'],
asset_parameters_receiver_designated_maturity='6m')
assert swap_args == tm_rates._match_floating_tenors(swap_args)
def test_get_term_struct_date(mocker):
today = datetime.datetime.today()
biz_day = CustomBusinessDay()
assert today == tm_rates._get_term_struct_date(tenor=today, index=today, business_day=biz_day)
date_index = datetime.datetime(2020, 7, 31, 0, 0)
assert date_index == tm_rates._get_term_struct_date(tenor='2020-07-31', index=date_index, business_day=biz_day)
assert date_index == tm_rates._get_term_struct_date(tenor='0b', index=date_index, business_day=biz_day)
assert datetime.datetime(2021, 7, 30, 0, 0) == tm_rates._get_term_struct_date(tenor='1y', index=date_index,
business_day=biz_day)
def test_cross_stored_direction_for_fx_vol(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsSession.current, '_post', side_effect=mock_request)
asset_id_list = ["MAYJPCVVF2RWXCES", "MATGYV0J9MPX534Z"]
correct_mapping = ["MATGYV0J9MPX534Z", "MATGYV0J9MPX534Z"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.cross_stored_direction_for_fx_vol(asset_id_list[i])
assert correct_id == correct_mapping[i]
def test_cross_to_usd_based_cross_for_fx_forecast(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsSession.current, '_post', side_effect=mock_request)
asset_id_list = ["MAYJPCVVF2RWXCES", "MATGYV0J9MPX534Z"]
correct_mapping = ["MATGYV0J9MPX534Z", "MATGYV0J9MPX534Z"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.cross_to_usd_based_cross(asset_id_list[i])
assert correct_id == correct_mapping[i]
def test_cross_to_used_based_cross(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsSession.current, '_post', side_effect=mock_request)
mocker.patch.object(SecurityMaster, 'get_asset', side_effect=TypeError('unsupported'))
replace = Replacer()
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
bbid_mock.return_value = 'HELLO'
assert 'FUN' == tm.cross_to_usd_based_cross(Cross('FUN', 'EURUSD'))
replace.restore()
def test_cross_stored_direction(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsSession.current, '_post', side_effect=mock_request)
mocker.patch.object(SecurityMaster, 'get_asset', side_effect=TypeError('unsupported'))
replace = Replacer()
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
bbid_mock.return_value = 'HELLO'
assert 'FUN' == tm.cross_stored_direction_for_fx_vol(Cross('FUN', 'EURUSD'))
replace.restore()
def test_get_tdapi_rates_assets(mocker):
mock_asset_1 = GsAsset(asset_class='Rate', id='MAW25BGQJH9P6DPT', type_='Swap', name='Test_asset')
mock_asset_2 = GsAsset(asset_class='Rate', id='MAA9MVX15AJNQCVG', type_='Swap', name='Test_asset')
mock_asset_3 = GsAsset(asset_class='Rate', id='MANQHVYC30AZFT7R', type_='BasisSwap', name='Test_asset')
replace = Replacer()
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = [mock_asset_1]
assert 'MAW25BGQJH9P6DPT' == tm_rates._get_tdapi_rates_assets()
replace.restore()
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = [mock_asset_1, mock_asset_2]
kwargs = dict(asset_parameters_termination_date='10y', asset_parameters_effective_date='0b')
with pytest.raises(MqValueError):
tm_rates._get_tdapi_rates_assets(**kwargs)
replace.restore()
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = []
with pytest.raises(MqValueError):
tm_rates._get_tdapi_rates_assets()
replace.restore()
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = [mock_asset_1, mock_asset_2]
kwargs = dict()
assert ['MAW25BGQJH9P6DPT', 'MAA9MVX15AJNQCVG'] == tm_rates._get_tdapi_rates_assets(**kwargs)
replace.restore()
# This test case matches the SOFR leg maturity with the LIBOR leg and flips the legs to get the right asset
kwargs = dict(type='BasisSwap', asset_parameters_termination_date='10y',
asset_parameters_payer_rate_option=BenchmarkType.LIBOR,
asset_parameters_payer_designated_maturity='3m',
asset_parameters_receiver_rate_option=BenchmarkType.SOFR,
asset_parameters_receiver_designated_maturity='1y',
asset_parameters_clearing_house='lch', asset_parameters_effective_date='Spot',
asset_parameters_notional_currency='USD',
pricing_location='NYC')
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = [mock_asset_3]
assert 'MANQHVYC30AZFT7R' == tm_rates._get_tdapi_rates_assets(**kwargs)
replace.restore()
def test_get_swap_leg_defaults():
result_dict = dict(currency=CurrEnum.JPY, benchmark_type='JPY-LIBOR-BBA', floating_rate_tenor='6m',
pricing_location=PricingLocation.TKO)
defaults = tm_rates._get_swap_leg_defaults(CurrEnum.JPY)
assert result_dict == defaults
result_dict = dict(currency=CurrEnum.USD, benchmark_type='USD-LIBOR-BBA', floating_rate_tenor='3m',
pricing_location=PricingLocation.NYC)
defaults = tm_rates._get_swap_leg_defaults(CurrEnum.USD)
assert result_dict == defaults
result_dict = dict(currency=CurrEnum.EUR, benchmark_type='EUR-EURIBOR-TELERATE', floating_rate_tenor='6m',
pricing_location=PricingLocation.LDN)
defaults = tm_rates._get_swap_leg_defaults(CurrEnum.EUR)
assert result_dict == defaults
result_dict = dict(currency=CurrEnum.SEK, benchmark_type='SEK-STIBOR-SIDE', floating_rate_tenor='6m',
pricing_location=PricingLocation.LDN)
defaults = tm_rates._get_swap_leg_defaults(CurrEnum.SEK)
assert result_dict == defaults
def test_check_forward_tenor():
valid_tenors = [datetime.date(2020, 1, 1), '1y', 'imm2', 'frb2', '1m', '0b']
for tenor in valid_tenors:
assert tenor == tm_rates._check_forward_tenor(tenor)
invalid_tenors = ['5yr', 'imm5', 'frb0']
for tenor in invalid_tenors:
with pytest.raises(MqError):
tm_rates._check_forward_tenor(tenor)
def mock_commod(_cls, _q):
d = {
'price': [30, 30, 30, 30, 35.929686, 35.636039, 27.307498, 23.23177, 19.020833, 18.827291, 17.823749, 17.393958,
17.824999, 20.307603, 24.311249, 25.160103, 25.245728, 25.736873, 28.425206, 28.779789, 30.519996,
34.896348, 33.966973, 33.95489, 33.686348, 34.840307, 32.674163, 30.261665, 30, 30, 30]
}
df = MarketDataResponseFrame(data=d, index=pd.date_range('2019-05-01', periods=31, freq='H', tz=timezone('UTC')))
df.dataset_ids = _test_datasets
return df
def mock_forward_price(_cls, _q):
d = {
'forwardPrice': [
22.0039,
24.8436,
24.8436,
11.9882,
14.0188,
11.6311,
18.9234,
21.3654,
21.3654,
],
'quantityBucket': [
"PEAK",
"PEAK",
"PEAK",
"7X8",
"7X8",
"7X8",
"2X16H",
"2X16H",
"2X16H",
],
'contract': [
"J20",
"K20",
"M20",
"J20",
"K20",
"M20",
"J20",
"K20",
"M20",
]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 9))
df.dataset_ids = _test_datasets
return df
def mock_fair_price(_cls, _q):
d = {
'fairPrice': [
2.880,
2.844,
2.726,
],
'contract': [
"F21",
"G21",
"H21",
]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 3))
df.dataset_ids = _test_datasets
return df
def mock_natgas_forward_price(_cls, _q):
d = {
'forwardPrice': [
2.880,
2.844,
2.726,
],
'contract': [
"F21",
"G21",
"H21",
]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 3))
df.dataset_ids = _test_datasets
return df
def mock_fair_price_swap(_cls, _q):
d = {'fairPrice': [2.880]}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)]))
df.dataset_ids = _test_datasets
return df
def mock_implied_volatility(_cls, _q):
d = {
'impliedVolatility': [
2.880,
2.844,
2.726,
],
'contract': [
"F21",
"G21",
"H21",
]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 3))
df.dataset_ids = _test_datasets
return df
def mock_missing_bucket_forward_price(_cls, _q):
d = {
'forwardPrice': [
22.0039,
24.8436,
24.8436,
11.9882,
14.0188,
18.9234,
21.3654,
21.3654,
],
'quantityBucket': [
"PEAK",
"PEAK",
"PEAK",
"7X8",
"7X8",
"2X16H",
"2X16H",
"2X16H",
],
'contract': [
"J20",
"K20",
"M20",
"J20",
"K20",
"J20",
"K20",
"M20",
]
}
return pd.DataFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 8))
def mock_fx_vol(_cls, q):
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
return MarketDataResponseFrame({'impliedVolatility': [3]}, index=[pd.Timestamp('2019-01-04T12:00:00Z')])
d = {
'strikeReference': ['delta', 'spot', 'forward'],
'relativeStrike': [25, 100, 100],
'impliedVolatility': [5, 1, 2],
'forecast': [1.1, 1.1, 1.1]
}
df = MarketDataResponseFrame(data=d, index=pd.date_range('2019-01-01', periods=3, freq='D'))
df.dataset_ids = _test_datasets
return df
def mock_fx_forecast(_cls, _q):
d = {
'fxForecast': [1.1, 1.1, 1.1]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_fx_delta(_cls, _q):
d = {
'relativeStrike': [25, -25, 0],
'impliedVolatility': [1, 5, 2],
'forecast': [1.1, 1.1, 1.1]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_fx_empty(_cls, _q):
d = {
'strikeReference': [],
'relativeStrike': [],
'impliedVolatility': []
}
df = MarketDataResponseFrame(data=d, index=[])
df.dataset_ids = _test_datasets
return df
def mock_fx_switch(_cls, _q, _n):
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fx_empty)
replace.restore()
return Cross('MA1889', 'ABC/XYZ')
def mock_curr(_cls, _q):
d = {
'swapAnnuity': [1, 2, 3],
'swapRate': [1, 2, 3],
'basisSwapRate': [1, 2, 3],
'swaptionVol': [1, 2, 3],
'atmFwdRate': [1, 2, 3],
'midcurveVol': [1, 2, 3],
'capFloorVol': [1, 2, 3],
'spreadOptionVol': [1, 2, 3],
'inflationSwapRate': [1, 2, 3],
'midcurveAtmFwdRate': [1, 2, 3],
'capFloorAtmFwdRate': [1, 2, 3],
'spreadOptionAtmFwdRate': [1, 2, 3],
'strike': [0.25, 0.5, 0.75]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_cross(_cls, _q):
d = {
'basis': [1, 2, 3],
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_eq(_cls, _q):
d = {
'relativeStrike': [0.75, 0.25, 0.5],
'impliedVolatility': [5, 1, 2],
'impliedCorrelation': [5, 1, 2],
'realizedCorrelation': [3.14, 2.71828, 1.44],
'averageImpliedVolatility': [5, 1, 2],
'averageImpliedVariance': [5, 1, 2],
'averageRealizedVolatility': [5, 1, 2],
'impliedVolatilityByDeltaStrike': [5, 1, 2],
'fundamentalMetric': [5, 1, 2]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_eq_vol(_cls, q):
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
idx = [pd.Timestamp(datetime.datetime.now(pytz.UTC))]
return MarketDataResponseFrame({'impliedVolatility': [3]}, index=idx)
d = {
'impliedVolatility': [5, 1, 2],
}
end = datetime.datetime.now(pytz.UTC).date() - datetime.timedelta(days=1)
df = MarketDataResponseFrame(data=d, index=pd.date_range(end=end, periods=3, freq='D'))
df.dataset_ids = _test_datasets
return df
def mock_eq_vol_last_err(_cls, q):
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
raise MqValueError('error while getting last')
d = {
'impliedVolatility': [5, 1, 2],
}
end = datetime.date.today() - datetime.timedelta(days=1)
df = MarketDataResponseFrame(data=d, index=pd.date_range(end=end, periods=3, freq='D'))
df.dataset_ids = _test_datasets
return df
def mock_eq_vol_last_empty(_cls, q):
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
return MarketDataResponseFrame()
d = {
'impliedVolatility': [5, 1, 2],
}
end = datetime.date.today() - datetime.timedelta(days=1)
df = MarketDataResponseFrame(data=d, index=pd.date_range(end=end, periods=3, freq='D'))
df.dataset_ids = _test_datasets
return df
def mock_eq_norm(_cls, _q):
d = {
'relativeStrike': [-4.0, 4.0, 0],
'impliedVolatility': [5, 1, 2]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_eq_spot(_cls, _q):
d = {
'relativeStrike': [0.75, 1.25, 1.0],
'impliedVolatility': [5, 1, 2]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_inc(_cls, _q):
d = {
'relativeStrike': [0.25, 0.75],
'impliedVolatility': [5, 1]
}
df = MarketDataResponseFrame(data=d, index=_index * 2)
df.dataset_ids = _test_datasets
return df
def mock_meeting_expectation():
data_dict = MarketDataResponseFrame({'date': [dt.date(2019, 12, 6)],
'assetId': ['MARFAGXDQRWM07Y2'],
'location': ['NYC'],
'rateType': ['Meeting Forward'],
'startingDate': [dt.date(2020, 1, 29)],
'endingDate': [dt.date(2020, 1, 29)],
'meetingNumber': [2],
'valuationDate': [dt.date(2019, 12, 6)],
'meetingDate': [dt.date(2020, 1, 23)],
'value': [-0.004550907771]
})
data_dict.dataset_ids = _test_datasets
return data_dict
def mock_meeting_spot():
data_dict = MarketDataResponseFrame({'date': [dt.date(2019, 12, 6)],
'assetId': ['MARFAGXDQRWM07Y2'],
'location': ['NYC'],
'rateType': ['Meeting Forward'],
'startingDate': [dt.date(2019, 10, 30)],
'endingDate': [dt.date(2019, 12, 18)],
'meetingNumber': [0],
'valuationDate': [dt.date(2019, 12, 6)],
'meetingDate': [dt.date(2019, 10, 24)],
'value': [-0.004522570525]
})
data_dict.dataset_ids = _test_datasets
return data_dict
def mock_meeting_absolute():
data_dict = MarketDataResponseFrame({'date': [datetime.date(2019, 12, 6), datetime.date(2019, 12, 6)],
'assetId': ['MARFAGXDQRWM07Y2', 'MARFAGXDQRWM07Y2'],
'location': ['NYC', 'NYC'],
'rateType': ['Meeting Forward', 'Meeting Forward'],
'startingDate': [datetime.date(2019, 10, 30), datetime.date(2020, 1, 29)],
'endingDate': [datetime.date(2019, 10, 30), datetime.date(2020, 1, 29)],
'meetingNumber': [0, 2],
'valuationDate': [datetime.date(2019, 12, 6), datetime.date(2019, 12, 6)],
'meetingDate': [datetime.date(2019, 10, 24), datetime.date(2020, 1, 23)],
'value': [-0.004522570525, -0.004550907771]
})
data_dict.dataset_ids = _test_datasets
return data_dict
def mock_ois_spot():
data_dict = MarketDataResponseFrame({'date': [datetime.date(2019, 12, 6)],
'assetId': ['MARFAGXDQRWM07Y2'],
'location': ['NYC'],
'rateType': ['Spot'],
'startingDate': [datetime.date(2019, 12, 6)],
'endingDate': [datetime.date(2019, 12, 7)],
'meetingNumber': [-1],
'valuationDate': [datetime.date(2019, 12, 6)],
'meetingDate': [datetime.date(2019, 12, 6)],
'value': [-0.00455]
})
data_dict.dataset_ids = _test_datasets
return data_dict
def mock_esg(_cls, _q):
d = {
"esNumericScore": [2, 4, 6],
"esNumericPercentile": [81.2, 75.4, 65.7],
"esPolicyScore": [2, 4, 6],
"esPolicyPercentile": [81.2, 75.4, 65.7],
"esScore": [2, 4, 6],
"esPercentile": [81.2, 75.4, 65.7],
"esProductImpactScore": [2, 4, 6],
"esProductImpactPercentile": [81.2, 75.4, 65.7],
"gScore": [2, 4, 6],
"gPercentile": [81.2, 75.4, 65.7],
"esMomentumScore": [2, 4, 6],
"esMomentumPercentile": [81.2, 75.4, 65.7],
"gRegionalScore": [2, 4, 6],
"gRegionalPercentile": [81.2, 75.4, 65.7],
"controversyScore": [2, 4, 6],
"controversyPercentile": [81.2, 75.4, 65.7],
"esDisclosurePercentage": [49.2, 55.7, 98.4]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_index_positions_data(
asset_id,
start_date,
end_date,
fields=None,
position_type=None
):
return [
{'underlyingAssetId': 'MA3',
'netWeight': 0.1,
'positionType': 'close',
'assetId': 'MA890',
'positionDate': '2020-01-01'
},
{'underlyingAssetId': 'MA1',
'netWeight': 0.6,
'positionType': 'close',
'assetId': 'MA890',
'positionDate': '2020-01-01'
},
{'underlyingAssetId': 'MA2',
'netWeight': 0.3,
'positionType': 'close',
'assetId': 'MA890',
'positionDate': '2020-01-01'
}
]
def mock_rating(_cls, _q):
d = {
'rating': ['Buy', 'Sell', 'Buy', 'Neutral'],
'convictionList': [1, 0, 0, 0]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2020, 8, 13), datetime.date(2020, 8, 14),
datetime.date(2020, 8, 17), datetime.date(2020, 8, 18)]))
df.dataset_ids = _test_datasets
return df
def mock_gsdeer_gsfeer(_cls, assetId, start_date):
d = {
'gsdeer': [1, 1.2, 1.1],
'gsfeer': [2, 1.8, 1.9],
'year': [2000, 2010, 2020],
'quarter': ['Q1', 'Q2', 'Q3']
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
return df
def mock_factor_profile(_cls, _q):
d = {
'growthScore': [0.238, 0.234, 0.234, 0.230],
'financialReturnsScore': [0.982, 0.982, 0.982, 0.982],
'multipleScore': [0.204, 0.192, 0.190, 0.190],
'integratedScore': [0.672, 0.676, 0.676, 0.674]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2020, 8, 13), datetime.date(2020, 8, 14),
datetime.date(2020, 8, 17), datetime.date(2020, 8, 18)]))
df.dataset_ids = _test_datasets
return df
def mock_commodity_forecast(_cls, _q):
d = {
'forecastPeriod': ['3m', '3m', '3m', '3m'],
'forecastType': ['spotReturn', 'spotReturn', 'spotReturn', 'spotReturn'],
'commodityForecast': [1700, 1400, 1500, 1600]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2020, 8, 13), datetime.date(2020, 8, 14),
datetime.date(2020, 8, 17), datetime.date(2020, 8, 18)]))
df.dataset_ids = _test_datasets
return df
def test_skew():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.skew(mock_spx, '1m', tm.SkewReference.DELTA, 25)
assert_series_equal(pd.Series([2.0], index=_index, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_norm)
actual = tm.skew(mock_spx, '1m', tm.SkewReference.NORMALIZED, 4)
assert_series_equal(pd.Series([2.0], index=_index, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_spot)
actual = tm.skew(mock_spx, '1m', tm.SkewReference.SPOT, 25)
assert_series_equal(pd.Series([2.0], index=_index, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
mock.return_value = mock_empty_market_data_response()
actual = tm.skew(mock_spx, '1m', tm.SkewReference.SPOT, 25)
assert actual.empty
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_inc)
with pytest.raises(MqError):
tm.skew(mock_spx, '1m', tm.SkewReference.DELTA, 25)
replace.restore()
with pytest.raises(MqError):
tm.skew(mock_spx, '1m', None, 25)
def test_skew_fx():
replace = Replacer()
cross = Cross('MAA0NE9QX2ABETG6', 'USD/EUR')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='EURUSD', ))]
replace('gs_quant.markets.securities.SecurityMaster.get_asset', Mock()).return_value = cross
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fx_delta)
mock = cross
actual = tm.skew(mock, '1m', tm.SkewReference.DELTA, 25)
assert_series_equal(pd.Series([2.0], index=_index, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(MqError):
tm.skew(mock, '1m', tm.SkewReference.DELTA, 25, real_time=True)
with pytest.raises(MqError):
tm.skew(mock, '1m', tm.SkewReference.SPOT, 25)
with pytest.raises(MqError):
tm.skew(mock, '1m', tm.SkewReference.FORWARD, 25)
with pytest.raises(MqError):
tm.skew(mock, '1m', tm.SkewReference.NORMALIZED, 25)
with pytest.raises(MqError):
tm.skew(mock, '1m', None, 25)
replace.restore()
def test_implied_vol():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_vol)
idx = pd.date_range(end=datetime.datetime.now(pytz.UTC).date(), periods=4, freq='D')
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2, 3], index=idx, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_PUT, 75)
assert_series_equal(pd.Series([5, 1, 2, 3], index=idx, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(MqError):
tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_NEUTRAL)
with pytest.raises(MqError):
tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_CALL)
replace.restore()
def test_implied_vol_no_last():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
idx = pd.date_range(end=datetime.date.today() - datetime.timedelta(days=1), periods=3, freq='D')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_vol_last_err)
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2], index=idx, name='impliedVolatility'), pd.Series(actual))
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_PUT, 75)
assert_series_equal(pd.Series([5, 1, 2], index=idx, name='impliedVolatility'), pd.Series(actual))
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_vol_last_empty)
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2], index=idx, name='impliedVolatility'), pd.Series(actual))
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_PUT, 75)
assert_series_equal(pd.Series([5, 1, 2], index=idx, name='impliedVolatility'), pd.Series(actual))
replace.restore()
def test_implied_vol_fx():
replace = Replacer()
mock = Cross('MAA0NE9QX2ABETG6', 'USD/EUR')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='EURUSD', ))]
replace('gs_quant.markets.securities.SecurityMaster.get_asset', Mock()).return_value = mock
# for different delta strikes
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fx_vol)
actual = tm.implied_volatility(mock, '1m', tm.VolReference.DELTA_CALL, 25)
expected = pd.Series([5, 1, 2, 3], index=pd.date_range('2019-01-01', periods=4, freq='D'), name='impliedVolatility')
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_volatility(mock, '1m', tm.VolReference.DELTA_PUT, 25)
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_volatility(mock, '1m', tm.VolReference.DELTA_NEUTRAL)
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_volatility(mock, '1m', tm.VolReference.FORWARD, 100)
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_volatility(mock, '1m', tm.VolReference.SPOT, 100)
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
# missing strike, NORMALIZED, and non-100 SPOT/FORWARD strikes are not supported
with pytest.raises(MqError):
tm.implied_volatility(mock, '1m', tm.VolReference.DELTA_CALL)
with pytest.raises(MqError):
tm.implied_volatility(mock, '1m', tm.VolReference.NORMALIZED, 25)
with pytest.raises(MqError):
tm.implied_volatility(mock, '1m', tm.VolReference.SPOT, 25)
with pytest.raises(MqError):
tm.implied_volatility(mock, '1m', tm.VolReference.FORWARD, 25)
replace.restore()
def test_fx_forecast():
replace = Replacer()
mock = Cross('MAA0NE9QX2ABETG6', 'USD/EUR')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='EURUSD', ))]
replace('gs_quant.markets.securities.SecurityMaster.get_asset', Mock()).return_value = mock
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fx_forecast)
actual = tm.fx_forecast(mock, '12m')
assert_series_equal(pd.Series([1.1, 1.1, 1.1], index=_index * 3, name='fxForecast'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.fx_forecast(mock, '3m')
assert_series_equal(pd.Series([1.1, 1.1, 1.1], index=_index * 3, name='fxForecast'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.fx_forecast(mock, '3m', real_time=True)
replace.restore()
def test_fx_forecast_inverse():
replace = Replacer()
get_cross = replace('gs_quant.timeseries.measures.cross_to_usd_based_cross', Mock())
get_cross.return_value = "MATGYV0J9MPX534Z"
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fx_forecast)
mock = Cross("MAYJPCVVF2RWXCES", 'USD/JPY')
actual = tm.fx_forecast(mock, '3m')
assert_series_equal(pd.Series([1 / 1.1, 1 / 1.1, 1 / 1.1], index=_index * 3, name='fxForecast'),
pd.Series(actual))
assert actual.dataset_ids == _test_datasets
replace.restore()
def test_vol_smile():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.vol_smile(mock_spx, '1m', tm.VolSmileReference.FORWARD, '5d')
assert_series_equal(pd.Series([5, 1, 2], index=[0.75, 0.25, 0.5]), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.vol_smile(mock_spx, '1m', tm.VolSmileReference.SPOT, '5d')
assert_series_equal(pd.Series([5, 1, 2], index=[0.75, 0.25, 0.5]), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = mock_empty_market_data_response()
actual = tm.vol_smile(mock_spx, '1m', tm.VolSmileReference.SPOT, '1d')
assert actual.empty
assert actual.dataset_ids == ()
market_mock.assert_called_once()
with pytest.raises(NotImplementedError):
tm.vol_smile(mock_spx, '1m', tm.VolSmileReference.SPOT, '1d', real_time=True)
replace.restore()
def test_impl_corr():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.implied_correlation(mock_spx, '1m', tm.EdrDataReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='impliedCorrelation'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_correlation(mock_spx, '1m', tm.EdrDataReference.DELTA_PUT, 75)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='impliedCorrelation'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.implied_correlation(..., '1m', tm.EdrDataReference.DELTA_PUT, 75, real_time=True)
with pytest.raises(MqError):
tm.implied_correlation(..., '1m', tm.EdrDataReference.DELTA_CALL, 50, '')
replace.restore()
def test_impl_corr_n():
spx = Index('MA4B66MW5E27U8P32SB', AssetClass.Equity, 'SPX')
with pytest.raises(MqValueError):
tm.implied_correlation(spx, '1m', tm.EdrDataReference.DELTA_CALL, 0.5,
composition_date=datetime.date.today())
with pytest.raises(MqValueError):
tm.implied_correlation(spx, '1m', tm.EdrDataReference.DELTA_CALL, 0.5, 200)
resources = os.path.join(os.path.dirname(__file__), '..', 'resources')
i_vol = pd.read_csv(os.path.join(resources, 'SPX_50_icorr_in.csv'))
i_vol.index = pd.to_datetime(i_vol['date'])
weights = pd.read_csv(os.path.join(resources, 'SPX_50_weights.csv'))
weights.set_index('underlyingAssetId', inplace=True)
replace = Replacer()
market_data = replace('gs_quant.timeseries.econometrics.GsDataApi.get_market_data', Mock())
market_data.return_value = i_vol
constituents = replace('gs_quant.timeseries.measures._get_index_constituent_weights', Mock())
constituents.return_value = weights
expected = pd.read_csv(os.path.join(resources, 'SPX_50_icorr_out.csv'))
expected.index = pd.to_datetime(expected['date'])
expected = expected['value']
actual = tm.implied_correlation(spx, '1m', tm.EdrDataReference.DELTA_CALL, 0.5, 50, datetime.date(2020, 8, 31),
source='PlotTool')
pd.testing.assert_series_equal(actual, expected, check_names=False)
replace.restore()
def test_real_corr():
spx = Index('MA4B66MW5E27U8P32SB', AssetClass.Equity, 'SPX')
with pytest.raises(NotImplementedError):
tm.realized_correlation(spx, '1m', real_time=True)
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.realized_correlation(spx, '1m')
assert_series_equal(pd.Series([3.14, 2.71828, 1.44], index=_index * 3), pd.Series(actual), check_names=False)
assert actual.dataset_ids == _test_datasets
replace.restore()
def test_real_corr_missing():
spx = Index('MA4B66MW5E27U8P32SB', AssetClass.Equity, 'SPX')
d = {
'assetId': ['MA4B66MW5E27U8P32SB'] * 3,
'spot': [3000, 3100, 3050],
}
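# only index-level spot is available (no constituent data), so the 50-name realized correlation should raise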
df = MarketDataResponseFrame(data=d, index=pd.date_range('2020-08-01', periods=3, freq='D'))
resources = os.path.join(os.path.dirname(__file__), '..', 'resources')
weights = pd.read_csv(os.path.join(resources, 'SPX_50_weights.csv'))
weights.set_index('underlyingAssetId', inplace=True)
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', lambda *args, **kwargs: df)
constituents = replace('gs_quant.timeseries.measures._get_index_constituent_weights', Mock())
constituents.return_value = weights
with pytest.raises(MqValueError):
tm.realized_correlation(spx, '1m', 50)
replace.restore()
def test_real_corr_n():
spx = Index('MA4B66MW5E27U8P32SB', AssetClass.Equity, 'SPX')
with pytest.raises(MqValueError):
tm.realized_correlation(spx, '1m', composition_date=datetime.date.today())
with pytest.raises(MqValueError):
tm.realized_correlation(spx, '1m', 200)
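# CSV fixtures drive the full 50-name realized correlation computation against a precomputed expected series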
resources = os.path.join(os.path.dirname(__file__), '..', 'resources')
r_vol = pd.read_csv(os.path.join(resources, 'SPX_50_rcorr_in.csv'))
r_vol.index = pd.to_datetime(r_vol['date'])
weights = pd.read_csv(os.path.join(resources, 'SPX_50_weights.csv'))
weights.set_index('underlyingAssetId', inplace=True)
replace = Replacer()
market_data = replace('gs_quant.timeseries.econometrics.GsDataApi.get_market_data', Mock())
market_data.return_value = r_vol
constituents = replace('gs_quant.timeseries.measures._get_index_constituent_weights', Mock())
constituents.return_value = weights
expected = pd.read_csv(os.path.join(resources, 'SPX_50_rcorr_out.csv'))
expected.index = pd.to_datetime(expected['date'])
expected = expected['value']
actual = tm.realized_correlation(spx, '1m', 50, datetime.date(2020, 8, 31), source='PlotTool')
pd.testing.assert_series_equal(actual, expected, check_names=False)
replace.restore()
def test_cds_implied_vol():
replace = Replacer()
mock_cds = Index('MA890', AssetClass.Equity, 'CDS')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.cds_implied_volatility(mock_cds, '1m', '5y', tm.CdsVolReference.DELTA_CALL, 10)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='impliedVolatilityByDeltaStrike'),
pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.cds_implied_volatility(mock_cds, '1m', '5y', tm.CdsVolReference.FORWARD, 100)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='impliedVolatilityByDeltaStrike'),
pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.cds_implied_volatility(..., '1m', '5y', tm.CdsVolReference.DELTA_PUT, 75, real_time=True)
replace.restore()
def test_avg_impl_vol():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.average_implied_volatility(mock_spx, '1m', tm.EdrDataReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='averageImpliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.average_implied_volatility(mock_spx, '1m', tm.EdrDataReference.DELTA_PUT, 75)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='averageImpliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
replace.restore()
df1 = pd.DataFrame(data={'impliedVolatility': [1, 2, 3], 'assetId': ['MA1', 'MA1', 'MA1']},
index=pd.date_range(start='2020-01-01', periods=3))
df2 = pd.DataFrame(data={'impliedVolatility': [2, 3, 4], 'assetId': ['MA2', 'MA2', 'MA2']},
index=pd.date_range(start='2020-01-01', periods=3))
df3 = pd.DataFrame(data={'impliedVolatility': [2, 5], 'assetId': ['MA3', 'MA3']},
index=pd.date_range(start='2020-01-01', periods=2))
replace('gs_quant.api.gs.assets.GsAssetApi.get_asset_positions_data', mock_index_positions_data)
market_data_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
mock_implied_vol = MarketDataResponseFrame(pd.concat([df1, df2, df3], join='inner'))
mock_implied_vol.dataset_ids = _test_datasets
market_data_mock.return_value = mock_implied_vol
actual = tm.average_implied_volatility(mock_spx, '1m', tm.EdrDataReference.DELTA_CALL, 25, 3, '1d')
assert_series_equal(pd.Series([1.4, 2.6, 3.33333],
index=pd.date_range(start='2020-01-01', periods=3), name='averageImpliedVolatility'),
pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.average_implied_volatility(..., '1m', tm.EdrDataReference.DELTA_PUT, 75, real_time=True)
with pytest.raises(MqValueError):
tm.average_implied_volatility(mock_spx, '1m', tm.EdrDataReference.DELTA_PUT, 75, top_n_of_index=None,
composition_date='1d')
with pytest.raises(NotImplementedError):
tm.average_implied_volatility(mock_spx, '1m', tm.EdrDataReference.DELTA_PUT, 75, top_n_of_index=101)
replace.restore()
def test_avg_realized_vol():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.average_realized_volatility(mock_spx, '1m')
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='averageRealizedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
replace.restore()
df1 = pd.DataFrame(data={'spot': [1, 2, 3], 'assetId': ['MA1', 'MA1', 'MA1']},
index=pd.date_range(start='2020-01-01', periods=3))
df2 = pd.DataFrame(data={'spot': [2, 3, 4], 'assetId': ['MA2', 'MA2', 'MA2']},
index=pd.date_range(start='2020-01-01', periods=3))
df3 = pd.DataFrame(data={'spot': [2, 5], 'assetId': ['MA3', 'MA3']},
index=pd.date_range(start='2020-01-01', periods=2))
mock_spot = MarketDataResponseFrame(pd.concat([df1, df2, df3], join='inner'))
mock_spot.dataset_ids = _test_datasets
replace('gs_quant.api.gs.assets.GsAssetApi.get_asset_positions_data', mock_index_positions_data)
market_data_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_data_mock.return_value = mock_spot
actual = tm.average_realized_volatility(mock_spx, '2d', Returns.SIMPLE, 3, '1d')
assert_series_equal(pd.Series([392.874026], index=pd.date_range(start='2020-01-03', periods=1),
name='averageRealizedVolatility'),
pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.average_realized_volatility(mock_spx, '1w', real_time=True)
with pytest.raises(MqValueError):
tm.average_realized_volatility(mock_spx, '1w', composition_date='1d')
with pytest.raises(NotImplementedError):
tm.average_realized_volatility(mock_spx, '1w', Returns.LOGARITHMIC)
with pytest.raises(NotImplementedError):
tm.average_realized_volatility(mock_spx, '1w', Returns.SIMPLE, 201)
replace.restore()
empty_positions_data_mock = replace('gs_quant.api.gs.assets.GsAssetApi.get_asset_positions_data', Mock())
empty_positions_data_mock.return_value = []
with pytest.raises(MqValueError):
tm.average_realized_volatility(mock_spx, '1w', Returns.SIMPLE, 5)
replace.restore()
def test_avg_impl_var():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.average_implied_variance(mock_spx, '1m', tm.EdrDataReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='averageImpliedVariance'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.average_implied_variance(mock_spx, '1m', tm.EdrDataReference.DELTA_PUT, 75)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='averageImpliedVariance'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.average_implied_variance(..., '1m', tm.EdrDataReference.DELTA_PUT, 75, real_time=True)
replace.restore()
def test_basis_swap_spread(mocker):
replace = Replacer()
args = dict(swap_tenor='10y', spread_benchmark_type=None, spread_tenor=None,
reference_benchmark_type=None, reference_tenor=None, forward_tenor='0b', real_time=False)
mock_nok = Currency('MA891', 'NOK')
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'NOK'
args['asset'] = mock_nok
with pytest.raises(NotImplementedError):
tm_rates.basis_swap_spread(**args)
mock_usd = Currency('MAZ7RWC904JYHYPS', 'USD')
args['asset'] = mock_usd
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
with pytest.raises(NotImplementedError):
tm_rates.basis_swap_spread(..., '1y', real_time=True)
args['swap_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.basis_swap_spread(**args)
args['swap_tenor'] = '6y'
args['spread_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.basis_swap_spread(**args)
args['spread_tenor'] = '3m'
args['reference_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.basis_swap_spread(**args)
args['reference_tenor'] = '6m'
args['forward_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.basis_swap_spread(**args)
args['forward_tenor'] = None
args['spread_benchmark_type'] = BenchmarkType.STIBOR
with pytest.raises(MqValueError):
tm_rates.basis_swap_spread(**args)
args['spread_benchmark_type'] = BenchmarkType.LIBOR
args['reference_benchmark_type'] = 'libor_3m'
with pytest.raises(MqValueError):
tm_rates.basis_swap_spread(**args)
args['reference_benchmark_type'] = BenchmarkType.LIBOR
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
identifiers = replace('gs_quant.timeseries.measures_rates._get_tdapi_rates_assets', Mock())
identifiers.return_value = {'MAQB1PGEJFCET3GG'}
mocker.patch.object(GsDataApi, 'get_market_data', return_value=mock_curr(None, None))
actual = tm_rates.basis_swap_spread(**args)
expected = tm.ExtendedSeries([1, 2, 3], index=_index * 3, name='basisSwapRate')
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual)
assert actual.dataset_ids == expected.dataset_ids
args['reference_benchmark_type'] = BenchmarkType.SOFR
args['reference_tenor'] = '1y'
args['reference_benchmark_type'] = BenchmarkType.LIBOR
args['reference_tenor'] = '3m'
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
identifiers = replace('gs_quant.timeseries.measures_rates._get_tdapi_rates_assets', Mock())
identifiers.return_value = {'MA06ATQ9CM0DCZFC'}
mocker.patch.object(GsDataApi, 'get_market_data', return_value=mock_curr(None, None))
actual = tm_rates.basis_swap_spread(**args)
expected = tm.ExtendedSeries([1, 2, 3], index=_index * 3, name='basisSwapRate')
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual)
assert actual.dataset_ids == expected.dataset_ids
replace.restore()
def test_swap_rate(mocker):
replace = Replacer()
args = dict(swap_tenor='10y', benchmark_type=None, floating_rate_tenor=None, forward_tenor='0b', real_time=False)
mock_usd = Currency('MAZ7RWC904JYHYPS', 'USD')
args['asset'] = mock_usd
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
args['swap_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_rate(**args)
args['swap_tenor'] = '10y'
args['floating_rate_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_rate(**args)
args['floating_rate_tenor'] = '1y'
args['forward_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_rate(**args)
args['forward_tenor'] = None
args['benchmark_type'] = BenchmarkType.STIBOR
with pytest.raises(MqValueError):
tm_rates.swap_rate(**args)
args['benchmark_type'] = 'sonia'
with pytest.raises(MqValueError):
tm_rates.swap_rate(**args)
args['benchmark_type'] = 'fed_funds'
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
identifiers = replace('gs_quant.timeseries.measures_rates._get_tdapi_rates_assets', Mock())
identifiers.return_value = {'MAZ7RWC904JYHYPS'}
mocker.patch.object(GsDataApi, 'get_market_data', return_value=mock_curr(None, None))
actual = tm_rates.swap_rate(**args)
expected = tm.ExtendedSeries([1, 2, 3], index=_index * 3, name='swapRate')
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual)
assert actual.dataset_ids == _test_datasets
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'EUR'
identifiers = replace('gs_quant.timeseries.measures_rates._get_tdapi_rates_assets', Mock())
identifiers.return_value = {'MAJNQPFGN1EBDHAE'}
mocker.patch.object(GsDataApi, 'get_market_data', return_value=mock_curr(None, None))
args['asset'] = Currency('MAJNQPFGN1EBDHAE', 'EUR')
args['benchmark_type'] = 'estr'
actual = tm_rates.swap_rate(**args)
expected = tm.ExtendedSeries([1, 2, 3], index=_index * 3, name='swapRate')
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual)
assert actual.dataset_ids == _test_datasets
replace.restore()
def test_swap_annuity(mocker):
replace = Replacer()
args = dict(swap_tenor='10y', benchmark_type=None, floating_rate_tenor=None, forward_tenor='0b', real_time=False)
mock_pln = Currency('MA891', 'PLN')
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'PLN'
args['asset'] = mock_pln
with pytest.raises(NotImplementedError):
tm_rates.swap_annuity(**args)
mock_usd = Currency('MAZ7RWC904JYHYPS', 'USD')
args['asset'] = mock_usd
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
with pytest.raises(NotImplementedError):
tm_rates.swap_annuity(..., '1y', real_time=True)
args['swap_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_annuity(**args)
args['swap_tenor'] = '10y'
args['floating_rate_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_annuity(**args)
args['floating_rate_tenor'] = '1y'
args['forward_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_annuity(**args)
args['forward_tenor'] = None
args['benchmark_type'] = BenchmarkType.STIBOR
with pytest.raises(MqValueError):
tm_rates.swap_annuity(**args)
args['benchmark_type'] = BenchmarkType.SOFR
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
identifiers = replace('gs_quant.timeseries.measures_rates._get_tdapi_rates_assets', Mock())
identifiers.return_value = {'MAZ7RWC904JYHYPS'}
mocker.patch.object(GsDataApi, 'get_market_data', return_value=mock_curr(None, None))
actual = tm_rates.swap_annuity(**args)
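# expected mirrors the measure's output: the raw mock annuity scaled by 1e4 / 1e8 and made positive via abs()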
expected = abs(tm.ExtendedSeries([1.0, 2.0, 3.0], index=_index * 3, name='swapAnnuity') * 1e4 / 1e8)
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual)
assert actual.dataset_ids == expected.dataset_ids
replace.restore()
def test_swap_term_structure():
replace = Replacer()
args = dict(benchmark_type=None, floating_rate_tenor=None, tenor_type=tm_rates._SwapTenorType.FORWARD_TENOR,
tenor='0b', real_time=False)
mock_pln = Currency('MA891', 'PLN')
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'PLN'
args['asset'] = mock_pln
with pytest.raises(NotImplementedError):
tm_rates.swap_term_structure(**args)
mock_usd = Currency('MAZ7RWC904JYHYPS', 'USD')
args['asset'] = mock_usd
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
with pytest.raises(NotImplementedError):
tm_rates.swap_term_structure(..., '1y', real_time=True)
args['floating_rate_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_term_structure(**args)
args['floating_rate_tenor'] = '3m'
args['tenor_type'] = 'expiry'
with pytest.raises(MqValueError):
tm_rates.swap_term_structure(**args)
args['tenor_type'] = None
args['tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_term_structure(**args)
args['tenor'] = None
args['benchmark_type'] = BenchmarkType.STIBOR
with pytest.raises(MqValueError):
tm_rates.swap_term_structure(**args)
args['benchmark_type'] = BenchmarkType.LIBOR
bd_mock = replace('gs_quant.data.dataset.Dataset.get_data', Mock())
bd_mock.return_value = pd.DataFrame(data=dict(date="2020-04-10", exchange="NYC", description="Good Friday"),
index=[pd.Timestamp('2020-04-10')])
args['pricing_date'] = datetime.date(2020, 4, 10)
with pytest.raises(MqValueError):
tm_rates.swap_term_structure(**args)
args['pricing_date'] = None
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
identifiers_empty = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
identifiers_empty.return_value = {}
with pytest.raises(MqValueError):
tm_rates.swap_term_structure(**args)
identifiers = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
mock_asset = Currency('USD', name='USD')
mock_asset.id = 'MAEMPCXQG3T716EX'
mock_asset.exchange = 'OTC'
identifiers.return_value = [mock_asset]
d = {
'terminationTenor': ['1y', '2y', '3y', '4y'], 'swapRate': [1, 2, 3, 4],
'assetId': ['MAEMPCXQG3T716EX', 'MAFRSWPAF5QPNTP2', 'MA88BXZ3TCTXTFW1', 'MAC4KAG9B9ZAZHFT']
}
pricing_date_mock = replace('gs_quant.timeseries.measures_rates._range_from_pricing_date', Mock())
pricing_date_mock.return_value = [datetime.date(2019, 1, 1), datetime.date(2019, 1, 1)]
bd_mock.return_value = pd.DataFrame()
market_data_mock = replace('gs_quant.timeseries.measures_rates._market_data_timed', Mock())
market_data_mock.return_value = pd.DataFrame()
df = pd.DataFrame(data=d, index=_index * 4)
assert tm_rates.swap_term_structure(**args).empty
market_data_mock.return_value = df
with DataContext('2019-01-01', '2025-01-01'):
actual = tm_rates.swap_term_structure(**args)
actual.dataset_ids = _test_datasets
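# terminationTenor 1y-4y projected from the 2019-01-01 pricing date, rolled back to the preceding business day when the anniversary falls on a weekend (2022-01-01 -> 2021-12-31, 2023-01-01 -> 2022-12-30)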
expected = tm.ExtendedSeries([1, 2, 3, 4], index=pd.to_datetime(['2020-01-01', '2021-01-01', '2021-12-31',
'2022-12-30']))
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual, check_names=False)
assert actual.dataset_ids == expected.dataset_ids
df = pd.DataFrame(data={'effectiveTenor': ['1y'], 'swapRate': [1], 'assetId': ['MAEMPCXQG3T716EX']}, index=_index)
market_data_mock.return_value = df
args['tenor_type'] = 'swap_tenor'
args['tenor'] = '5y'
with DataContext('2019-01-01', '2025-01-01'):
actual = tm_rates.swap_term_structure(**args)
actual.dataset_ids = _test_datasets
expected = tm.ExtendedSeries([1], index=pd.to_datetime(['2020-01-01']))
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual, check_names=False)
assert actual.dataset_ids == expected.dataset_ids
d = {
'effectiveTenor': ['1y', '2y', '3y', '4y'], 'swapRate': [1, 2, 3, 4],
'assetId': ['MAEMPCXQG3T716EX', 'MAFRSWPAF5QPNTP2', 'MA88BXZ3TCTXTFW1', 'MAC4KAG9B9ZAZHFT']
}
df = pd.DataFrame(data=d, index=_index * 4)
market_data_mock.return_value = df
args['tenor_type'] = 'swap_tenor'
args['tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_term_structure(**args)
args['tenor'] = '5y'
market_data_mock.return_value = pd.DataFrame()
df = pd.DataFrame(data=d, index=_index * 4)
assert tm_rates.swap_term_structure(**args).empty
market_data_mock.return_value = df
with DataContext('2019-01-01', '2025-01-01'):
actual = tm_rates.swap_term_structure(**args)
actual.dataset_ids = _test_datasets
expected = tm.ExtendedSeries([1, 2, 3, 4], index=pd.to_datetime(['2020-01-01', '2021-01-01', '2021-12-31',
'2022-12-30']))
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual, check_names=False)
assert actual.dataset_ids == expected.dataset_ids
replace.restore()
def test_basis_swap_term_structure():
replace = Replacer()
range_mock = replace('gs_quant.timeseries.measures_rates._range_from_pricing_date', Mock())
range_mock.return_value = [datetime.date(2019, 1, 1), datetime.date(2019, 1, 1)]
args = dict(spread_benchmark_type=None, spread_tenor=None,
reference_benchmark_type=None, reference_tenor=None, tenor_type=tm_rates._SwapTenorType.FORWARD_TENOR,
tenor='0b', real_time=False)
mock_nok = Currency('MA891', 'NOK')
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'NOK'
args['asset'] = mock_nok
with pytest.raises(NotImplementedError):
tm_rates.basis_swap_term_structure(**args)
mock_usd = Currency('MAZ7RWC904JYHYPS', 'USD')
args['asset'] = mock_usd
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
with pytest.raises(NotImplementedError):
tm_rates.basis_swap_term_structure(..., '1y', real_time=True)
args['spread_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.basis_swap_term_structure(**args)
args['spread_tenor'] = '3m'
args['reference_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.basis_swap_term_structure(**args)
args['reference_tenor'] = '6m'
args['tenor_type'] = 'expiry'
with pytest.raises(MqValueError):
tm_rates.basis_swap_term_structure(**args)
args['tenor_type'] = 'forward_tenor'
args['tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.basis_swap_term_structure(**args)
args['tenor'] = None
args['spread_benchmark_type'] = BenchmarkType.STIBOR
with pytest.raises(MqValueError):
tm_rates.basis_swap_term_structure(**args)
args['spread_benchmark_type'] = BenchmarkType.LIBOR
args['reference_benchmark_type'] = BenchmarkType.STIBOR
with pytest.raises(MqValueError):
tm_rates.basis_swap_term_structure(**args)
args['reference_benchmark_type'] = BenchmarkType.LIBOR
bd_mock = replace('gs_quant.data.dataset.Dataset.get_data', Mock())
bd_mock.return_value = pd.DataFrame(data=dict(date="2020-04-10", exchange="NYC", description="Good Friday"),
index=[pd.Timestamp('2020-04-10')])
args['pricing_date'] = datetime.date(2020, 4, 10)
with pytest.raises(MqValueError):
tm_rates.basis_swap_term_structure(**args)
args['pricing_date'] = None
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
identifiers_empty = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
identifiers_empty.return_value = {}
with pytest.raises(MqValueError):
tm_rates.basis_swap_term_structure(**args)
identifiers = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
mock_asset = Currency('USD', name='USD')
mock_asset.id = 'MAEMPCXQG3T716EX'
mock_asset.exchange = 'OTC'
identifiers.return_value = [mock_asset]
d = {
'terminationTenor': ['1y', '2y', '3y', '4y'], 'basisSwapRate': [1, 2, 3, 4],
'assetId': ['MAEMPCXQG3T716EX', 'MAFRSWPAF5QPNTP2', 'MA88BXZ3TCTXTFW1', 'MAC4KAG9B9ZAZHFT']
}
pricing_date_mock = replace('gs_quant.timeseries.measures_rates._range_from_pricing_date', Mock())
pricing_date_mock.return_value = [datetime.date(2019, 1, 1), datetime.date(2019, 1, 1)]
bd_mock.return_value = pd.DataFrame()
market_data_mock = replace('gs_quant.timeseries.measures_rates._market_data_timed', Mock())
market_data_mock.return_value = pd.DataFrame()
assert tm_rates.basis_swap_term_structure(**args).empty
df = pd.DataFrame(data=d, index=_index * 4)
market_data_mock.return_value = df
with DataContext('2019-01-01', '2025-01-01'):
actual = tm_rates.basis_swap_term_structure(**args)
actual.dataset_ids = _test_datasets
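# same 1y-4y maturity mapping (business-day adjusted from the 2019-01-01 pricing date) as in test_swap_term_structure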
expected = tm.ExtendedSeries([1, 2, 3, 4], index=pd.to_datetime(['2020-01-01', '2021-01-01', '2021-12-31',
'2022-12-30']))
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual, check_names=False)
assert actual.dataset_ids == expected.dataset_ids
d = {
'effectiveTenor': ['1y', '2y', '3y', '4y'], 'basisSwapRate': [1, 2, 3, 4],
'assetId': ['MAEMPCXQG3T716EX', 'MAFRSWPAF5QPNTP2', 'MA88BXZ3TCTXTFW1', 'MAC4KAG9B9ZAZHFT']
}
bd_mock.return_value = pd.DataFrame()
market_data_mock = replace('gs_quant.timeseries.measures_rates._market_data_timed', Mock())
df = pd.DataFrame(data=d, index=_index * 4)
market_data_mock.return_value = df
args['tenor_type'] = tm_rates._SwapTenorType.SWAP_TENOR
args['tenor'] = '5y'
with DataContext('2019-01-01', '2025-01-01'):
actual = tm_rates.basis_swap_term_structure(**args)
actual.dataset_ids = _test_datasets
expected = tm.ExtendedSeries([1, 2, 3, 4], index=pd.to_datetime(['2020-01-01', '2021-01-01', '2021-12-31',
'2022-12-30']))
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual, check_names=False)
assert actual.dataset_ids == expected.dataset_ids
df = pd.DataFrame(data={'effectiveTenor': ['1y'], 'basisSwapRate': [1], 'assetId': ['MAEMPCXQG3T716EX']},
index=_index)
market_data_mock.return_value = df
with DataContext('2019-01-01', '2025-01-01'):
actual = tm_rates.basis_swap_term_structure(**args)
actual.dataset_ids = _test_datasets
expected = tm.ExtendedSeries([1], index=pd.to_datetime(['2020-01-01']))
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual, check_names=False)
assert actual.dataset_ids == expected.dataset_ids
replace.restore()
def test_cap_floor_vol():
replace = Replacer()
mock_usd = Currency('MA890', 'USD')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='USD', ))]
identifiers = replace('gs_quant.timeseries.measures.GsAssetApi.map_identifiers', Mock())
identifiers.return_value = {'USD-LIBOR-BBA': 'MA123'}
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_curr)
actual = tm.cap_floor_vol(mock_usd, '5y', 50)
assert_series_equal(pd.Series([1, 2, 3], index=_index * 3, name='capFloorVol'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.cap_floor_vol(..., '5y', 50, real_time=True)
replace.restore()
def test_cap_floor_atm_fwd_rate():
replace = Replacer()
mock_usd = Currency('MA890', 'USD')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='USD', ))]
identifiers = replace('gs_quant.timeseries.measures.GsAssetApi.map_identifiers', Mock())
identifiers.return_value = {'USD-LIBOR-BBA': 'MA123'}
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_curr)
actual = tm.cap_floor_atm_fwd_rate(mock_usd, '5y')
assert_series_equal(pd.Series([1, 2, 3], index=_index * 3, name='capFloorAtmFwdRate'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.cap_floor_atm_fwd_rate(..., '5y', real_time=True)
replace.restore()
def test_spread_option_vol():
replace = Replacer()
mock_usd = Currency('MA890', 'USD')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='USD', ))]
identifiers = replace('gs_quant.timeseries.measures.GsAssetApi.map_identifiers', Mock())
identifiers.return_value = {'USD-LIBOR-BBA': 'MA123'}
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_curr)
actual = tm.spread_option_vol(mock_usd, '3m', '10y', '5y', 50)
assert_series_equal(pd.Series([1, 2, 3], index=_index * 3, name='spreadOptionVol'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.spread_option_vol(..., '3m', '10y', '5y', 50, real_time=True)
replace.restore()
def test_spread_option_atm_fwd_rate():
replace = Replacer()
mock_usd = Currency('MA890', 'USD')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='USD', ))]
identifiers = replace('gs_quant.timeseries.measures.GsAssetApi.map_identifiers', Mock())
identifiers.return_value = {'USD-LIBOR-BBA': 'MA123'}
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_curr)
actual = tm.spread_option_atm_fwd_rate(mock_usd, '3m', '10y', '5y')
assert_series_equal(pd.Series([1, 2, 3], index=_index * 3, name='spreadOptionAtmFwdRate'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.spread_option_atm_fwd_rate(..., '3m', '10y', '5y', real_time=True)
replace.restore()
def test_zc_inflation_swap_rate():
replace = Replacer()
mock_gbp = Currency('MA890', 'GBP')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='GBP', ))]
identifiers = replace('gs_quant.timeseries.measures.GsAssetApi.map_identifiers', Mock())
identifiers.return_value = {'CPI-UKRPI': 'MA123'}
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_curr)
actual = tm.zc_inflation_swap_rate(mock_gbp, '1y')
assert_series_equal(pd.Series([1, 2, 3], index=_index * 3, name='inflationSwapRate'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.zc_inflation_swap_rate(..., '1y', real_time=True)
replace.restore()
def test_basis():
replace = Replacer()
mock_jpyusd = Cross('MA890', 'USD/JPY')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='JPYUSD', ))]
identifiers = replace('gs_quant.timeseries.measures.GsAssetApi.map_identifiers', Mock())
identifiers.return_value = {'USD-3m/JPY-3m': 'MA123'}
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_cross)
actual = tm.basis(mock_jpyusd, '1y')
assert_series_equal(pd.Series([1, 2, 3], index=_index * 3, name='basis'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.basis(..., '1y', real_time=True)
replace.restore()
def test_td():
cases = {'3d': pd.DateOffset(days=3), '9w': pd.DateOffset(weeks=9), '2m': pd.DateOffset(months=2),
'10y': pd.DateOffset(years=10)
}
for k, v in cases.items():
actual = tm._to_offset(k)
assert v == actual, f'expected {v}, got actual {actual}'
with pytest.raises(ValueError):
tm._to_offset('5z')
def test_pricing_range():
import datetime
given = datetime.date(2019, 4, 20)
s, e = tm._range_from_pricing_date('NYSE', given)
assert s == e == given
class MockDate(datetime.date):
@classmethod
def today(cls):
return cls(2019, 5, 25)
# mock
replace = Replacer()
cbd = replace('gs_quant.timeseries.measures._get_custom_bd', Mock())
cbd.return_value = pd.tseries.offsets.BusinessDay()
today = replace('gs_quant.timeseries.measures.pd.Timestamp.today', Mock())
today.return_value = pd.Timestamp(2019, 5, 25)
gold = datetime.date
datetime.date = MockDate
# cases
s, e = tm._range_from_pricing_date('ANY')
assert s == pd.Timestamp(2019, 5, 24)
assert e == pd.Timestamp(2019, 5, 24)
s, e = tm._range_from_pricing_date('ANY', '3m')
assert s == pd.Timestamp(2019, 2, 22)
assert e == pd.Timestamp(2019, 2, 24)
s, e = tm._range_from_pricing_date('ANY', '3b')
assert s == e == pd.Timestamp(2019, 5, 22)
# restore
datetime.date = gold
replace.restore()
def test_var_swap_tenors():
session = GsSession.get(Environment.DEV, token='<PASSWORD>')
replace = Replacer()
get_mock = replace('gs_quant.session.GsSession._get', Mock())
get_mock.return_value = {
'data': [
{
'dataField': 'varSwap',
'filteredFields': [
{
'field': 'tenor',
'values': ['abc', 'xyc']
}
]
}
]
}
with session:
actual = tm._var_swap_tenors(Index('MAXXX', AssetClass.Equity, 'XXX'))
assert actual == ['abc', 'xyc']
get_mock.return_value = {
'data': []
}
with pytest.raises(MqError):
with session:
tm._var_swap_tenors(Index('MAXXX', AssetClass.Equity, 'XXX'))
replace.restore()
def test_tenor_to_month():
with pytest.raises(MqError):
tm._tenor_to_month('1d')
with pytest.raises(MqError):
tm._tenor_to_month('2w')
assert tm._tenor_to_month('3m') == 3
assert tm._tenor_to_month('4y') == 48
def test_month_to_tenor():
assert tm._month_to_tenor(36) == '3y'
assert tm._month_to_tenor(18) == '18m'
def test_forward_var_term():
idx = pd.DatetimeIndex([datetime.date(2020, 4, 1), datetime.date(2020, 4, 2)] * 6)
data = {
'varSwap': [1.1, 1, 2.1, 2, 3.1, 3, 4.1, 4, 5.1, 5, 6.1, 6],
'tenor': ['1w', '1w', '1m', '1m', '5w', '5w', '2m', '2m', '3m', '3m', '5m', '5m']
}
out = MarketDataResponseFrame(data=data, index=idx)
out.dataset_ids = _test_datasets
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = out
# Equity
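# expected forward marks follow total-variance interpolation between adjacent tenors, sqrt((T2 * v2 ** 2 - T1 * v1 ** 2) / (T2 - T1)) with T in months, e.g. sqrt((2 * 4 ** 2 - 1 * 2 ** 2) / 1) = 5.29150 from the 2020-04-02 marks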
expected = pd.Series([np.nan, 5.29150, 6.55744], name='forwardVarTerm',
index=pd.DatetimeIndex(['2020-05-01', '2020-06-02', '2020-07-02'], name='expirationDate'))
with DataContext('2020-01-01', '2020-07-31'):
actual = tm.forward_var_term(Index('MA123', AssetClass.Equity, '123'), datetime.date(2020, 4, 2))
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
market_mock.assert_called_once()
# FX
expected_fx = pd.Series([np.nan, 5.29150, 6.55744, 7.24569], name='forwardVarTerm',
index=pd.DatetimeIndex(['2020-05-01', '2020-06-02', '2020-07-02', '2020-09-02'],
name='expirationDate'))
with DataContext('2020-01-01', '2020-09-02'):
actual_fx = tm.forward_var_term(Cross('ABCDE', 'EURUSD'))
assert_series_equal(expected_fx, pd.Series(actual_fx))
assert actual_fx.dataset_ids == _test_datasets
# no data
market_mock.reset_mock()
market_mock.return_value = mock_empty_market_data_response()
actual = tm.forward_var_term(Index('MA123', AssetClass.Equity, '123'))
assert actual.empty
# real-time
with pytest.raises(NotImplementedError):
tm.forward_var_term(..., real_time=True)
replace.restore()
def _mock_var_swap_data(_cls, q):
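# mock market data: a 'Last' query returns the latest intraday varSwap mark, otherwise a 3-day daily history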
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
return MarketDataResponseFrame({'varSwap': [4]}, index=[pd.Timestamp('2019-01-04T12:00:00Z')])
idx = pd.date_range(start="2019-01-01", periods=3, freq="D")
data = {
'varSwap': [1, 2, 3]
}
out = MarketDataResponseFrame(data=data, index=idx)
out.dataset_ids = _test_datasets
return out
def test_var_swap():
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', _mock_var_swap_data)
expected = pd.Series([1, 2, 3, 4], name='varSwap', index=pd.date_range("2019-01-01", periods=4, freq="D"))
actual = tm.var_swap(Index('MA123', AssetClass.Equity, '123'), '1m')
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = mock_empty_market_data_response()
actual = tm.var_swap(Index('MA123', AssetClass.Equity, '123'), '1m')
assert actual.empty
replace.restore()
def _mock_var_swap_fwd(_cls, q):
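# mock market data: a 'Last' query returns the latest 1y and 13m marks, otherwise a 3-day history for both tenors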
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
return MarketDataResponseFrame({'varSwap': [4, 4.5], 'tenor': ['1y', '13m']},
index=[pd.Timestamp('2019-01-04T12:00:00Z')] * 2)
idx = pd.date_range(start="2019-01-01", periods=3, freq="D")
d1 = {
'varSwap': [1, 2, 3],
'tenor': ['1y'] * 3
}
d2 = {
'varSwap': [1.5, 2.5, 3.5],
'tenor': ['13m'] * 3
}
df1 = MarketDataResponseFrame(data=d1, index=idx)
df2 = MarketDataResponseFrame(data=d2, index=idx)
out = pd.concat([df1, df2])
out.dataset_ids = _test_datasets
return out
def _mock_var_swap_1t(_cls, q):
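# mock market data with history for the 1y tenor only; the missing 13m leg should make the forward-starting var swap empty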
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
return MarketDataResponseFrame({'varSwap': [4, 4.5], 'tenor': ['1y', '13m']},
index=[pd.Timestamp('2019-01-04T12:00:00Z')])
idx = pd.date_range(start="2019-01-01", periods=3, freq="D")
d1 = {
'varSwap': [1, 2, 3],
'tenor': ['1y'] * 3
}
df1 = MarketDataResponseFrame(data=d1, index=idx)
df1.dataset_ids = _test_datasets
return df1
def test_var_swap_fwd():
# bad input
with pytest.raises(MqError):
tm.var_swap(Index('MA123', AssetClass.Equity, '123'), '1m', 500)
# regular
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', _mock_var_swap_fwd)
tenors_mock = replace('gs_quant.timeseries.measures._var_swap_tenors', Mock())
tenors_mock.return_value = ['1m', '1y', '13m']
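# 1m var swap starting 1y forward combines the 1y and 13m marks: sqrt((13 * v_13m ** 2 - 12 * v_1y ** 2) / 1), e.g. sqrt(13 * 1.5 ** 2 - 12 * 1 ** 2) = 4.1533 on the first date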
expected = pd.Series([4.1533, 5.7663, 7.1589, 8.4410], name='varSwap',
index=pd.date_range(start="2019-01-01", periods=4, freq="D"))
actual = tm.var_swap(Index('MA123', AssetClass.Equity, '123'), '1m', '1y')
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
# no data
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = mock_empty_market_data_response()
actual = tm.var_swap(Index('MA123', AssetClass.Equity, '123'), '1m', '1y')
assert actual.empty
assert actual.dataset_ids == ()
# no data for a tenor
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', _mock_var_swap_1t)
actual = tm.var_swap(Index('MA123', AssetClass.Equity, '123'), '1m', '1y')
assert actual.empty
assert actual.dataset_ids == ()
# no such tenors
tenors_mock.return_value = []
actual = tm.var_swap(Index('MA123', AssetClass.Equity, '123'), '1m', '1y')
assert actual.empty
assert actual.dataset_ids == ()
# finish
replace.restore()
def _var_term_typical():
assert DataContext.current_is_set
data = {
'tenor': ['1w', '2w', '1y', '2y'],
'varSwap': [1, 2, 3, 4]
}
out = MarketDataResponseFrame(data=data, index=pd.DatetimeIndex(['2018-01-01'] * 4))
out.dataset_ids = _test_datasets
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = out
actual = tm.var_term(Index('MA123', AssetClass.Equity, '123'))
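# tenors 1w/2w/1y/2y are mapped to expiration dates offset from the 2018-01-01 quote date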
idx = pd.DatetimeIndex(['2018-01-08', '2018-01-15', '2019-01-01', '2020-01-01'], name='varSwap')
expected = pd.Series([1, 2, 3, 4], name='varSwap', index=idx)
expected = expected.loc[DataContext.current.start_date: DataContext.current.end_date]
if expected.empty:
assert actual.empty
else:
assert_series_equal(expected, pd.Series(actual), check_names=False)
assert actual.dataset_ids == _test_datasets
market_mock.assert_called_once()
replace.restore()
return actual
def _var_term_empty():
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = mock_empty_market_data_response()
actual = tm.var_term(Index('MAXYZ', AssetClass.Equity, 'XYZ'))
assert actual.empty
assert actual.dataset_ids == ()
market_mock.assert_called_once()
replace.restore()
def _var_term_fwd():
idx = pd.date_range('2018-01-01', periods=2, freq='D')
def mock_var_swap(_asset, tenor, _forward_start_date, **_kwargs):
if tenor == '1m':
series = tm.ExtendedSeries([1, 2], idx, name='varSwap')
series.dataset_ids = _test_datasets
elif tenor == '2m':
series = tm.ExtendedSeries([3, 4], idx, name='varSwap')
series.dataset_ids = _test_datasets
else:
series = tm.ExtendedSeries()
series.dataset_ids = ()
return series
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.var_swap', Mock())
market_mock.side_effect = mock_var_swap
tenors_mock = replace('gs_quant.timeseries.measures._var_swap_tenors', Mock())
tenors_mock.return_value = ['1m', '2m', '3m']
actual = tm.var_term(Index('MA123', AssetClass.Equity, '123'), forward_start_date='1m')
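# the forward-starting term structure takes the latest mark per tenor and plots it at (last data date + tenor), hence 2018-02-02 and 2018-03-02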
idx = pd.DatetimeIndex(['2018-02-02', '2018-03-02'], name='varSwap')
expected = pd.Series([2, 4], name='varSwap', index=idx)
expected = expected.loc[DataContext.current.start_date: DataContext.current.end_date]
if expected.empty:
assert actual.empty
else:
assert_series_equal(expected, pd.Series(actual), check_names=False)
assert actual.dataset_ids == _test_datasets
market_mock.assert_called()
replace.restore()
return actual
def test_var_term():
with DataContext('2018-01-01', '2019-01-01'):
_var_term_typical()
_var_term_empty()
_var_term_fwd()
with DataContext('2019-01-01', '2019-07-04'):
_var_term_fwd()
with DataContext('2018-01-16', '2018-12-31'):
out = _var_term_typical()
assert out.empty
assert out.dataset_ids == _test_datasets
with pytest.raises(MqError):
tm.var_term(..., pricing_date=300)
def test_forward_vol():
idx = pd.DatetimeIndex([datetime.date(2020, 5, 1), datetime.date(2020, 5, 2)] * 4)
data = {
'impliedVolatility': [2.1, 2, 3.1, 3, 4.1, 4, 5.1, 5],
'tenor': ['1m', '1m', '2m', '2m', '3m', '3m', '4m', '4m']
}
out = MarketDataResponseFrame(data=data, index=idx)
out.dataset_ids = _test_datasets
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = out
# Equity
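# 1m vol starting 2m forward uses the 2m and 3m marks: sqrt((3 * 4.1 ** 2 - 2 * 3.1 ** 2) / (3 - 2)) = 5.58659 on 2020-05-01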
expected = pd.Series([5.58659, 5.47723], name='forwardVol',
index=pd.to_datetime(['2020-05-01', '2020-05-02']))
with DataContext('2020-01-01', '2020-09-01'):
actual = tm.forward_vol(Index('MA123', AssetClass.Equity, '123'), '1m', '2m', tm.VolReference.SPOT, 100)
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
market_mock.assert_called_once()
# FX
cross_mock = replace('gs_quant.timeseries.measures.cross_stored_direction_for_fx_vol', Mock())
cross_mock.return_value = 'EURUSD'
with DataContext('2020-01-01', '2020-09-01'):
actual_fx = tm.forward_vol(Cross('ABCDE', 'EURUSD'), '1m', '2m', tm.VolReference.SPOT, 100)
assert_series_equal(expected, pd.Series(actual_fx))
assert actual_fx.dataset_ids == _test_datasets
# no data
market_mock.reset_mock()
market_mock.return_value = mock_empty_market_data_response()
actual = tm.forward_vol(Index('MA123', AssetClass.Equity, '123'), '1m', '2m', tm.VolReference.SPOT, 100)
assert actual.empty
# no data for required tenor
market_mock.reset_mock()
market_mock.return_value = MarketDataResponseFrame(data={'impliedVolatility': [2.1, 3.1, 5.1],
'tenor': ['1m', '2m', '4m']},
index=[datetime.date(2020, 5, 1)] * 3)
actual = tm.forward_vol(Index('MA123', AssetClass.Equity, '123'), '1m', '2m', tm.VolReference.SPOT, 100)
assert actual.empty
# real-time
with pytest.raises(NotImplementedError):
tm.forward_vol(..., '1m', '2m', tm.VolReference.SPOT, 100, real_time=True)
replace.restore()
def test_forward_vol_term():
idx = pd.DatetimeIndex([datetime.date(2020, 4, 1), datetime.date(2020, 4, 2)] * 6)
data = {
'impliedVolatility': [1.1, 1, 2.1, 2, 3.1, 3, 4.1, 4, 5.1, 5, 6.1, 6],
'tenor': ['1w', '1w', '1m', '1m', '5w', '5w', '2m', '2m', '3m', '3m', '5m', '5m']
}
out = MarketDataResponseFrame(data=data, index=idx)
out.dataset_ids = _test_datasets
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = out
# Equity
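# same total-variance interpolation as in test_forward_var_term, e.g. sqrt((2 * 4 ** 2 - 1 * 2 ** 2) / 1) = 5.29150 from the 2020-04-02 marks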
expected = pd.Series([np.nan, 5.29150, 6.55744], name='forwardVolTerm',
index=pd.DatetimeIndex(['2020-05-01', '2020-06-02', '2020-07-02'], name='expirationDate'))
with DataContext('2020-01-01', '2020-07-31'):
actual = tm.forward_vol_term(Index('MA123', AssetClass.Equity, '123'), tm.VolReference.SPOT, 100,
datetime.date(2020, 4, 2))
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
market_mock.assert_called_once()
# FX
cross_mock = replace('gs_quant.timeseries.measures.cross_stored_direction_for_fx_vol', Mock())
cross_mock.return_value = 'EURUSD'
expected_fx = pd.Series([np.nan, 5.29150, 6.55744, 7.24569], name='forwardVolTerm',
index=pd.DatetimeIndex(['2020-05-01', '2020-06-02', '2020-07-02', '2020-09-02'],
name='expirationDate'))
with DataContext('2020-01-01', '2020-09-02'):
actual_fx = tm.forward_vol_term(Cross('ABCDE', 'EURUSD'), tm.VolReference.SPOT, 100)
assert_series_equal(expected_fx, pd.Series(actual_fx))
assert actual_fx.dataset_ids == _test_datasets
# no data
market_mock.reset_mock()
market_mock.return_value = mock_empty_market_data_response()
actual = tm.forward_vol_term(Index('MA123', AssetClass.Equity, '123'), tm.VolReference.SPOT, 100)
assert actual.empty
# real-time
with pytest.raises(NotImplementedError):
tm.forward_vol_term(..., tm.VolReference.SPOT, 100, real_time=True)
replace.restore()
def _vol_term_typical(reference, value):
assert DataContext.current_is_set
data = {
'tenor': ['1w', '2w', '1y', '2y'],
'impliedVolatility': [1, 2, 3, 4]
}
out = MarketDataResponseFrame(data=data, index=pd.DatetimeIndex(['2018-01-01'] * 4))
out.dataset_ids = _test_datasets
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = out
actual = tm.vol_term(Index('MA123', AssetClass.Equity, '123'), reference, value)
idx = pd.DatetimeIndex(['2018-01-08', '2018-01-15', '2019-01-01', '2020-01-01'], name='expirationDate')
expected = pd.Series([1, 2, 3, 4], name='impliedVolatility', index=idx)
expected = expected.loc[DataContext.current.start_date: DataContext.current.end_date]
if expected.empty:
assert actual.empty
else:
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
market_mock.assert_called_once()
replace.restore()
return actual
def _vol_term_empty():
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = MarketDataResponseFrame()
actual = tm.vol_term(Index('MAXYZ', AssetClass.Equity, 'XYZ'), tm.VolReference.DELTA_CALL, 777)
assert actual.empty
assert actual.dataset_ids == ()
market_mock.assert_called_once()
replace.restore()
def test_vol_term():
with DataContext('2018-01-01', '2019-01-01'):
_vol_term_typical(tm.VolReference.SPOT, 100)
_vol_term_typical(tm.VolReference.NORMALIZED, 4)
_vol_term_typical(tm.VolReference.DELTA_PUT, 50)
_vol_term_empty()
with DataContext('2018-01-16', '2018-12-31'):
out = _vol_term_typical(tm.VolReference.SPOT, 100)
assert out.empty
assert out.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.vol_term(..., tm.VolReference.SPOT, 100, real_time=True)
with pytest.raises(MqError):
tm.vol_term(Index('MA123', AssetClass.Equity, '123'), tm.VolReference.DELTA_NEUTRAL, 0)
def _vol_term_fx(reference, value):
assert DataContext.current_is_set
data = {
'tenor': ['1w', '2w', '1y', '2y'],
'impliedVolatility': [1, 2, 3, 4]
}
out = MarketDataResponseFrame(data=data, index=pd.DatetimeIndex(['2018-01-01'] * 4))
out.dataset_ids = _test_datasets
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = out
cross_mock = replace('gs_quant.timeseries.measures.cross_stored_direction_for_fx_vol', Mock())
cross_mock.return_value = 'EURUSD'
actual = tm.vol_term(Cross('ABCDE', 'EURUSD'), reference, value)
idx = pd.DatetimeIndex(['2018-01-08', '2018-01-15', '2019-01-01', '2020-01-01'], name='expirationDate')
expected = pd.Series([1, 2, 3, 4], name='impliedVolatility', index=idx)
expected = expected.loc[DataContext.current.start_date: DataContext.current.end_date]
if expected.empty:
assert actual.empty
else:
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
market_mock.assert_called_once()
replace.restore()
return actual
def test_vol_term_fx():
with pytest.raises(MqError):
tm.vol_term(Cross('MABLUE', 'BLUE'), tm.VolReference.SPOT, 50)
with pytest.raises(MqError):
tm.vol_term(Cross('MABLUE', 'BLUE'), tm.VolReference.NORMALIZED, 1)
with pytest.raises(MqError):
tm.vol_term(Cross('MABLUE', 'BLUE'), tm.VolReference.DELTA_NEUTRAL, 1)
with DataContext('2018-01-01', '2019-01-01'):
_vol_term_fx(tm.VolReference.DELTA_CALL, 50)
with DataContext('2018-01-01', '2019-01-01'):
_vol_term_fx(tm.VolReference.DELTA_PUT, 50)
def _fwd_term_typical():
assert DataContext.current_is_set
data = {
'tenor': ['1w', '2w', '1y', '2y'],
'forward': [1, 2, 3, 4]
}
out = MarketDataResponseFrame(data=data, index=pd.DatetimeIndex(['2018-01-01'] * 4))
out.dataset_ids = _test_datasets
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = out
actual = tm.fwd_term(Index('MA123', AssetClass.Equity, '123'))
idx = pd.DatetimeIndex(['2018-01-08', '2018-01-15', '2019-01-01', '2020-01-01'], name='expirationDate')
expected = pd.Series([1, 2, 3, 4], name='forward', index=idx)
expected = expected.loc[DataContext.current.start_date: DataContext.current.end_date]
if expected.empty:
assert actual.empty
else:
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
market_mock.assert_called_once()
replace.restore()
return actual
def _fwd_term_empty():
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = mock_empty_market_data_response()
actual = tm.fwd_term(Index('MAXYZ', AssetClass.Equity, 'XYZ'))
assert actual.empty
assert actual.dataset_ids == ()
market_mock.assert_called_once()
replace.restore()
def test_fwd_term():
with DataContext('2018-01-01', '2019-01-01'):
_fwd_term_typical()
_fwd_term_empty()
with DataContext('2018-01-16', '2018-12-31'):
out = _fwd_term_typical()
assert out.empty
assert out.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.fwd_term(..., real_time=True)
def test_bucketize_price():
target = {
'7x24': [27.323461],
'offpeak': [26.004816],
'peak': [27.982783],
'7x8': [26.004816],
'2x16h': [],
'monthly': [],
'CAISO 7x24': [26.953743375],
'CAISO peak': [29.547952562499997],
'MISO 7x24': [27.076390749999998],
'MISO offpeak': [25.263605624999997],
}
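# 7x24/peak/offpeak/7x8 are standard power delivery buckets; '2x16h' is empty because 2019-05-01 is a Wednesday (that bucket covers weekend/holiday daytime hours) and 'monthly' is empty because a single day cannot complete a monthly bucket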
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_commod)
mock_pjm = Index('MA001', AssetClass.Commod, 'PJM')
mock_caiso = Index('MA002', AssetClass.Commod, 'CAISO')
mock_miso = Index('MA003', AssetClass.Commod, 'MISO')
with DataContext(datetime.date(2019, 5, 1), datetime.date(2019, 5, 1)):
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
bbid_mock.return_value = 'MISO'
actual = tm.bucketize_price(mock_miso, 'LMP', bucket='7x24')
assert_series_equal(pd.Series(target['MISO 7x24'],
index=[datetime.date(2019, 5, 1)],
name='price'),
pd.Series(actual))
actual = tm.bucketize_price(mock_miso, 'LMP', bucket='offpeak')
assert_series_equal(pd.Series(target['MISO offpeak'],
index=[datetime.date(2019, 5, 1)],
name='price'),
pd.Series(actual))
bbid_mock.return_value = 'CAISO'
actual = tm.bucketize_price(mock_caiso, 'LMP', bucket='7x24')
assert_series_equal(pd.Series(target['CAISO 7x24'],
index=[datetime.date(2019, 5, 1)],
name='price'),
pd.Series(actual))
actual = tm.bucketize_price(mock_caiso, 'LMP', bucket='peak')
assert_series_equal(pd.Series(target['CAISO peak'],
index=[datetime.date(2019, 5, 1)],
name='price'),
pd.Series(actual))
bbid_mock.return_value = 'PJM'
actual = tm.bucketize_price(mock_pjm, 'LMP', bucket='7x24')
assert_series_equal(pd.Series(target['7x24'],
index=[datetime.date(2019, 5, 1)],
name='price'),
pd.Series(actual))
actual = tm.bucketize_price(mock_pjm, 'LMP', bucket='offpeak')
assert_series_equal(pd.Series(target['offpeak'],
index=[datetime.date(2019, 5, 1)],
name='price'),
pd.Series(actual))
actual = tm.bucketize_price(mock_pjm, 'LMP', bucket='peak')
assert_series_equal(pd.Series(target['peak'],
index=[datetime.date(2019, 5, 1)],
name='price'),
pd.Series(actual))
actual = tm.bucketize_price(mock_pjm, 'LMP', bucket='7x8')
assert_series_equal(pd.Series(target['7x8'],
index=[datetime.date(2019, 5, 1)],
name='price'),
pd.Series(actual))
actual = tm.bucketize_price(mock_pjm, 'LMP', bucket='2x16h')
assert_series_equal(pd.Series(target['2x16h'],
index=[],
name='price'),
pd.Series(actual))
actual = tm.bucketize_price(mock_pjm, 'LMP', granularity='m', bucket='7X24')
assert_series_equal(pd.Series(target['monthly'],
index=[],
name='price'),
pd.Series(actual))
with pytest.raises(ValueError):
tm.bucketize_price(mock_pjm, 'LMP', bucket='7X24', real_time=True)
with pytest.raises(ValueError):
tm.bucketize_price(mock_pjm, 'LMP', bucket='weekday')
with pytest.raises(ValueError):
tm.bucketize_price(mock_caiso, 'LMP', bucket='weekday')
with pytest.raises(ValueError):
tm.bucketize_price(mock_pjm, 'LMP', granularity='yearly')
replace.restore()
# No market data
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = mock_empty_market_data_response()
with DataContext(datetime.date(2019, 1, 2), datetime.date(2019, 1, 2)):
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
bbid_mock.return_value = 'MISO'
actual = tm.bucketize_price(mock_miso, 'LMP', bucket='7x24')
assert_series_equal(pd.Series(dtype='float64'), pd.Series(actual))
replace.restore()
def test_forward_price():
# US Power
target = {
'7x24': [19.46101],
'peak': [23.86745],
'J20 7x24': [18.11768888888889],
'J20-K20 7x24': [19.283921311475414],
'J20-K20 offpeak': [15.82870707070707],
'J20-K20 7x8': [13.020144262295084],
}
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_forward_price)
mock_spp = Index('MA001', AssetClass.Commod, 'SPP')
with DataContext(datetime.date(2019, 1, 2), datetime.date(2019, 1, 2)):
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
bbid_mock.return_value = 'SPP'
actual = tm.forward_price(mock_spp,
price_method='LMP',
contract_range='2Q20',
bucket='7x24'
)
assert_series_equal(pd.Series(target['7x24'],
index=[datetime.date(2019, 1, 2)],
name='price'),
pd.Series(actual))
actual = tm.forward_price(mock_spp,
price_method='LMP',
contract_range='J20',
bucket='7x24'
)
assert_series_equal(pd.Series(target['J20 7x24'],
index=[datetime.date(2019, 1, 2)],
name='price'),
pd.Series(actual))
actual = tm.forward_price(mock_spp,
price_method='LMP',
contract_range='2Q20',
bucket='PEAK'
)
assert_series_equal(pd.Series(target['peak'],
index=[datetime.date(2019, 1, 2)],
name='price'),
pd.Series(actual))
actual = tm.forward_price(mock_spp,
price_method='LMP',
contract_range='J20-K20',
bucket='7x24'
)
assert_series_equal(pd.Series(target['J20-K20 7x24'],
index=[datetime.date(2019, 1, 2)],
name='price'),
pd.Series(actual))
actual = tm.forward_price(mock_spp,
price_method='LMP',
contract_range='J20-K20',
bucket='offpeak'
)
assert_series_equal(pd.Series(target['J20-K20 offpeak'],
index=[datetime.date(2019, 1, 2)],
name='price'),
pd.Series(actual))
actual = tm.forward_price(mock_spp,
price_method='LMP',
contract_range='J20-K20',
bucket='7x8'
)
assert_series_equal(pd.Series(target['J20-K20 7x8'],
index=[datetime.date(2019, 1, 2)],
name='price'),
pd.Series(actual))
actual = tm.forward_price(mock_spp,
price_method='lmp',
contract_range='2Q20',
bucket='7x24'
)
assert_series_equal(pd.Series(target['7x24'],
index=[datetime.date(2019, 1, 2)],
name='price'),
pd.Series(actual))
with pytest.raises(ValueError):
tm.forward_price(mock_spp,
price_method='LMP',
contract_range='5Q20',
bucket='PEAK'
)
with pytest.raises(ValueError):
tm.forward_price(mock_spp,
price_method='LMP',
contract_range='Invalid',
bucket='PEAK'
)
with pytest.raises(ValueError):
tm.forward_price(mock_spp,
price_method='LMP',
contract_range='3H20',
bucket='7x24'
)
with pytest.raises(ValueError):
tm.forward_price(mock_spp,
price_method='LMP',
contract_range='F20-I20',
bucket='7x24'
)
with pytest.raises(ValueError):
tm.forward_price(mock_spp,
price_method='LMP',
contract_range='2H20',
bucket='7x24',
real_time=True
)
replace.restore()
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_missing_bucket_forward_price)
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
bbid_mock.return_value = 'SPP'
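# the mock omits the '7x8' mark, so the '7x24' aggregation for 2Q20 should return an empty series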
actual = tm.forward_price(mock_spp,
price_method='LMP',
contract_range='2Q20',
bucket='7x24'
)
assert_series_equal(pd.Series(), pd.Series(actual), check_names=False)
actual = tm.forward_price(mock_spp,
price_method='LMP',
contract_range='2Q20',
bucket='PEAK'
)
assert_series_equal(pd.Series(target['peak'],
index=[datetime.date(2019, 1, 2)],
name='price'),
pd.Series(actual))
actual = tm.forward_price(mock_spp,
price_method='LMP',
contract_range='J20-K20',
bucket='7x24'
)
assert_series_equal(pd.Series(target['J20-K20 7x24'],
index=[datetime.date(2019, 1, 2)],
name='price'),
pd.Series(actual))
replace.restore()
# No market data
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = mock_empty_market_data_response()
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
bbid_mock.return_value = 'SPP'
with DataContext(datetime.date(2019, 1, 2), datetime.date(2019, 1, 2)):
actual = tm.forward_price(mock_spp,
price_method='LMP',
contract_range='2Q20',
bucket='7x24'
)
assert_series_equal(pd.Series(dtype='float64'), pd.Series(actual))
replace.restore()
def test_natgas_forward_price():
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_natgas_forward_price)
mock = CommodityNaturalGasHub('MA001', 'AGT')
with DataContext(datetime.date(2019, 1, 2), datetime.date(2019, 1, 2)):
actual = pd.Series(tm.forward_price(mock,
price_method='GDD',
contract_range='F21'))
expected = pd.Series([2.880], index=[datetime.date(2019, 1, 2)], name='price')
assert_series_equal(expected, actual)
actual = pd.Series(tm.forward_price(mock,
price_method='GDD',
contract_range='F21-G21'))
expected = pd.Series([2.8629152542372878], index=[datetime.date(2019, 1, 2)], name='price')
assert_series_equal(expected, actual)
with pytest.raises(ValueError):
tm.forward_price(mock,
price_method='GDD',
contract_range='F21-I21')
with pytest.raises(ValueError):
tm.forward_price(mock,
price_method='GDD',
contract_range='I21')
replace.restore()
# No market data
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = mock_empty_market_data_response()
with DataContext(datetime.date(2019, 1, 2), datetime.date(2019, 1, 2)):
actual = tm.forward_price(mock,
price_method='GDD',
contract_range='F21')
assert_series_equal(pd.Series(dtype='float64'), pd.Series(actual))
replace.restore()
def test_get_iso_data():
tz_map = {'MISO': 'US/Central', 'CAISO': 'US/Pacific'}
for key in tz_map:
assert (tm._get_iso_data(key)[0] == tz_map[key])
def test_string_to_date_interval():
assert (tm._string_to_date_interval("K20")['start_date'] == datetime.date(2020, 5, 1))
assert (tm._string_to_date_interval("K20")['end_date'] == datetime.date(2020, 5, 31))
assert (tm._string_to_date_interval("k20")['start_date'] == datetime.date(2020, 5, 1))
assert (tm._string_to_date_interval("k20")['end_date'] == datetime.date(2020, 5, 31))
assert (tm._string_to_date_interval("Cal22")['start_date'] == datetime.date(2022, 1, 1))
assert (tm._string_to_date_interval("Cal22")['end_date'] == datetime.date(2022, 12, 31))
assert (tm._string_to_date_interval("Cal2012")['start_date'] == datetime.date(2012, 1, 1))
assert (tm._string_to_date_interval("Cal2012")['end_date'] == datetime.date(2012, 12, 31))
assert (tm._string_to_date_interval("Cal53")['start_date'] == datetime.date(1953, 1, 1))
assert (tm._string_to_date_interval("Cal53")['end_date'] == datetime.date(1953, 12, 31))
assert (tm._string_to_date_interval("2010")['start_date'] == datetime.date(2010, 1, 1))
assert (tm._string_to_date_interval("2010")['end_date'] == datetime.date(2010, 12, 31))
assert (tm._string_to_date_interval("3Q20")['start_date'] == datetime.date(2020, 7, 1))
assert (tm._string_to_date_interval("3Q20")['end_date'] == datetime.date(2020, 9, 30))
assert (tm._string_to_date_interval("2h2021")['start_date'] == datetime.date(2021, 7, 1))
assert (tm._string_to_date_interval("2h2021")['end_date'] == datetime.date(2021, 12, 31))
assert (tm._string_to_date_interval("3q20")['start_date'] == datetime.date(2020, 7, 1))
assert (tm._string_to_date_interval("3q20")['end_date'] == datetime.date(2020, 9, 30))
assert (tm._string_to_date_interval("2H2021")['start_date'] == datetime.date(2021, 7, 1))
assert (tm._string_to_date_interval("2H2021")['end_date'] == datetime.date(2021, 12, 31))
assert (tm._string_to_date_interval("Mar2021")['start_date'] == datetime.date(2021, 3, 1))
assert (tm._string_to_date_interval("Mar2021")['end_date'] == datetime.date(2021, 3, 31))
assert (tm._string_to_date_interval("March2021")['start_date'] == datetime.date(2021, 3, 1))
assert (tm._string_to_date_interval("March2021")['end_date'] == datetime.date(2021, 3, 31))
assert (tm._string_to_date_interval("5Q20") == "Invalid Quarter")
assert (tm._string_to_date_interval("HH2021") == "Invalid num")
assert (tm._string_to_date_interval("3H2021") == "Invalid Half Year")
assert (tm._string_to_date_interval("Cal2a") == "Invalid year")
assert (tm._string_to_date_interval("Marc201") == "Invalid date code")
assert (tm._string_to_date_interval("M1a2021") == "Invalid date code")
assert (tm._string_to_date_interval("Marcha2021") == "Invalid date code")
assert (tm._string_to_date_interval("I20") == "Invalid month")
assert (tm._string_to_date_interval("20") == "Unknown date code")
def test_implied_vol_commod():
target = {
'F21': [2.880],
'F21-H21': [2.815756],
}
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_implied_volatility)
mock = Index('MA001', AssetClass.Commod, 'Option NG Exchange')
with DataContext(datetime.date(2019, 1, 2), datetime.date(2019, 1, 2)):
actual = tm.implied_volatility(mock,
tenor='F21-H21')
assert_series_equal(pd.Series(target['F21-H21'],
index=[datetime.date(2019, 1, 2)],
name='price'),
pd.Series(actual))
replace.restore()
def test_fair_price():
target = {
'F21': [2.880],
'F21-H21': [2.815756],
}
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fair_price)
mock = Index('MA001', AssetClass.Commod, 'Swap NG Exchange')
mock2 = Swap('MA002', AssetClass.Commod, 'Swap Oil')
with DataContext(datetime.date(2019, 1, 2), datetime.date(2019, 1, 2)):
actual = tm.fair_price(mock,
tenor='F21')
assert_series_equal(pd.Series(target['F21'],
index=[datetime.date(2019, 1, 2)],
name='price'),
pd.Series(actual))
with pytest.raises(ValueError):
tm.fair_price(mock,
tenor=None)
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fair_price_swap)
with DataContext(datetime.date(2019, 1, 2), datetime.date(2019, 1, 2)):
actual = tm.fair_price(mock2)
assert_series_equal(pd.Series([2.880],
index=[pd.Timestamp('2019-01-02')],
name='fairPrice'),
pd.Series(actual),
)
replace.restore()
def test_weighted_average_valuation_curve_for_calendar_strip():
target = {
'F21': [2.880],
'F21-H21': [2.815756],
}
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fair_price)
mock = Index('MA001', AssetClass.Commod, 'Swap NG Exchange')
with DataContext(datetime.date(2019, 1, 2), datetime.date(2019, 1, 2)):
actual = tm._weighted_average_valuation_curve_for_calendar_strip(mock,
contract_range='F21',
query_type=QueryType.FAIR_PRICE,
measure_field='fairPrice'
)
assert_series_equal(pd.Series(target['F21'],
index=[datetime.date(2019, 1, 2)],
name='price'),
pd.Series(actual))
actual = tm._weighted_average_valuation_curve_for_calendar_strip(mock,
contract_range='F21-H21',
query_type=QueryType.FAIR_PRICE,
measure_field='fairPrice'
)
assert_series_equal(pd.Series(target['F21-H21'],
index=[datetime.date(2019, 1, 2)],
name='price'),
pd.Series(actual))
with pytest.raises(ValueError):
tm._weighted_average_valuation_curve_for_calendar_strip(mock,
contract_range='Invalid',
query_type=QueryType.FAIR_PRICE,
measure_field='fairPrice'
)
with pytest.raises(ValueError):
tm._weighted_average_valuation_curve_for_calendar_strip(mock,
contract_range='F20-I20',
query_type=QueryType.FAIR_PRICE,
measure_field='fairPrice'
)
with pytest.raises(ValueError):
tm._weighted_average_valuation_curve_for_calendar_strip(mock,
contract_range='3H20',
query_type=QueryType.PRICE,
measure_field='fairPrice'
)
replace.restore()
def test_fundamental_metrics():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
period = '1y'
direction = tm.FundamentalMetricPeriodDirection.FORWARD
actual = tm.dividend_yield(mock_spx, period, direction)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='fundamentalMetric'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.dividend_yield(..., period, direction, real_time=True)
actual = tm.earnings_per_share(mock_spx, period, direction)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='fundamentalMetric'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.earnings_per_share(..., period, direction, real_time=True)
actual = tm.earnings_per_share_positive(mock_spx, period, direction)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='fundamentalMetric'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.earnings_per_share_positive(..., period, direction, real_time=True)
actual = tm.net_debt_to_ebitda(mock_spx, period, direction)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='fundamentalMetric'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.net_debt_to_ebitda(..., period, direction, real_time=True)
actual = tm.price_to_book(mock_spx, period, direction)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='fundamentalMetric'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.price_to_book(..., period, direction, real_time=True)
actual = tm.price_to_cash(mock_spx, period, direction)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='fundamentalMetric'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.price_to_cash(..., period, direction, real_time=True)
actual = tm.price_to_earnings(mock_spx, period, direction)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='fundamentalMetric'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.price_to_earnings(..., period, direction, real_time=True)
actual = tm.price_to_earnings_positive(mock_spx, period, direction)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='fundamentalMetric'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.price_to_earnings_positive(..., period, direction, real_time=True)
actual = tm.price_to_sales(mock_spx, period, direction)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='fundamentalMetric'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.price_to_sales(..., period, direction, real_time=True)
actual = tm.return_on_equity(mock_spx, period, direction)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='fundamentalMetric'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.return_on_equity(..., period, direction, real_time=True)
actual = tm.sales_per_share(mock_spx, period, direction)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='fundamentalMetric'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.sales_per_share(..., period, direction, real_time=True)
replace.restore()
def test_central_bank_swap_rate(mocker):
target = {
'meeting_absolute': -0.004550907771,
'meeting_relative': -0.00002833724599999969,
'eoy_absolute': -0.003359767756,
'eoy_relative': 0.001162802769,
'spot': -0.00455
}
mock_eur = Currency('MARFAGXDQRWM07Y2', 'EUR')
with DataContext(dt.date(2019, 12, 6), dt.date(2019, 12, 6)):
replace = Replacer()
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='EUR', ))]
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=map_identifiers_default_mocker)
mock_get_data = replace('gs_quant.data.dataset.Dataset.get_data', Mock())
mock_get_data.return_value = mock_meeting_absolute()
actual_abs = tm.central_bank_swap_rate(mock_eur, tm.MeetingType.MEETING_FORWARD, 'absolute',
dt.date(2019, 12, 6))
assert (target['meeting_absolute'] == actual_abs.loc[dt.date(2020, 1, 23)])
assert actual_abs.dataset_ids == ('CENTRAL_BANK_WATCH',)
actual_rel = tm.central_bank_swap_rate(mock_eur, tm.MeetingType.MEETING_FORWARD, 'relative',
dt.date(2019, 12, 6))
assert (target['meeting_relative'] == actual_rel.loc[dt.date(2020, 1, 23)])
assert actual_rel.dataset_ids == ('CENTRAL_BANK_WATCH',)
mock_get_data.return_value = mock_ois_spot()
actual_spot = tm.central_bank_swap_rate(mock_eur, tm.MeetingType.SPOT, 'absolute', dt.date(2019, 12, 6))
assert (target['spot'] == actual_spot.loc[dt.date(2019, 12, 6)])
assert actual_spot.dataset_ids == ('CENTRAL_BANK_WATCH',)
with pytest.raises(MqError):
tm.central_bank_swap_rate(mock_eur, 'meeting_forward')
with pytest.raises(MqError):
tm.central_bank_swap_rate(mock_eur, tm.MeetingType.MEETING_FORWARD, 'normalized', '2019-09-01')
with pytest.raises(MqError):
tm.central_bank_swap_rate(mock_eur, tm.MeetingType.MEETING_FORWARD, 'absolute', 5)
with pytest.raises(MqError):
tm.central_bank_swap_rate(mock_eur, tm.MeetingType.MEETING_FORWARD, 'absolute', '01-09-2019')
with pytest.raises(MqError):
tm.central_bank_swap_rate(mock_eur, tm.MeetingType.SPOT, 'relative')
with pytest.raises(NotImplementedError):
tm.central_bank_swap_rate(mock_eur, tm.MeetingType.SPOT, 'absolute', real_time=True)
replace.restore()
def test_policy_rate_expectation(mocker):
target = {
'meeting_number_absolute': -0.004550907771,
'meeting_number_relative': -0.000028337246,
'meeting_date_relative': -0.000028337246,
'meeting_number_spot': -0.004522570525
}
mock_eur = Currency('MARFAGXDQRWM07Y2', 'EUR')
with DataContext(dt.date(2019, 12, 6), dt.date(2019, 12, 6)):
replace = Replacer()
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='EUR', ))]
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=map_identifiers_default_mocker)
mocker.patch.object(Dataset, 'get_data', side_effect=get_data_policy_rate_expectation_mocker)
actual_num = tm.policy_rate_expectation(mock_eur, tm.MeetingType.MEETING_FORWARD, 'absolute', 2)
assert (target['meeting_number_absolute'] == actual_num.loc[dt.date(2019, 12, 6)])
assert actual_num.dataset_ids == ('CENTRAL_BANK_WATCH',)
actual_date = tm.policy_rate_expectation(mock_eur, tm.MeetingType.MEETING_FORWARD, 'absolute',
dt.date(2020, 1, 23))
assert (target['meeting_number_absolute'] == actual_date.loc[dt.date(2019, 12, 6)])
assert actual_date.dataset_ids == ('CENTRAL_BANK_WATCH',)
actual_num = tm.policy_rate_expectation(mock_eur, tm.MeetingType.MEETING_FORWARD, 'relative', 2)
assert_allclose([target['meeting_number_relative']], [actual_num.loc[dt.date(2019, 12, 6)]],
rtol=1e-9, atol=1e-15)
assert actual_num.dataset_ids == ('CENTRAL_BANK_WATCH',)
actual_num = tm.policy_rate_expectation(mock_eur, tm.MeetingType.MEETING_FORWARD, 'absolute', 0)
assert (target['meeting_number_spot'] == actual_num.loc[dt.date(2019, 12, 6)])
assert actual_num.dataset_ids == ('CENTRAL_BANK_WATCH',)
actual_date = tm.policy_rate_expectation(mock_eur, tm.MeetingType.MEETING_FORWARD, 'absolute', '2019-10-24')
assert (target['meeting_number_spot'] == actual_date.loc[dt.date(2019, 12, 6)])
assert actual_date.dataset_ids == ('CENTRAL_BANK_WATCH',)
mocker.patch.object(Dataset, 'get_data', side_effect=[mock_meeting_expectation(),
mock_empty_market_data_response()])
with pytest.raises(MqError):
tm.policy_rate_expectation(mock_eur, tm.MeetingType.MEETING_FORWARD, 'relative', 2)
with pytest.raises(MqError):
tm.policy_rate_expectation(mock_eur, tm.MeetingType.SPOT)
with pytest.raises(MqError):
tm.policy_rate_expectation(mock_eur, tm.MeetingType.MEETING_FORWARD, 'relative', '5')
with pytest.raises(MqError):
tm.policy_rate_expectation(mock_eur, tm.MeetingType.MEETING_FORWARD, 'absolute', 5.5)
with pytest.raises(MqError):
tm.policy_rate_expectation(mock_eur, tm.MeetingType.MEETING_FORWARD, 'absolute', '01-09-2019')
with pytest.raises(MqError):
tm.policy_rate_expectation(mock_eur, tm.MeetingType.MEETING_FORWARD, 'normalized', dt.date(2019, 9, 1))
with pytest.raises(MqError):
tm.policy_rate_expectation(mock_eur, tm.MeetingType.MEETING_FORWARD, 'relative', -2)
with pytest.raises(NotImplementedError):
tm.policy_rate_expectation(mock_eur, tm.MeetingType.SPOT, 'absolute', real_time=True)
mock_get_data = replace('gs_quant.data.dataset.Dataset.get_data', Mock())
mock_get_data.return_value = pd.DataFrame()
with pytest.raises(MqError):
tm.policy_rate_expectation(mock_eur, tm.MeetingType.MEETING_FORWARD, 'absolute', 2)
replace.restore()
def test_realized_volatility():
from gs_quant.timeseries.econometrics import volatility, Returns
from gs_quant.timeseries.statistics import generate_series
random = generate_series(100).rename('spot')
window = 10
type_ = Returns.SIMPLE
replace = Replacer()
market_data = replace('gs_quant.timeseries.measures._market_data_timed', Mock())
return_value = MarketDataResponseFrame(random)
return_value.dataset_ids = _test_datasets
market_data.return_value = return_value
expected = volatility(random, window, type_)
actual = tm.realized_volatility(Cross('MA123', 'ABCXYZ'), window, type_)
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
replace.restore()
def test_esg_headline_metric():
replace = Replacer()
mock_aapl = Stock('MA4B66MW5E27U9VBB94', 'AAPL')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_esg)
actual = tm.esg_headline_metric(mock_aapl, tm.EsgMetric.ENVIRONMENTAL_SOCIAL_NUMERIC_SCORE)
assert_series_equal(pd.Series([2, 4, 6], index=_index * 3, name='esNumericScore'), pd.Series(actual))
actual = tm.esg_headline_metric(mock_aapl, tm.EsgMetric.ENVIRONMENTAL_SOCIAL_POLICY_SCORE)
assert_series_equal(pd.Series([2, 4, 6], index=_index * 3, name='esPolicyScore'), pd.Series(actual))
actual = tm.esg_headline_metric(mock_aapl, tm.EsgMetric.ENVIRONMENTAL_SOCIAL_AGGREGATE_SCORE)
assert_series_equal(pd.Series([2, 4, 6], index=_index * 3, name='esScore'), pd.Series(actual))
actual = tm.esg_headline_metric(mock_aapl, tm.EsgMetric.ENVIRONMENTAL_SOCIAL_PRODUCT_IMPACT_SCORE)
assert_series_equal(pd.Series([2, 4, 6], index=_index * 3, name='esProductImpactScore'), pd.Series(actual))
actual = tm.esg_headline_metric(mock_aapl, tm.EsgMetric.GOVERNANCE_AGGREGATE_SCORE)
assert_series_equal(pd.Series([2, 4, 6], index=_index * 3, name='gScore'), pd.Series(actual))
actual = tm.esg_headline_metric(mock_aapl, tm.EsgMetric.ENVIRONMENTAL_SOCIAL_MOMENTUM_SCORE)
assert_series_equal(pd.Series([2, 4, 6], index=_index * 3, name='esMomentumScore'), pd.Series(actual))
actual = tm.esg_headline_metric(mock_aapl, tm.EsgMetric.GOVERNANCE_REGIONAL_SCORE)
assert_series_equal(pd.Series([2, 4, 6], index=_index * 3, name='gRegionalScore'), pd.Series(actual))
actual = tm.esg_headline_metric(mock_aapl, tm.EsgMetric.CONTROVERSY_SCORE)
assert_series_equal(pd.Series([2, 4, 6], index=_index * 3, name='controversyScore'), pd.Series(actual))
actual = tm.esg_headline_metric(mock_aapl, tm.EsgMetric.ENVIRONMENTAL_SOCIAL_NUMERIC_PERCENTILE)
assert_series_equal(pd.Series([81.2, 75.4, 65.7], index=_index * 3, name='esNumericPercentile'), pd.Series(actual))
actual = tm.esg_headline_metric(mock_aapl, tm.EsgMetric.ENVIRONMENTAL_SOCIAL_POLICY_PERCENTILE)
assert_series_equal(pd.Series([81.2, 75.4, 65.7], index=_index * 3, name='esPolicyPercentile'), pd.Series(actual))
actual = tm.esg_headline_metric(mock_aapl, tm.EsgMetric.ENVIRONMENTAL_SOCIAL_AGGREGATE_PERCENTILE)
assert_series_equal(pd.Series([81.2, 75.4, 65.7], index=_index * 3, name='esPercentile'), pd.Series(actual))
actual = tm.esg_headline_metric(mock_aapl, tm.EsgMetric.ENVIRONMENTAL_SOCIAL_PRODUCT_IMPACT_PERCENTILE)
assert_series_equal(pd.Series([81.2, 75.4, 65.7], index=_index * 3, name='esProductImpactPercentile'),
pd.Series(actual))
actual = tm.esg_headline_metric(mock_aapl, tm.EsgMetric.GOVERNANCE_AGGREGATE_PERCENTILE)
assert_series_equal( | pd.Series([81.2, 75.4, 65.7], index=_index * 3, name='gPercentile') | pandas.Series |
"""
This is the main script of DeepD v0.1
Developed and maintained by Xu lab at https://github.com/DesmondYuan/deepD
For quick start, try this with python scripts/main.py -config=configs/example_GENT_NC.json
The config file should include the following information
"expr_name": (str) Label used as folder name under results
"train_dataset": (str) Location of the dataset that would be further split into training set and validation set with
the "validation_ratio.
"test_dataset": (str) Location of the withheld/test dataset.
"annotation_col": (str) On which column of the input data frame would a supervised model be trained to classify.
"validation_ratio": (float) The training/validation ratio for data partition used for "train_dataset".
"n_genes": (int) Number of genes from the input data.
"unsupervised_layers": (list) A list of layer sizes used for encoders and decoders.
"supervised_layers": (list) A list of layer sizes used for supervised classifier DeepDCancer.
"pretrain_tp2vec": (bool) Whether to perform unsupervised pretraining.
"plot_pretrain_results": (bool) Whether to plot the results after pretraining.
"train_disconnected_classifier": (bool) Whether to perform the disconnected supervised classification (DeepDCancer).
"train_connected_classifier": (bool) Whether to perform the connected supervised classification (DeepDcCancer).
"max_iteration": (int) Maximum number of iterations used for training.
"max_iteration_pretrain": (int) Maximum number of iterations used for pretraining.
n_iter_buffer (int): The moving window for eval losses during training.
n_iter_patience (int): How many iterations without a decrease in the buffered validation loss are tolerated
    before early stopping is triggered during training.
"n_iter_patience_pretrain": How many iterations without a decrease in the buffered validation loss are tolerated
    before early stopping is triggered during pretraining (applied per layer).
learning_rate (float): The learning rate for Adam Optimizer. Note that we used the default beta1 and beta2 for Adam.
l1 (float): l1 regularization strength.
l2 (float): l2 regularization strength.
"activation": (tensorflow) Activation function for dense layers.
"optimizer": (tensorflow) Which optimizer would be used for training.
"verbose": (int) export verbose
"listen_freq": (int) Printing training loss for each # of iterations.
"pretrain_batch_size": Batch size for each iteration in pretraining.
"batch_size": Batch size for each iteration in training.
"""
import sys
import os
import shutil
import argparse
import json
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sys.path.append(os.path.dirname(__file__) + '/..')
from DeepD.model import DeepT2vec, DeepDCancer
from DeepD.utils import md5, get_metrics, plot_reconstruction
from DeepD.train import pretrain, train, session_setup, session_init
from DeepD.data import random_partition
parser = argparse.ArgumentParser(description='DeepD main script')
parser.add_argument('-config', '--experiment_config_path', required=True, type=str, help="Path of experiment config")
parser.add_argument('-seed', '--random_seed', required=False, default=0, type=int, help="Random seed")
args = parser.parse_args()
cfg = json.load(open(args.experiment_config_path, 'r'))
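# For reference, cfg is a plain dict parsed from the JSON config described in the module
# docstring. A minimal illustrative example is sketched below -- the values are assumptions
# for illustration only, not taken from the repository (see configs/example_GENT_NC.json for
# a real config):
# {
#     "expr_name": "example_run",
#     "train_dataset": "data/train_set.csv",
#     "test_dataset": "data/test_set.csv",
#     "annotation_col": "label",
#     "validation_ratio": 0.2,
#     "n_genes": 10000,
#     "unsupervised_layers": [1024, 256, 64],
#     "supervised_layers": [64, 2],
#     "pretrain_tp2vec": true,
#     "plot_pretrain_results": true,
#     "train_disconnected_classifier": true,
#     "train_connected_classifier": true,
#     "max_iteration": 10000,
#     "max_iteration_pretrain": 5000,
#     "n_iter_buffer": 5,
#     "n_iter_patience": 100,
#     "n_iter_patience_pretrain": 100,
#     "learning_rate": 1e-4,
#     "l1": 1e-5,
#     "l2": 1e-5,
#     "activation": "relu",
#     "optimizer": "adam",
#     "verbose": 2,
#     "listen_freq": 20,
#     "pretrain_batch_size": 64,
#     "batch_size": 64
# }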
print("[Main] Experiment starts with seed={} and config={}".format(args.random_seed, cfg))
# Loading datasets
print("[Main] Loading datasets...")
train_df = pd.read_csv(cfg['train_dataset'], index_col=0)
test_df = | pd.read_csv(cfg['test_dataset'], index_col=0) | pandas.read_csv |
# -*- coding:utf-8 -*-
# !/usr/bin/env python
"""
Date: 2021/11/2 21:08
Desc: Tonghuashun (同花顺) - Data Center - Technical stock screening
http://data.10jqka.com.cn/rank/cxg/
"""
import pandas as pd
import requests
from bs4 import BeautifulSoup
from py_mini_racer import py_mini_racer
from tqdm import tqdm
from akshare.datasets import get_ths_js
def _get_file_content_ths(file: str = "ths.js") -> str:
"""
    Get the content of a JS file
    :param file: JS file name
    :type file: str
    :return: file content
:rtype: str
"""
setting_file_path = get_ths_js(file)
with open(setting_file_path) as f:
file_data = f.read()
return file_data
def stock_rank_cxg_ths(symbol: str = "创月新高") -> pd.DataFrame:
"""
    Tonghuashun - Data Center - Technical stock screening - New highs
    http://data.10jqka.com.cn/rank/cxg/
    :param symbol: choice of {"创月新高", "半年新高", "一年新高", "历史新高"}, i.e. new monthly, half-year, one-year or all-time high
    :type symbol: str
    :return: new-high data
:rtype: pandas.DataFrame
"""
symbol_map = {
"创月新高": "4",
"半年新高": "3",
"一年新高": "2",
"历史新高": "1",
}
js_code = py_mini_racer.MiniRacer()
js_content = _get_file_content_ths("ths.js")
js_code.eval(js_content)
v_code = js_code.call("v")
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
"Cookie": f"v={v_code}",
}
url = f"http://data.10jqka.com.cn/rank/cxg/board/{symbol_map[symbol]}/field/stockcode/order/asc/page/1/ajax/1/free/1/"
r = requests.get(url, headers=headers)
soup = BeautifulSoup(r.text, "lxml")
try:
total_page = soup.find("span", attrs={"class": "page_info"}).text.split("/")[1]
    except AttributeError:
total_page = 1
big_df = pd.DataFrame()
for page in tqdm(range(1, int(total_page) + 1), leave=False):
v_code = js_code.call("v")
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
"Cookie": f"v={v_code}",
}
url = f"http://data.10jqka.com.cn/rank/cxg/board/{symbol_map[symbol]}/field/stockcode/order/asc/page/{page}/ajax/1/free/1/"
r = requests.get(url, headers=headers)
temp_df = pd.read_html(r.text)[0]
big_df = big_df.append(temp_df, ignore_index=True)
big_df.columns = ["序号", "股票代码", "股票简称", "涨跌幅", "换手率", "最新价", "前期高点", "前期高点日期"]
big_df["股票代码"] = big_df["股票代码"].astype(str).str.zfill(6)
big_df["涨跌幅"] = big_df["涨跌幅"].str.strip("%")
big_df["换手率"] = big_df["换手率"].str.strip("%")
big_df["前期高点日期"] = pd.to_datetime(big_df["前期高点日期"]).dt.date
big_df["涨跌幅"] = pd.to_numeric(big_df["涨跌幅"])
big_df["换手率"] = pd.to_numeric(big_df["换手率"])
big_df["最新价"] = pd.to_numeric(big_df["最新价"])
big_df["前期高点"] = pd.to_numeric(big_df["前期高点"])
return big_df
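# Illustrative usage (not part of the original module); any of the four board names accepted
# by ``symbol`` can be passed, e.g.:
#     stock_rank_cxg_ths_df = stock_rank_cxg_ths(symbol="创月新高")
#     print(stock_rank_cxg_ths_df)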
def stock_rank_cxd_ths(symbol: str = "创月新低") -> pd.DataFrame:
"""
    Tonghuashun - Data Center - Technical stock screening - New lows
    http://data.10jqka.com.cn/rank/cxd/
    :param symbol: choice of {"创月新低", "半年新低", "一年新低", "历史新低"}, i.e. new monthly, half-year, one-year or all-time low
    :type symbol: str
    :return: new-low data
:rtype: pandas.DataFrame
"""
symbol_map = {
"创月新低": "4",
"半年新低": "3",
"一年新低": "2",
"历史新低": "1",
}
js_code = py_mini_racer.MiniRacer()
js_content = _get_file_content_ths("ths.js")
js_code.eval(js_content)
v_code = js_code.call("v")
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
"Cookie": f"v={v_code}",
}
url = f"http://data.10jqka.com.cn/rank/cxd/board/{symbol_map[symbol]}/field/stockcode/order/asc/page/1/ajax/1/free/1/"
r = requests.get(url, headers=headers)
soup = BeautifulSoup(r.text, "lxml")
try:
total_page = soup.find("span", attrs={"class": "page_info"}).text.split("/")[1]
    except AttributeError:
total_page = 1
big_df = pd.DataFrame()
for page in tqdm(range(1, int(total_page) + 1), leave=False):
v_code = js_code.call("v")
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
"Cookie": f"v={v_code}",
}
url = f"http://data.10jqka.com.cn/rank/cxd/board/{symbol_map[symbol]}/field/stockcode/order/asc/page/{page}/ajax/1/free/1/"
r = requests.get(url, headers=headers)
temp_df = pd.read_html(r.text)[0]
big_df = big_df.append(temp_df, ignore_index=True)
big_df.columns = ["序号", "股票代码", "股票简称", "涨跌幅", "换手率", "最新价", "前期低点", "前期低点日期"]
big_df["股票代码"] = big_df["股票代码"].astype(str).str.zfill(6)
big_df["涨跌幅"] = big_df["涨跌幅"].str.strip("%")
big_df["换手率"] = big_df["换手率"].str.strip("%")
big_df["前期低点日期"] = pd.to_datetime(big_df["前期低点日期"]).dt.date
big_df["涨跌幅"] = pd.to_numeric(big_df["涨跌幅"])
big_df["换手率"] = pd.to_numeric(big_df["换手率"])
big_df["最新价"] = pd.to_numeric(big_df["最新价"])
big_df["前期低点"] = pd.to_numeric(big_df["前期低点"])
return big_df
def stock_rank_lxsz_ths() -> pd.DataFrame:
"""
    Tonghuashun - Data Center - Technical stock screening - Consecutive gains
    http://data.10jqka.com.cn/rank/lxsz/
    :return: consecutive-gain data
:rtype: pandas.DataFrame
"""
js_code = py_mini_racer.MiniRacer()
js_content = _get_file_content_ths("ths.js")
js_code.eval(js_content)
v_code = js_code.call("v")
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
"Cookie": f"v={v_code}",
}
url = f"http://data.10jqka.com.cn/rank/lxsz/field/lxts/order/desc/page/1/ajax/1/free/1/"
r = requests.get(url, headers=headers)
soup = BeautifulSoup(r.text, "lxml")
try:
total_page = soup.find("span", attrs={"class": "page_info"}).text.split("/")[1]
    except AttributeError:
total_page = 1
big_df = pd.DataFrame()
for page in tqdm(range(1, int(total_page) + 1), leave=False):
v_code = js_code.call("v")
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
"Cookie": f"v={v_code}",
}
url = f"http://data.10jqka.com.cn/rank/lxsz/field/lxts/order/desc/page/{page}/ajax/1/free/1/"
r = requests.get(url, headers=headers)
temp_df = pd.read_html(r.text, converters={"股票代码": str})[0]
big_df = big_df.append(temp_df, ignore_index=True)
big_df.columns = [
"序号",
"股票代码",
"股票简称",
"收盘价",
"最高价",
"最低价",
"连涨天数",
"连续涨跌幅",
"累计换手率",
"所属行业",
]
big_df["连续涨跌幅"] = big_df["连续涨跌幅"].str.strip("%")
big_df["累计换手率"] = big_df["累计换手率"].str.strip("%")
big_df["连续涨跌幅"] = pd.to_numeric(big_df["连续涨跌幅"])
big_df["累计换手率"] = pd.to_nume | ric(big_df["累计换手率"]) | pandas.to_numeric |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.cluster.hierarchy import linkage, fcluster, dendrogram
from scipy import stats
from sklearn.metrics import f1_score, precision_score, recall_score
from sklearn.decomposition import PCA
# utils --------------------------------------------------------------------
def mad(df): # median absolute deviation
return df.subtract(df.median(axis=1), axis=0).abs().median(axis=1)
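# (Row-wise MAD: subtract each row's median across samples, take absolute values,
#  then take the median of those deviations per row.)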
def score_from_index(func, idx_seq1, idx_seq2, n):
x1 = np.zeros(n)
x2 = np.zeros(n)
x1[idx_seq1] = 1
x2[idx_seq2] = 1
return func(x1, x2)
def score_sfg(sfg_arr, func):
return score_from_index(func, np.arange(500), sfg_arr, 10000)
def jaccard(idx_seq1, idx_seq2):
return len(np.intersect1d(idx_seq1, idx_seq2)) / \
len(np.union1d(idx_seq1, idx_seq2))
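# Quick sanity example (illustrative): jaccard([1, 2, 3], [2, 3, 4]) == 2 / 4 == 0.5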
def generate_data(n_sample, scale=5, n_gene=10000, n_pos=500):
df_expr = pd.DataFrame(np.random.randn(n_gene, n_sample))
df_ctrl = pd.DataFrame(np.random.randn(n_gene, n_sample))
common_fluc = np.sqrt(scale**2 - 1) * np.random.randn(n_sample)
df_expr[:n_pos] += common_fluc
return df_expr, df_ctrl
def deg(df_expr, df_ctrl, fc_cutoff=1.0):
# differentially expressed genes
diff_sr = df_expr.mean(axis=1) - df_ctrl.mean(axis=1)
return df_expr.index[diff_sr.abs() > fc_cutoff]
def save_gene(gene_arr, map_sr, file_name='gene_list.txt'):
pd.Series(map_sr.loc[gene_arr].dropna().sort_values().unique()).\
to_csv(file_name, index=False)
def calculate_q(p_seq):
p_arr = np.asarray(p_seq)
n_tests = len(p_arr)
sort_index_arr = np.argsort(p_arr)
p_sorted_arr = p_arr[sort_index_arr]
q_arr = p_sorted_arr * n_tests / (np.arange(n_tests) + 1)
q_min = q_arr[-1]
q_list = [q_min]
for q in q_arr[-2::-1]:
if q < q_min:
q_min = q
q_list.append(q_min)
q_arr = np.array(q_list)[::-1]
q_arr[sort_index_arr] = q_arr.copy()
return q_arr
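# calculate_q implements the Benjamini-Hochberg step-up adjustment. A small hand-worked
# example (illustrative only):
#     calculate_q([0.01, 0.04, 0.03, 0.20])
#     # -> array([0.04, 0.05333..., 0.05333..., 0.2])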
# proposed method 1 --------------------------------------------------------
def two_step(df_expr, df_ctrl, fc_cutoff=2, corr_cutoff=0.75,
cluster_cutoff=0.5, robust=True):
# step 1: deviation filtering
if robust:
df_sub = df_expr[mad(df_expr) > fc_cutoff * mad(df_ctrl)]
else:
df_sub = df_expr[df_expr.std(axis=1) > fc_cutoff * df_ctrl.std(axis=1)]
# step 2: clustering
if robust:
# Spearman's correlation
df_sub = df_sub.T.rank().T
linkage_arr = linkage(df_sub, method='average', metric='correlation')
label_arr = fcluster(linkage_arr, 1-corr_cutoff, criterion='distance')
freq_sr = | pd.Series(label_arr) | pandas.Series |
import torch
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
import pandas as pd
import numpy as np
from datetime import datetime
from scipy.stats.mstats import gmean
from dateutil.relativedelta import relativedelta
import os
from server.models.portfolio.stats import *
from server.models.portfolio.cost import costs
from server.models.portfolio.tiingo import get_data
from server.models.portfolio.optimize import optimize
from server.models.portfolio.bl import bl, get_mkt_cap
from server.models.portfolio.rs import fama_french, regime_switch, current_regime, business_days, expected_returns, covariance
class RNN(nn.Module):
def __init__(self, embed_size,
num_output,
rnn_model = 'GRU',
use_last = True,
padding_index = 0,
hidden_size = 64,
num_layers = 1,
batch_first = True):
super(RNN, self).__init__()
self.use_last = use_last
self.drop_en = nn.Dropout(p = 0.6)
self.end_date = datetime.now().strftime("%Y-%m-%d")
self.start_date = (datetime.strptime(self.end_date, "%Y-%m-%d") - relativedelta(years=6)).strftime("%Y-%m-%d")
self.tickers = list(pd.read_csv(os.getcwd() + r'/data/tickers.csv')['Tickers'])
if rnn_model == 'LSTM':
self.rnn = nn.LSTM(input_size = embed_size, hidden_size = hidden_size,
num_layers = num_layers, dropout = 0.5,
batch_first = True, bidirectional = False)
elif rnn_model == 'GRU':
self.rnn = nn.GRU(input_size = embed_size, hidden_size = hidden_size,
num_layers = num_layers, dropout = 0.5,
batch_first = True, bidirectional = False)
self.bn2 = nn.BatchNorm1d(int(hidden_size))
self.fc = nn.Linear(int(hidden_size), int(num_output))
def forward(self, x):
#x_embed = self.drop_en(x)
#x_embed = nn.functional.dropout(x)
x_embed = x.view(28, x.shape[1], -1)
#packed_input = pack_padded_sequence(x_embed, seq_lengths.cpu().numpy(), batch_first = True)
x_embed = x_embed.type(torch.FloatTensor)
packed_output, ht = self.rnn(x_embed, None)
#out_rnn, _ = pad_packed_sequence(packed_output, batch_first = True)
#row_indices = torch.arange(0, x.size(0)).long()
#col_indices = seq_lengths - 1
#if next(self.parameters()).is_cuda():
# row_indices = row_indices.cuda()
#col_indices = col_indices.cuda()
#if self.use_last:
#last_tensor = out_rnn[row_indices, col_indices, :]
#last_tensor = packed_output[row_indices, :]
#else:
#last_tensor = out_rnn[row_indices, :, :]
#last_tensor = packed_output[row_indices, :]
#last_tensor = torch.mean(last_tensor, dim = 1)
#change labels to predict returns from stock price, but output mu_ml (do this in run_optimization - move it outside)
fc_input = self.bn2(packed_output[-1].view(x.shape[1], -1))
out = self.fc(fc_input)
#out = self.run_optimization(self.end_date, self.start_date, out)
return out.view(-1)
def run_optimization(self, end_date, start_date, mu_ml):
rebalance_date = (datetime.strptime(end_date, "%Y-%m-%d") + relativedelta(months=6, days=1)).strftime("%Y-%m-%d")
rebalance_date = datetime.strftime( | pd.bdate_range(end_date, rebalance_date) | pandas.bdate_range |
# Here we will use a more sophisticated method to compute the value of a stock in the options market,
# known as Euler discretization
import numpy as np
import pandas as pd
from pandas_datareader import data as wb
from scipy.stats import norm
import matplotlib.pyplot as plt
ticker = 'PG'
data = | pd.DataFrame() | pandas.DataFrame |
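# An illustrative sketch of the Euler discretization referred to above (assumed continuation,
# not the original script): under risk-neutral geometric Brownian motion each simulated step is
#     S[t] = S[t - 1] * np.exp((r - 0.5 * stdev ** 2) * delta_t
#                              + stdev * np.sqrt(delta_t) * Z[t])
# where Z[t] are standard normal draws (e.g. norm.ppf(np.random.rand(t_intervals, iterations))),
# delta_t is the time step, r the risk-free rate and stdev the annualized volatility estimated
# from the ticker's log returns.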
import datetime
import json
import requests
import pandas as pd
import streamlit as st
from copy import deepcopy
from twilio.rest import Client
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
st.set_page_config(layout='wide', initial_sidebar_state='collapsed')
@st.cache(allow_output_mutation=True, suppress_st_warning=True)
def filter_column(df, col, value):
df_temp = deepcopy(df.loc[df[col] == value, :])
return df_temp
def filter_in_stock(df, col):
df_temp = deepcopy(df.loc[df[col] >= 0, :])
return df_temp
def get_location(df, col):
df_temp = deepcopy(df.loc[df[col], :])
return df_temp
rename_mapping = {
'date': 'Date',
'min_age_limit': 'Minimum Age Limit',
'available_capacity': 'Available Capacity',
'pincode': 'Pincode',
'name': 'Hospital Name',
'state_name' : 'State',
'district_name' : 'District',
'block_name': 'Block Name',
'fee_type' : 'Fees'
}
st.title("Manasa says let's get vaccinated!")
st.info('The CoWIN APIs are geo-fenced, so sometimes you may not see any output! Please try again after some time.')
left_column_1, right_column_1 = st.beta_columns(2)
with left_column_1:
numdays = st.slider('Select Date Range', 0, 100, 5)
with right_column_1:
PINCODE = st.text_input("Pincode", "560037")
base = datetime.datetime.today()
date_list = [base + datetime.timedelta(days=x) for x in range(numdays)]
date_str = [x.strftime("%d-%m-%Y") for x in date_list]
final_df = None
for INP_DATE in date_str:
URL = "https://cdn-api.co-vin.in/api/v2/appointment/sessions/calendarByPin?pincode={}&date={}".format(PINCODE, INP_DATE)
response = requests.get(URL)
if (response.ok) and ('centers' in json.loads(response.text)):
resp_json = json.loads(response.text)['centers']
if resp_json is not None:
df = | pd.DataFrame(resp_json) | pandas.DataFrame |
# -*- coding: utf-8 -*-
try:
import json
except ImportError:
import simplejson as json
import math
import pytz
import locale
import pytest
import time
import datetime
import calendar
import re
import decimal
import dateutil
from functools import partial
from pandas.compat import range, StringIO, u
from pandas._libs.tslib import Timestamp
import pandas._libs.json as ujson
import pandas.compat as compat
import numpy as np
from pandas import DataFrame, Series, Index, NaT, DatetimeIndex, date_range
import pandas.util.testing as tm
json_unicode = (json.dumps if compat.PY3
else partial(json.dumps, encoding="utf-8"))
def _clean_dict(d):
"""
Sanitize dictionary for JSON by converting all keys to strings.
Parameters
----------
d : dict
The dictionary to convert.
Returns
-------
cleaned_dict : dict
"""
return {str(k): v for k, v in compat.iteritems(d)}
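# Quick illustration (not part of the original tests): _clean_dict({1: "a", 2.5: "b"})
# returns {"1": "a", "2.5": "b"}.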
@pytest.fixture(params=[
None, # Column indexed by default.
"split",
"records",
"values",
"index"])
def orient(request):
return request.param
@pytest.fixture(params=[None, True])
def numpy(request):
return request.param
class TestUltraJSONTests(object):
@pytest.mark.skipif(compat.is_platform_32bit(),
reason="not compliant on 32-bit, xref #15865")
def test_encode_decimal(self):
sut = decimal.Decimal("1337.1337")
encoded = ujson.encode(sut, double_precision=15)
decoded = ujson.decode(encoded)
assert decoded == 1337.1337
sut = decimal.Decimal("0.95")
encoded = ujson.encode(sut, double_precision=1)
assert encoded == "1.0"
decoded = ujson.decode(encoded)
assert decoded == 1.0
sut = decimal.Decimal("0.94")
encoded = ujson.encode(sut, double_precision=1)
assert encoded == "0.9"
decoded = ujson.decode(encoded)
assert decoded == 0.9
sut = decimal.Decimal("1.95")
encoded = ujson.encode(sut, double_precision=1)
assert encoded == "2.0"
decoded = ujson.decode(encoded)
assert decoded == 2.0
sut = decimal.Decimal("-1.95")
encoded = ujson.encode(sut, double_precision=1)
assert encoded == "-2.0"
decoded = ujson.decode(encoded)
assert decoded == -2.0
sut = decimal.Decimal("0.995")
encoded = ujson.encode(sut, double_precision=2)
assert encoded == "1.0"
decoded = ujson.decode(encoded)
assert decoded == 1.0
sut = decimal.Decimal("0.9995")
encoded = ujson.encode(sut, double_precision=3)
assert encoded == "1.0"
decoded = | ujson.decode(encoded) | pandas._libs.json.decode |
from __future__ import print_function
import collections
import json
import logging
import os
import pickle
import random  # required by set_seed() below
import sys
import numpy as np
import pandas as pd
import keras
from itertools import cycle, islice
from sklearn.preprocessing import Imputer
from sklearn.preprocessing import StandardScaler, MinMaxScaler, MaxAbsScaler
from sklearn.model_selection import ShuffleSplit, KFold
import file_utils
file_path = os.path.dirname(os.path.realpath(__file__))
lib_path = os.path.abspath(os.path.join(file_path, '..', '..', 'common'))
sys.path.append(lib_path)
# import candle
import file_utils
global_cache = {}
SEED = 2018
P1B3_URL = 'http://ftp.mcs.anl.gov/pub/candle/public/benchmarks/P1B3/'
DATA_URL = 'http://ftp.mcs.anl.gov/pub/candle/public/benchmarks/Pilot1/combo/'
logger = logging.getLogger(__name__)
def set_up_logger(verbose=False):
sh = logging.StreamHandler()
sh.setFormatter(logging.Formatter(''))
sh.setLevel(logging.DEBUG if verbose else logging.INFO)
logger.setLevel(logging.DEBUG)
logger.addHandler(sh)
def set_seed(seed=SEED):
os.environ['PYTHONHASHSEED'] = '0'
np.random.seed(seed)
random.seed(seed)
def get_file(url):
fname = os.path.basename(url)
return file_utils.get_file(fname, origin=url, cache_subdir='Pilot1')
def impute_and_scale(df, scaling='std', imputing='mean', dropna='all'):
"""Impute missing values with mean and scale data included in pandas dataframe.
Parameters
----------
df : pandas dataframe
dataframe to impute and scale
scaling : 'maxabs' [-1,1], 'minmax' [0,1], 'std', or None, optional (default 'std')
type of scaling to apply
"""
if dropna:
df = df.dropna(axis=1, how=dropna)
else:
empty_cols = df.columns[df.notnull().sum() == 0]
df[empty_cols] = 0
if imputing is None or imputing.lower() == 'none':
mat = df.values
else:
imputer = Imputer(strategy=imputing, axis=0)
mat = imputer.fit_transform(df)
if scaling is None or scaling.lower() == 'none':
return pd.DataFrame(mat, columns=df.columns)
if scaling == 'maxabs':
scaler = MaxAbsScaler()
elif scaling == 'minmax':
scaler = MinMaxScaler()
else:
scaler = StandardScaler()
mat = scaler.fit_transform(mat)
df = pd.DataFrame(mat, columns=df.columns)
return df
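# Illustrative usage (not part of the original module):
#     df_scaled = impute_and_scale(df, scaling='minmax', imputing='mean', dropna='all')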
def discretize(df, col, bins=2, cutoffs=None):
y = df[col]
thresholds = cutoffs
if thresholds is None:
percentiles = [100 / bins * (i + 1) for i in range(bins - 1)]
thresholds = [np.percentile(y, x) for x in percentiles]
classes = np.digitize(y, thresholds)
df[col] = classes
return df
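# Illustrative behaviour (with the default bins=2 and no explicit cutoffs): values at or above
# the column median are labelled 1 and values below it 0, e.g. discretize(df, 'GROWTH', bins=2).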
def save_combined_dose_response():
df1 = load_single_dose_response(combo_format=True, fraction=False)
df2 = load_combo_dose_response(fraction=False)
df = pd.concat([df1, df2])
df.to_csv('combined_drug_growth', index=False, sep='\t')
def load_combined_dose_response(rename=True):
df1 = load_single_dose_response(combo_format=True)
logger.info('Loaded {} single drug dose response measurements'.format(df1.shape[0]))
df2 = load_combo_dose_response()
logger.info('Loaded {} drug pair dose response measurements'.format(df2.shape[0]))
df = pd.concat([df1, df2])
logger.info('Combined dose response data contains sources: {}'.format(df['SOURCE'].unique()))
if rename:
df = df.rename(columns={'SOURCE': 'Source', 'CELL': 'Sample',
'DRUG1': 'Drug1', 'DRUG2': 'Drug2',
'DOSE1': 'Dose1', 'DOSE2': 'Dose2',
'GROWTH': 'Growth', 'STUDY': 'Study'})
return df
def load_single_dose_response(combo_format=False, fraction=True):
# path = get_file(DATA_URL + 'combined_single_drug_growth')
path = get_file(DATA_URL + 'rescaled_combined_single_drug_growth')
df = global_cache.get(path)
if df is None:
df = pd.read_csv(path, sep='\t', engine='c',
na_values=['na', '-', ''],
# nrows=10,
dtype={'SOURCE': str, 'DRUG_ID': str,
'CELLNAME': str, 'CONCUNIT': str,
'LOG_CONCENTRATION': np.float32,
'EXPID': str, 'GROWTH': np.float32})
global_cache[path] = df
df['DOSE'] = -df['LOG_CONCENTRATION']
df = df.rename(columns={'CELLNAME': 'CELL', 'DRUG_ID': 'DRUG', 'EXPID': 'STUDY'})
df = df[['SOURCE', 'CELL', 'DRUG', 'DOSE', 'GROWTH', 'STUDY']]
if fraction:
df['GROWTH'] /= 100
if combo_format:
df = df.rename(columns={'DRUG': 'DRUG1', 'DOSE': 'DOSE1'})
df['DRUG2'] = np.nan
df['DOSE2'] = np.nan
df['DRUG2'] = df['DRUG2'].astype(object)
df['DOSE2'] = df['DOSE2'].astype(np.float32)
df = df[['SOURCE', 'CELL', 'DRUG1', 'DOSE1', 'DRUG2', 'DOSE2', 'GROWTH', 'STUDY']]
return df
def load_combo_dose_response(fraction=True):
path = get_file(DATA_URL + 'ComboDrugGrowth_Nov2017.csv')
df = global_cache.get(path)
if df is None:
df = pd.read_csv(path, sep=',', engine='c',
na_values=['na', '-', ''],
usecols=['CELLNAME', 'NSC1', 'CONC1', 'NSC2', 'CONC2',
'PERCENTGROWTH', 'VALID', 'SCREENER', 'STUDY'],
# nrows=10000,
dtype={'CELLNAME': str, 'NSC1': str, 'NSC2': str,
'CONC1': np.float32, 'CONC2': np.float32,
'PERCENTGROWTH': np.float32, 'VALID': str,
'SCREENER': str, 'STUDY': str},
error_bad_lines=False, warn_bad_lines=True)
global_cache[path] = df
df = df[df['VALID'] == 'Y']
df['SOURCE'] = 'ALMANAC.' + df['SCREENER']
cellmap_path = get_file(DATA_URL + 'NCI60_CELLNAME_to_Combo.txt')
df_cellmap = pd.read_csv(cellmap_path, sep='\t')
df_cellmap.set_index('Name', inplace=True)
cellmap = df_cellmap[['NCI60.ID']].to_dict()['NCI60.ID']
df['CELL'] = df['CELLNAME'].map(lambda x: cellmap[x])
df['DOSE1'] = -np.log10(df['CONC1'])
df['DOSE2'] = -np.log10(df['CONC2'])
df['DRUG1'] = 'NSC.' + df['NSC1']
df['DRUG2'] = 'NSC.' + df['NSC2']
if fraction:
df['GROWTH'] = df['PERCENTGROWTH'] / 100
else:
df['GROWTH'] = df['PERCENTGROWTH']
df = df[['SOURCE', 'CELL', 'DRUG1', 'DOSE1', 'DRUG2', 'DOSE2', 'GROWTH', 'STUDY']]
return df
def load_aggregated_single_response(target='AUC', min_r2_fit=0.3, max_ec50_se=3, combo_format=False, rename=True):
path = get_file(DATA_URL + 'combined_single_response_agg')
df = global_cache.get(path)
if df is None:
df = pd.read_csv(path, engine='c', sep='\t',
dtype={'SOURCE': str, 'CELL': str, 'DRUG': str, 'STUDY': str,
'AUC': np.float32, 'IC50': np.float32,
'EC50': np.float32, 'EC50se': np.float32,
'R2fit': np.float32, 'Einf': np.float32,
'HS': np.float32, 'AAC1': np.float32,
'AUC1': np.float32, 'DSS1': np.float32})
global_cache[path] = df
total = len(df)
df = df[(df['R2fit'] >= min_r2_fit) & (df['EC50se'] <= max_ec50_se)]
df = df[['SOURCE', 'CELL', 'DRUG', target, 'STUDY']]
df = df[~df[target].isnull()]
    logger.info('Loaded %d dose independent response samples (filtered by EC50se <= %f & R2fit >= %f from a total of %d).', len(df), max_ec50_se, min_r2_fit, total)
if combo_format:
df = df.rename(columns={'DRUG': 'DRUG1'})
df['DRUG2'] = np.nan
df['DRUG2'] = df['DRUG2'].astype(object)
df = df[['SOURCE', 'CELL', 'DRUG1', 'DRUG2', target, 'STUDY']]
if rename:
df = df.rename(columns={'SOURCE': 'Source', 'CELL': 'Sample',
'DRUG1': 'Drug1', 'DRUG2': 'Drug2', 'STUDY': 'Study'})
else:
if rename:
df = df.rename(columns={'SOURCE': 'Source', 'CELL': 'Sample',
'DRUG': 'Drug', 'STUDY': 'Study'})
return df
def load_drug_data(ncols=None, scaling='std', imputing='mean', dropna=None, add_prefix=True):
df_info = load_drug_info()
df_info['Drug'] = df_info['PUBCHEM']
df_desc = load_drug_set_descriptors(drug_set='Combined_PubChem', ncols=ncols)
df_fp = load_drug_set_fingerprints(drug_set='Combined_PubChem', ncols=ncols)
df_desc = pd.merge(df_info[['ID', 'Drug']], df_desc, on='Drug').drop('Drug', 1).rename(columns={'ID': 'Drug'})
df_fp = pd.merge(df_info[['ID', 'Drug']], df_fp, on='Drug').drop('Drug', 1).rename(columns={'ID': 'Drug'})
df_desc2 = load_drug_set_descriptors(drug_set='NCI60', usecols=df_desc.columns.tolist() if ncols else None)
df_fp2 = load_drug_set_fingerprints(drug_set='NCI60', usecols=df_fp.columns.tolist() if ncols else None)
df_desc = pd.concat([df_desc, df_desc2]).reset_index(drop=True)
df1 = | pd.DataFrame(df_desc.loc[:, 'Drug']) | pandas.DataFrame |
"""
Tests for scalar Timedelta arithmetic ops
"""
from datetime import datetime, timedelta
import operator
import numpy as np
import pytest
import pandas as pd
from pandas import NaT, Timedelta, Timestamp, offsets
import pandas._testing as tm
from pandas.core import ops
class TestTimedeltaAdditionSubtraction:
"""
Tests for Timedelta methods:
__add__, __radd__,
__sub__, __rsub__
"""
@pytest.mark.parametrize(
"ten_seconds",
[
Timedelta(10, unit="s"),
timedelta(seconds=10),
np.timedelta64(10, "s"),
np.timedelta64(10000000000, "ns"),
offsets.Second(10),
],
)
def test_td_add_sub_ten_seconds(self, ten_seconds):
# GH#6808
base = Timestamp("20130101 09:01:12.123456")
expected_add = Timestamp("20130101 09:01:22.123456")
expected_sub = Timestamp("20130101 09:01:02.123456")
result = base + ten_seconds
assert result == expected_add
result = base - ten_seconds
assert result == expected_sub
@pytest.mark.parametrize(
"one_day_ten_secs",
[
Timedelta("1 day, 00:00:10"),
Timedelta("1 days, 00:00:10"),
timedelta(days=1, seconds=10),
np.timedelta64(1, "D") + np.timedelta64(10, "s"),
offsets.Day() + offsets.Second(10),
],
)
def test_td_add_sub_one_day_ten_seconds(self, one_day_ten_secs):
# GH#6808
base = Timestamp("20130102 09:01:12.123456")
expected_add = Timestamp("20130103 09:01:22.123456")
expected_sub = Timestamp("20130101 09:01:02.123456")
result = base + one_day_ten_secs
assert result == expected_add
result = base - one_day_ten_secs
assert result == expected_sub
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_datetimelike_scalar(self, op):
# GH#19738
td = Timedelta(10, unit="d")
result = op(td, datetime(2016, 1, 1))
if op is operator.add:
# datetime + Timedelta does _not_ call Timedelta.__radd__,
# so we get a datetime back instead of a Timestamp
assert isinstance(result, Timestamp)
assert result == Timestamp(2016, 1, 11)
result = op(td, Timestamp("2018-01-12 18:09"))
assert isinstance(result, Timestamp)
assert result == Timestamp("2018-01-22 18:09")
result = op(td, np.datetime64("2018-01-12"))
assert isinstance(result, Timestamp)
assert result == Timestamp("2018-01-22")
result = op(td, NaT)
assert result is NaT
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_td(self, op):
td = Timedelta(10, unit="d")
result = op(td, Timedelta(days=10))
assert isinstance(result, Timedelta)
assert result == Timedelta(days=20)
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_pytimedelta(self, op):
td = Timedelta(10, unit="d")
result = op(td, timedelta(days=9))
assert isinstance(result, Timedelta)
assert result == Timedelta(days=19)
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_timedelta64(self, op):
td = Timedelta(10, unit="d")
result = op(td, np.timedelta64(-4, "D"))
assert isinstance(result, Timedelta)
assert result == Timedelta(days=6)
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_offset(self, op):
td = Timedelta(10, unit="d")
result = op(td, offsets.Hour(6))
assert isinstance(result, Timedelta)
assert result == Timedelta(days=10, hours=6)
def test_td_sub_td(self):
td = Timedelta(10, unit="d")
expected = Timedelta(0, unit="ns")
result = td - td
assert isinstance(result, Timedelta)
assert result == expected
def test_td_sub_pytimedelta(self):
td = Timedelta(10, unit="d")
expected = Timedelta(0, unit="ns")
result = td - td.to_pytimedelta()
assert isinstance(result, Timedelta)
assert result == expected
result = td.to_pytimedelta() - td
assert isinstance(result, Timedelta)
assert result == expected
def test_td_sub_timedelta64(self):
td = Timedelta(10, unit="d")
expected = Timedelta(0, unit="ns")
result = td - td.to_timedelta64()
assert isinstance(result, Timedelta)
assert result == expected
result = td.to_timedelta64() - td
assert isinstance(result, Timedelta)
assert result == expected
def test_td_sub_nat(self):
# In this context pd.NaT is treated as timedelta-like
td = Timedelta(10, unit="d")
result = td - NaT
assert result is NaT
def test_td_sub_td64_nat(self):
td = Timedelta(10, unit="d")
td_nat = np.timedelta64("NaT")
result = td - td_nat
assert result is NaT
result = td_nat - td
assert result is NaT
def test_td_sub_offset(self):
td = Timedelta(10, unit="d")
result = td - offsets.Hour(1)
assert isinstance(result, Timedelta)
assert result == Timedelta(239, unit="h")
def test_td_add_sub_numeric_raises(self):
td = Timedelta(10, unit="d")
for other in [2, 2.0, np.int64(2), np.float64(2)]:
with pytest.raises(TypeError):
td + other
with pytest.raises(TypeError):
other + td
with pytest.raises(TypeError):
td - other
with pytest.raises(TypeError):
other - td
def test_td_rsub_nat(self):
td = Timedelta(10, unit="d")
result = NaT - td
assert result is NaT
result = np.datetime64("NaT") - td
assert result is NaT
def test_td_rsub_offset(self):
result = offsets.Hour(1) - Timedelta(10, unit="d")
assert isinstance(result, Timedelta)
assert result == Timedelta(-239, unit="h")
def test_td_sub_timedeltalike_object_dtype_array(self):
# GH#21980
arr = np.array([Timestamp("20130101 9:01"), Timestamp("20121230 9:02")])
exp = np.array([Timestamp("20121231 9:01"), Timestamp("20121229 9:02")])
res = arr - Timedelta("1D")
tm.assert_numpy_array_equal(res, exp)
def test_td_sub_mixed_most_timedeltalike_object_dtype_array(self):
# GH#21980
now = Timestamp.now()
arr = np.array([now, | Timedelta("1D") | pandas.Timedelta |
"""MVTecAd Dataset."""
# default packages
import dataclasses as dc
import enum
import logging
import pathlib
import shutil
import sys
import tarfile
import typing as t
import urllib.request as request
# third party packages
import pandas as pd
# my packages
import src.data.dataset as ds
import src.data.utils as ut
# logger
_logger = logging.getLogger(__name__)
class Kind(enum.Enum):
HAZELNUT = "hazelnut"
@classmethod
    def value_of(cls, name: str) -> "Kind":
        """Return the Enum member that matches the given setting string.
        Raises:
            ValueError: if the given string does not match any configured value
        Returns:
            [type]: the matching Enum member
"""
for e in Kind:
if e.value == name:
return e
raise ValueError(f"invalid value: {name}")
class MVTecAd(ds.Dataset):
def __init__(self, kind: Kind) -> None:
super().__init__()
archive, datadir = _get_archive_file_name(kind)
self.archive_file = self.path.joinpath(archive)
self.datadir = self.path.joinpath(datadir)
self.train_list = self.path.joinpath(f"{datadir}_train.csv")
self.valid_list = self.path.joinpath(f"{datadir}_valid.csv")
self.test_list = self.path.joinpath(f"{datadir}_test.csv")
def save_dataset(self, reprocess: bool) -> None:
if reprocess:
            _logger.info("=== reprocess mode. delete existing data.")
self.archive_file.unlink()
shutil.rmtree(self.datadir)
self.train_list.unlink()
self.valid_list.unlink()
self.test_list.unlink()
self.path.mkdir(exist_ok=True)
if not self.datadir.exists():
if not self.archive_file.exists():
_logger.info("=== download zip file.")
_download(self.archive_file)
_logger.info("=== extract all.")
with tarfile.open(self.archive_file, "r") as tar:
tar.extractall(self.path)
if not self.train_list.exists() and not self.valid_list.exists():
_logger.info("=== create train and valid file list.")
filelist = sorted(
[p.relative_to(self.path) for p in self.datadir.glob("train/**/*.png")]
)
train_ratio = 0.8
train_num = int(len(filelist) * train_ratio)
if not self.train_list.exists():
train_list = | pd.DataFrame({"filepath": filelist[:train_num]}) | pandas.DataFrame |
import gensim
import numpy as np
import pandas as pd
import re
import os
import time
import jieba
import cv2
import json
import urllib
import random
import hashlib
from snownlp import sentiment
from snownlp import SnowNLP
import jieba.posseg as pseg
from gensim.models import word2vec
import logging
import torch
import torch.nn as nn
from torch.autograd import Variable
from torchvision import models, transforms
from PIL import Image
from tensorflow.keras.applications import vgg19
from tensorflow.keras.applications import resnet50
from tensorflow.keras.preprocessing import image
from collections import Counter
from scipy.linalg import norm
train_csv_path = r'G:\毕设\数据集\微博\train.csv'
text_csv_path = r'G:\毕设\数据集\微博\text.csv'
user_csv_path = r'G:\毕设\数据集\微博\user.csv'
image_csv_path = r'G:\毕设\数据集\微博\image.csv'
en_imagenet_class_path = r'G:\毕设\数据集\微博\imagenet_class_index.json'
cn_imagenet_class_path = r'G:\毕设\数据集\微博\imagenet_class_cn.json'
image_class_vgg19_score_path = r'G:\毕设\数据集\微博\image_class_vgg19.txt'
image_class_resnet50_score_path = r'G:\毕设\数据集\微博\image_class_resnet50.txt'
train_negative_corpus_path = os.path.abspath(os.path.dirname(os.getcwd())+os.path.sep+".")+'/util/negative.txt'
train_positive_corpus_path = os.path.abspath(os.path.dirname(os.getcwd())+os.path.sep+".")+'/util/positive.txt'
sentiment_model_path = os.path.abspath(os.path.dirname(os.getcwd())+os.path.sep+".")+'/util/sentiment.marshal'
stopwords_path = os.path.abspath(os.path.dirname(os.getcwd())+os.path.sep+".")+"/util/stopwords.txt"
word2vec_txt_path = os.path.abspath(os.path.dirname(os.getcwd())+os.path.sep+".")+"/util/word2vec_corpus.txt"
word2vec_model_path = os.path.abspath(os.path.dirname(os.getcwd())+os.path.sep+".")+"/util/text8.model"
possentiwords_path = os.path.abspath(os.path.dirname(os.getcwd())+os.path.sep+".")+"/util/possentiwords.txt"
negsentiwords_path = os.path.abspath(os.path.dirname(os.getcwd())+os.path.sep+".")+"/util/negsentiwords.txt"
appid = '20190716000318328'
secretKey = '<KEY>'
url_baidu = 'http://api.fanyi.baidu.com/api/trans/vip/translate'
def train_data_read(train_csv_path):
"""
    Read in the training data
    df_text: text-related columns
    df_user: user-related columns
    df_image: image-related columns
"""
    logging.info("Loading data...")
    # Weibo post information
df_text = pd.read_csv(train_csv_path,usecols=['id','text','category','label'])
    # User information
df_user = pd.read_csv(train_csv_path,usecols=['id','userGender','userFollowCount','userFansCount','userWeiboCount','userLocation','userDescription'])
    # Weibo image information
df_image = | pd.read_csv(train_csv_path,usecols=['id','piclist']) | pandas.read_csv |
from datetime import timedelta
from functools import partial
import itertools
from parameterized import parameterized
import numpy as np
from numpy.testing import assert_array_equal, assert_almost_equal
import pandas as pd
from toolz import merge
from zipline.pipeline import SimplePipelineEngine, Pipeline, CustomFactor
from zipline.pipeline.common import (
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
SID_FIELD_NAME,
TS_FIELD_NAME,
)
from zipline.pipeline.data import DataSet
from zipline.pipeline.data import Column
from zipline.pipeline.domain import EquitySessionDomain
from zipline.pipeline.loaders.earnings_estimates import (
NextEarningsEstimatesLoader,
NextSplitAdjustedEarningsEstimatesLoader,
normalize_quarters,
PreviousEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader,
split_normalized_quarters,
)
from zipline.testing.fixtures import (
WithAdjustmentReader,
WithTradingSessions,
ZiplineTestCase,
)
from zipline.testing.predicates import assert_equal
from zipline.testing.predicates import assert_frame_equal
from zipline.utils.numpy_utils import datetime64ns_dtype
from zipline.utils.numpy_utils import float64_dtype
import pytest
class Estimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate = Column(dtype=float64_dtype)
class MultipleColumnsEstimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate1 = Column(dtype=float64_dtype)
estimate2 = Column(dtype=float64_dtype)
def QuartersEstimates(announcements_out):
class QtrEstimates(Estimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def MultipleColumnsQuartersEstimates(announcements_out):
class QtrEstimates(MultipleColumnsEstimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def QuartersEstimatesNoNumQuartersAttr(num_qtr):
class QtrEstimates(Estimates):
name = Estimates
return QtrEstimates
def create_expected_df_for_factor_compute(start_date, sids, tuples, end_date):
"""
Given a list of tuples of new data we get for each sid on each critical
date (when information changes), create a DataFrame that fills that
data through a date range ending at `end_date`.
"""
df = pd.DataFrame(tuples, columns=[SID_FIELD_NAME, "estimate", "knowledge_date"])
df = df.pivot_table(
columns=SID_FIELD_NAME, values="estimate", index="knowledge_date", dropna=False
)
df = df.reindex(pd.date_range(start_date, end_date))
# Index name is lost during reindex.
df.index = df.index.rename("knowledge_date")
df["at_date"] = end_date.tz_localize("utc")
df = df.set_index(["at_date", df.index.tz_localize("utc")]).ffill()
new_sids = set(sids) - set(df.columns)
df = df.reindex(columns=df.columns.union(new_sids))
return df
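# Rough illustration (added as a hedge, not part of the original test suite): with
# sids=[0, 10], tuples=[(0, 100.0, pd.Timestamp("2015-01-09"))], a start of 2015-01-05
# and end_date=2015-01-12, the helper returns a frame indexed by
# (at_date, knowledge_date) with one column per sid; column 0 is NaN until 2015-01-09
# and 100.0 (forward-filled) afterwards, while column 10 stays NaN throughout.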
class WithEstimates(WithTradingSessions, WithAdjustmentReader):
"""
ZiplineTestCase mixin providing cls.loader and cls.events as class
level fixtures.
Methods
-------
make_loader(events, columns) -> PipelineLoader
Method which returns the loader to be used throughout tests.
events : pd.DataFrame
The raw events to be used as input to the pipeline loader.
columns : dict[str -> str]
The dictionary mapping the names of BoundColumns to the
associated column name in the events DataFrame.
make_columns() -> dict[BoundColumn -> str]
Method which returns a dictionary of BoundColumns mapped to the
associated column names in the raw data.
"""
# Short window defined in order for test to run faster.
START_DATE = pd.Timestamp("2014-12-28", tz="utc")
END_DATE = pd.Timestamp("2015-02-04", tz="utc")
@classmethod
def make_loader(cls, events, columns):
raise NotImplementedError("make_loader")
@classmethod
def make_events(cls):
raise NotImplementedError("make_events")
@classmethod
def get_sids(cls):
return cls.events[SID_FIELD_NAME].unique()
@classmethod
def make_columns(cls):
return {
Estimates.event_date: "event_date",
Estimates.fiscal_quarter: "fiscal_quarter",
Estimates.fiscal_year: "fiscal_year",
Estimates.estimate: "estimate",
}
def make_engine(self, loader=None):
if loader is None:
loader = self.loader
return SimplePipelineEngine(
lambda x: loader,
self.asset_finder,
default_domain=EquitySessionDomain(
self.trading_days,
self.ASSET_FINDER_COUNTRY_CODE,
),
)
@classmethod
def init_class_fixtures(cls):
cls.events = cls.make_events()
cls.ASSET_FINDER_EQUITY_SIDS = cls.get_sids()
cls.ASSET_FINDER_EQUITY_SYMBOLS = [
"s" + str(n) for n in cls.ASSET_FINDER_EQUITY_SIDS
]
# We need to instantiate certain constants needed by supers of
# `WithEstimates` before we call their `init_class_fixtures`.
super(WithEstimates, cls).init_class_fixtures()
cls.columns = cls.make_columns()
# Some tests require `WithAdjustmentReader` to be set up by the time we
# make the loader.
cls.loader = cls.make_loader(
cls.events, {column.name: val for column, val in cls.columns.items()}
)
class WithOneDayPipeline(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
test_wrong_num_announcements_passed()
Tests that loading with an incorrect quarter number raises an error.
test_no_num_announcements_attr()
Tests that the loader throws an AssertionError if the dataset being
loaded has no `num_announcements` attribute.
"""
@classmethod
def make_columns(cls):
return {
MultipleColumnsEstimates.event_date: "event_date",
MultipleColumnsEstimates.fiscal_quarter: "fiscal_quarter",
MultipleColumnsEstimates.fiscal_year: "fiscal_year",
MultipleColumnsEstimates.estimate1: "estimate1",
MultipleColumnsEstimates.estimate2: "estimate2",
}
@classmethod
def make_events(cls):
return pd.DataFrame(
{
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp("2015-01-01"), pd.Timestamp("2015-01-06")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-10"),
pd.Timestamp("2015-01-20"),
],
"estimate1": [1.0, 2.0],
"estimate2": [3.0, 4.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015],
}
)
@classmethod
def make_expected_out(cls):
raise NotImplementedError("make_expected_out")
@classmethod
def init_class_fixtures(cls):
super(WithOneDayPipeline, cls).init_class_fixtures()
cls.sid0 = cls.asset_finder.retrieve_asset(0)
cls.expected_out = cls.make_expected_out()
def test_load_one_day(self):
# We want to test multiple columns
dataset = MultipleColumnsQuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=pd.Timestamp("2015-01-15", tz="utc"),
end_date=pd.Timestamp("2015-01-15", tz="utc"),
)
assert_frame_equal(results.sort_index(1), self.expected_out.sort_index(1))
class PreviousWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
Tests that previous quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp("2015-01-10"),
"estimate1": 1.0,
"estimate2": 3.0,
FISCAL_QUARTER_FIELD_NAME: 1.0,
FISCAL_YEAR_FIELD_NAME: 2015.0,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp("2015-01-15", tz="utc"), cls.sid0),)
),
)
class NextWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
Tests that next quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp("2015-01-20"),
"estimate1": 2.0,
"estimate2": 4.0,
FISCAL_QUARTER_FIELD_NAME: 2.0,
FISCAL_YEAR_FIELD_NAME: 2015.0,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp("2015-01-15", tz="utc"), cls.sid0),)
),
)
dummy_df = pd.DataFrame(
{SID_FIELD_NAME: 0},
columns=[
SID_FIELD_NAME,
TS_FIELD_NAME,
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
"estimate",
],
index=[0],
)
class WithWrongLoaderDefinition(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
test_wrong_num_announcements_passed()
Tests that loading with an incorrect quarter number raises an error.
test_no_num_announcements_attr()
Tests that the loader throws an AssertionError if the dataset being
loaded has no `num_announcements` attribute.
"""
@classmethod
def make_events(cls):
return dummy_df
def test_wrong_num_announcements_passed(self):
bad_dataset1 = QuartersEstimates(-1)
bad_dataset2 = QuartersEstimates(-2)
good_dataset = QuartersEstimates(1)
engine = self.make_engine()
columns = {
c.name + str(dataset.num_announcements): c.latest
for dataset in (bad_dataset1, bad_dataset2, good_dataset)
for c in dataset.columns
}
p = Pipeline(columns)
err_msg = (
r"Passed invalid number of quarters -[0-9],-[0-9]; "
r"must pass a number of quarters >= 0"
)
with pytest.raises(ValueError, match=err_msg):
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
def test_no_num_announcements_attr(self):
dataset = QuartersEstimatesNoNumQuartersAttr(1)
engine = self.make_engine()
p = Pipeline({c.name: c.latest for c in dataset.columns})
with pytest.raises(AttributeError):
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
class PreviousWithWrongNumQuarters(WithWrongLoaderDefinition, ZiplineTestCase):
"""
Tests that previous quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
class NextWithWrongNumQuarters(WithWrongLoaderDefinition, ZiplineTestCase):
"""
Tests that next quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
options = [
"split_adjustments_loader",
"split_adjusted_column_names",
"split_adjusted_asof",
]
class WrongSplitsLoaderDefinition(WithEstimates, ZiplineTestCase):
"""
Test class that tests that loaders break correctly when incorrectly
instantiated.
Tests
-----
test_extra_splits_columns_passed(SplitAdjustedEstimatesLoader)
A test that checks that the loader correctly breaks when an
unexpected column is passed in the list of split-adjusted columns.
"""
@classmethod
def init_class_fixtures(cls):
super(WithEstimates, cls).init_class_fixtures()
@parameterized.expand(
itertools.product(
(
NextSplitAdjustedEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader,
),
)
)
def test_extra_splits_columns_passed(self, loader):
columns = {
Estimates.event_date: "event_date",
Estimates.fiscal_quarter: "fiscal_quarter",
Estimates.fiscal_year: "fiscal_year",
Estimates.estimate: "estimate",
}
with pytest.raises(ValueError):
loader(
dummy_df,
{column.name: val for column, val in columns.items()},
split_adjustments_loader=self.adjustment_reader,
split_adjusted_column_names=["estimate", "extra_col"],
split_adjusted_asof=pd.Timestamp("2015-01-01"),
)
class WithEstimatesTimeZero(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
cls.events : pd.DataFrame
Generated dynamically in order to test inter-leavings of estimates and
event dates for multiple quarters to make sure that we select the
right immediate 'next' or 'previous' quarter relative to each date -
i.e., the right 'time zero' on the timeline. We care about selecting
the right 'time zero' because we use that to calculate which quarter's
data needs to be returned for each day.
Methods
-------
get_expected_estimate(q1_knowledge,
q2_knowledge,
comparable_date) -> pd.DataFrame
Retrieves the expected estimate given the latest knowledge about each
quarter and the date on which the estimate is being requested. If
there is no expected estimate, returns an empty DataFrame.
Tests
------
test_estimates()
Tests that we get the right 'time zero' value on each day for each
sid and for each column.
"""
# Shorter date range for performance
END_DATE = pd.Timestamp("2015-01-28", tz="utc")
q1_knowledge_dates = [
pd.Timestamp("2015-01-01"),
pd.Timestamp("2015-01-04"),
pd.Timestamp("2015-01-07"),
pd.Timestamp("2015-01-11"),
]
q2_knowledge_dates = [
pd.Timestamp("2015-01-14"),
pd.Timestamp("2015-01-17"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-23"),
]
# We want to model the possibility of an estimate predicting a release date
# that doesn't match the actual release. This could be done by dynamically
# generating more combinations with different release dates, but that
# significantly increases the amount of time it takes to run the tests.
# These hard-coded cases are sufficient to know that we can update our
# beliefs when we get new information.
q1_release_dates = [
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-14"),
] # One day late
q2_release_dates = [
pd.Timestamp("2015-01-25"), # One day early
pd.Timestamp("2015-01-26"),
]
@classmethod
def make_events(cls):
"""
In order to determine which estimate we care about for a particular
sid, we need to look at all estimates that we have for that sid and
their associated event dates.
We define q1 < q2, and thus event1 < event2 since event1 occurs
during q1 and event2 occurs during q2 and we assume that there can
only be 1 event per quarter. We assume that there can be multiple
estimates per quarter leading up to the event. We assume that estimates
will not surpass the relevant event date. We will look at 2 estimates
for an event before the event occurs, since that is the simplest
scenario that covers the interesting edge cases:
- estimate values changing
- a release date changing
- estimates for different quarters interleaving
Thus, we generate all possible inter-leavings of 2 estimates per
quarter-event where estimate1 < estimate2 and all estimates are < the
relevant event and assign each of these inter-leavings to a
different sid.
"""
sid_estimates = []
sid_releases = []
# We want all permutations of 2 knowledge dates per quarter.
it = enumerate(
itertools.permutations(cls.q1_knowledge_dates + cls.q2_knowledge_dates, 4)
)
for sid, (q1e1, q1e2, q2e1, q2e2) in it:
# We're assuming that estimates must come before the relevant
# release.
if (
q1e1 < q1e2
and q2e1 < q2e2
# All estimates are < Q2's event, so just constrain Q1
# estimates.
and q1e1 < cls.q1_release_dates[0]
and q1e2 < cls.q1_release_dates[0]
):
sid_estimates.append(
cls.create_estimates_df(q1e1, q1e2, q2e1, q2e2, sid)
)
sid_releases.append(cls.create_releases_df(sid))
return pd.concat(sid_estimates + sid_releases).reset_index(drop=True)
@classmethod
def get_sids(cls):
sids = cls.events[SID_FIELD_NAME].unique()
# Tack on an extra sid to make sure that sids with no data are
# included but have all-null columns.
return list(sids) + [max(sids) + 1]
@classmethod
def create_releases_df(cls, sid):
# Final release dates never change. The quarters have very tight date
# ranges in order to reduce the number of dates we need to iterate
# through when testing.
return pd.DataFrame(
{
TS_FIELD_NAME: [pd.Timestamp("2015-01-13"), pd.Timestamp("2015-01-26")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-26"),
],
"estimate": [0.5, 0.8],
FISCAL_QUARTER_FIELD_NAME: [1.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0],
SID_FIELD_NAME: sid,
}
)
@classmethod
def create_estimates_df(cls, q1e1, q1e2, q2e1, q2e2, sid):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: cls.q1_release_dates + cls.q2_release_dates,
"estimate": [0.1, 0.2, 0.3, 0.4],
FISCAL_QUARTER_FIELD_NAME: [1.0, 1.0, 2.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0, 2015.0, 2015.0],
TS_FIELD_NAME: [q1e1, q1e2, q2e1, q2e2],
SID_FIELD_NAME: sid,
}
)
def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date):
return pd.DataFrame()
def test_estimates(self):
dataset = QuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=self.trading_days[1],
end_date=self.trading_days[-2],
)
for sid in self.ASSET_FINDER_EQUITY_SIDS:
sid_estimates = results.xs(sid, level=1)
# Separate assertion for all-null DataFrame to avoid setting
# column dtypes on `all_expected`.
if sid == max(self.ASSET_FINDER_EQUITY_SIDS):
assert sid_estimates.isnull().all().all()
else:
ts_sorted_estimates = self.events[
self.events[SID_FIELD_NAME] == sid
].sort_values(TS_FIELD_NAME)
q1_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 1
]
q2_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 2
]
all_expected = pd.concat(
[
self.get_expected_estimate(
q1_knowledge[
q1_knowledge[TS_FIELD_NAME] <= date.tz_localize(None)
],
q2_knowledge[
q2_knowledge[TS_FIELD_NAME] <= date.tz_localize(None)
],
date.tz_localize(None),
).set_index([[date]])
for date in sid_estimates.index
],
axis=0,
)
sid_estimates.index = all_expected.index.copy()
assert_equal(all_expected[sid_estimates.columns], sid_estimates)
class NextEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date):
# If our latest knowledge of q1 is that the release is
# happening on this simulation date or later, then that's
# the estimate we want to use.
if (
not q1_knowledge.empty
and q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >= comparable_date
):
return q1_knowledge.iloc[-1:]
# If q1 has already happened or we don't know about it
# yet and our latest knowledge indicates that q2 hasn't
# happened yet, then that's the estimate we want to use.
elif (
not q2_knowledge.empty
and q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >= comparable_date
):
return q2_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns, index=[comparable_date])
class PreviousEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date):
# The expected estimate will be for q2 if the last thing
# we've seen is that the release date already happened.
# Otherwise, it'll be for q1, as long as the release date
# for q1 has already happened.
if (
not q2_knowledge.empty
and q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <= comparable_date
):
return q2_knowledge.iloc[-1:]
elif (
not q1_knowledge.empty
and q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <= comparable_date
):
return q1_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns, index=[comparable_date])
class WithEstimateMultipleQuarters(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events, cls.make_expected_out as
class-level fixtures and self.test_multiple_qtrs_requested as a test.
Attributes
----------
events : pd.DataFrame
Simple DataFrame with estimates for 2 quarters for a single sid.
Methods
-------
make_expected_out() --> pd.DataFrame
Returns the DataFrame that is expected as a result of running a
Pipeline where estimates are requested for multiple quarters out.
fill_expected_out(expected)
Fills the expected DataFrame with data.
Tests
------
test_multiple_qtrs_requested()
Runs a Pipeline that calculates estimates for multiple quarters
out and checks that the returned columns contain data for the correct
number of quarters out.
"""
@classmethod
def make_events(cls):
return pd.DataFrame(
{
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp("2015-01-01"), pd.Timestamp("2015-01-06")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-10"),
pd.Timestamp("2015-01-20"),
],
"estimate": [1.0, 2.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015],
}
)
@classmethod
def init_class_fixtures(cls):
super(WithEstimateMultipleQuarters, cls).init_class_fixtures()
cls.expected_out = cls.make_expected_out()
@classmethod
def make_expected_out(cls):
expected = pd.DataFrame(
columns=[cls.columns[col] + "1" for col in cls.columns]
+ [cls.columns[col] + "2" for col in cls.columns],
index=cls.trading_days,
)
for (col, raw_name), suffix in itertools.product(
cls.columns.items(), ("1", "2")
):
expected_name = raw_name + suffix
if col.dtype == datetime64ns_dtype:
expected[expected_name] = pd.to_datetime(expected[expected_name])
else:
expected[expected_name] = expected[expected_name].astype(col.dtype)
cls.fill_expected_out(expected)
return expected.reindex(cls.trading_days)
def test_multiple_qtrs_requested(self):
dataset1 = QuartersEstimates(1)
dataset2 = QuartersEstimates(2)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline(
merge(
[
{c.name + "1": c.latest for c in dataset1.columns},
{c.name + "2": c.latest for c in dataset2.columns},
]
)
),
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
q1_columns = [col.name + "1" for col in self.columns]
q2_columns = [col.name + "2" for col in self.columns]
# We now expect a column for 1 quarter out and a column for 2
# quarters out for each of the dataset columns.
assert_equal(
sorted(np.array(q1_columns + q2_columns)), sorted(results.columns.values)
)
assert_equal(
self.expected_out.sort_index(axis=1),
results.xs(0, level=1).sort_index(axis=1),
)
class NextEstimateMultipleQuarters(WithEstimateMultipleQuarters, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected.loc[
pd.Timestamp("2015-01-01", tz="UTC") : pd.Timestamp(
"2015-01-11", tz="UTC"
),
raw_name + "1",
] = cls.events[raw_name].iloc[0]
expected.loc[
pd.Timestamp("2015-01-11", tz="UTC") : pd.Timestamp(
"2015-01-20", tz="UTC"
),
raw_name + "1",
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
# We only have an estimate and event date for 2 quarters out before
# Q1's event happens; after Q1's event, we know 1 Q out but not 2 Qs
# out.
for col_name in ["estimate", "event_date"]:
expected.loc[
pd.Timestamp("2015-01-06", tz="UTC") : pd.Timestamp(
"2015-01-10", tz="UTC"
),
col_name + "2",
] = cls.events[col_name].iloc[1]
# But we know what FQ and FY we'd need in both Q1 and Q2
# because we know which FQ is next and can calculate from there
expected.loc[
pd.Timestamp("2015-01-01", tz="UTC") : pd.Timestamp("2015-01-09", tz="UTC"),
FISCAL_QUARTER_FIELD_NAME + "2",
] = 2
expected.loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC"),
FISCAL_QUARTER_FIELD_NAME + "2",
] = 3
expected.loc[
pd.Timestamp("2015-01-01", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC"),
FISCAL_YEAR_FIELD_NAME + "2",
] = 2015
return expected
class PreviousEstimateMultipleQuarters(WithEstimateMultipleQuarters, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected[raw_name + "1"].loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp(
"2015-01-19", tz="UTC"
)
] = cls.events[raw_name].iloc[0]
expected[raw_name + "1"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
for col_name in ["estimate", "event_date"]:
expected[col_name + "2"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = cls.events[col_name].iloc[0]
expected[FISCAL_QUARTER_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC")
] = 4
expected[FISCAL_YEAR_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC")
] = 2014
expected[FISCAL_QUARTER_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = 1
expected[FISCAL_YEAR_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = 2015
return expected
class WithVaryingNumEstimates(WithEstimates):
"""
ZiplineTestCase mixin providing fixtures and a test to ensure that we
have the correct overwrites when the event date changes. We want to make
sure that if we have a quarter with an event date that gets pushed back,
we don't start overwriting for the next quarter early. Likewise,
if we have a quarter with an event date that gets pushed forward, we want
to make sure that we start applying adjustments at the appropriate, earlier
date, rather than the later date.
Methods
-------
assert_compute()
Defines how to determine that results computed for the `SomeFactor`
factor are correct.
Tests
-----
test_windows_with_varying_num_estimates()
Tests that we create the correct overwrites from 2015-01-13 to
2015-01-14 regardless of how event dates were updated for each
quarter for each sid.
"""
@classmethod
def make_events(cls):
return pd.DataFrame(
{
SID_FIELD_NAME: [0] * 3 + [1] * 3,
TS_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-13"),
]
* 2,
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-20"),
],
"estimate": [11.0, 12.0, 21.0] * 2,
FISCAL_QUARTER_FIELD_NAME: [1, 1, 2] * 2,
FISCAL_YEAR_FIELD_NAME: [2015] * 6,
}
)
@classmethod
def assert_compute(cls, estimate, today):
raise NotImplementedError("assert_compute")
def test_windows_with_varying_num_estimates(self):
dataset = QuartersEstimates(1)
assert_compute = self.assert_compute
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = 3
def compute(self, today, assets, out, estimate):
assert_compute(estimate, today)
engine = self.make_engine()
engine.run_pipeline(
Pipeline({"est": SomeFactor()}),
start_date=pd.Timestamp("2015-01-13", tz="utc"),
# last event date we have
end_date=pd.Timestamp("2015-01-14", tz="utc"),
)
class PreviousVaryingNumEstimates(WithVaryingNumEstimates, ZiplineTestCase):
def assert_compute(self, estimate, today):
if today == pd.Timestamp("2015-01-13", tz="utc"):
assert_array_equal(estimate[:, 0], np.array([np.NaN, np.NaN, 12]))
assert_array_equal(estimate[:, 1], np.array([np.NaN, 12, 12]))
else:
assert_array_equal(estimate[:, 0], np.array([np.NaN, 12, 12]))
assert_array_equal(estimate[:, 1], np.array([12, 12, 12]))
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
class NextVaryingNumEstimates(WithVaryingNumEstimates, ZiplineTestCase):
def assert_compute(self, estimate, today):
if today == pd.Timestamp("2015-01-13", tz="utc"):
assert_array_equal(estimate[:, 0], np.array([11, 12, 12]))
assert_array_equal(estimate[:, 1], np.array([np.NaN, np.NaN, 21]))
else:
assert_array_equal(estimate[:, 0], np.array([np.NaN, 21, 21]))
assert_array_equal(estimate[:, 1], np.array([np.NaN, 21, 21]))
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
class WithEstimateWindows(WithEstimates):
"""
ZiplineTestCase mixin providing fixtures and a test for running a
Pipeline with an estimates loader over differently-sized windows.
Attributes
----------
events : pd.DataFrame
DataFrame with estimates for 2 quarters for 2 sids.
window_test_start_date : pd.Timestamp
The date from which the window should start.
timelines : dict[int -> pd.DataFrame]
A dictionary mapping to the number of quarters out to
snapshots of how the data should look on each date in the date range.
Methods
-------
make_expected_timelines() -> dict[int -> pd.DataFrame]
Creates a dictionary of expected data. See `timelines`, above.
Tests
-----
test_estimate_windows_at_quarter_boundaries()
Tests that we overwrite values with the correct quarter's estimate at
the correct dates when we have a factor that asks for a window of data.
"""
END_DATE = pd.Timestamp("2015-02-10", tz="utc")
window_test_start_date = pd.Timestamp("2015-01-05")
critical_dates = [
pd.Timestamp("2015-01-09", tz="utc"),
pd.Timestamp("2015-01-15", tz="utc"),
pd.Timestamp("2015-01-20", tz="utc"),
pd.Timestamp("2015-01-26", tz="utc"),
pd.Timestamp("2015-02-05", tz="utc"),
pd.Timestamp("2015-02-10", tz="utc"),
]
# Starting date, number of announcements out.
window_test_cases = list(itertools.product(critical_dates, (1, 2)))
@classmethod
def make_events(cls):
# Typical case: 2 consecutive quarters.
sid_0_timeline = pd.DataFrame(
{
TS_FIELD_NAME: [
cls.window_test_start_date,
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-02-10"),
# We want a case where we get info for a later
# quarter before the current quarter is over but
# after the split_asof_date to make sure that
# we choose the correct date to overwrite until.
pd.Timestamp("2015-01-18"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-02-10"),
pd.Timestamp("2015-02-10"),
pd.Timestamp("2015-04-01"),
],
"estimate": [100.0, 101.0] + [200.0, 201.0] + [400],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2 + [4],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 0,
}
)
# We want a case where we skip a quarter. We never find out about Q2.
sid_10_timeline = pd.DataFrame(
{
TS_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-15"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-22"),
pd.Timestamp("2015-01-22"),
pd.Timestamp("2015-02-05"),
pd.Timestamp("2015-02-05"),
],
"estimate": [110.0, 111.0] + [310.0, 311.0],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [3] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 10,
}
)
# We want to make sure we have correct overwrites when sid quarter
# boundaries collide. This sid's quarter boundaries collide with sid 0.
sid_20_timeline = pd.DataFrame(
{
TS_FIELD_NAME: [
cls.window_test_start_date,
pd.Timestamp("2015-01-07"),
cls.window_test_start_date,
pd.Timestamp("2015-01-17"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-02-10"),
pd.Timestamp("2015-02-10"),
],
"estimate": [120.0, 121.0] + [220.0, 221.0],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 20,
}
)
concatted = pd.concat(
[sid_0_timeline, sid_10_timeline, sid_20_timeline]
).reset_index()
np.random.seed(0)
return concatted.reindex(np.random.permutation(concatted.index))
@classmethod
def get_sids(cls):
sids = sorted(cls.events[SID_FIELD_NAME].unique())
# Add extra sids between sids in our data. We want to test that we
# apply adjustments to the correct sids.
return [
sid for i in range(len(sids) - 1) for sid in range(sids[i], sids[i + 1])
] + [sids[-1]]
@classmethod
def make_expected_timelines(cls):
return {}
@classmethod
def init_class_fixtures(cls):
super(WithEstimateWindows, cls).init_class_fixtures()
cls.create_expected_df_for_factor_compute = partial(
create_expected_df_for_factor_compute,
cls.window_test_start_date,
cls.get_sids(),
)
cls.timelines = cls.make_expected_timelines()
@parameterized.expand(window_test_cases)
def test_estimate_windows_at_quarter_boundaries(
self, start_date, num_announcements_out
):
dataset = QuartersEstimates(num_announcements_out)
trading_days = self.trading_days
timelines = self.timelines
# The window length should be from the starting index back to the first
# date on which we got data. The goal is to ensure that as we
# progress through the timeline, all data we got, starting from that
# first date, is correctly overwritten.
window_len = (
self.trading_days.get_loc(start_date)
- self.trading_days.get_loc(self.window_test_start_date)
+ 1
)
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = window_len
def compute(self, today, assets, out, estimate):
today_idx = trading_days.get_loc(today)
today_timeline = (
timelines[num_announcements_out]
.loc[today]
.reindex(trading_days[: today_idx + 1])
.values
)
timeline_start_idx = len(today_timeline) - window_len
assert_almost_equal(estimate, today_timeline[timeline_start_idx:])
engine = self.make_engine()
engine.run_pipeline(
Pipeline({"est": SomeFactor()}),
start_date=start_date,
# last event date we have
end_date=pd.Timestamp("2015-02-10", tz="utc"),
)
class PreviousEstimateWindows(WithEstimateWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_timelines(cls):
oneq_previous = pd.concat(
[
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-09", "2015-01-19")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-20")),
],
pd.Timestamp("2015-01-20"),
),
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-20")),
],
pd.Timestamp("2015-01-21"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, 111, pd.Timestamp("2015-01-22")),
(20, 121, pd.Timestamp("2015-01-20")),
],
end_date,
)
for end_date in pd.date_range("2015-01-22", "2015-02-04")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, 311, pd.Timestamp("2015-02-05")),
(20, 121, pd.Timestamp("2015-01-20")),
],
end_date,
)
for end_date in pd.date_range("2015-02-05", "2015-02-09")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 201, pd.Timestamp("2015-02-10")),
(10, 311, pd.Timestamp("2015-02-05")),
(20, 221, pd.Timestamp("2015-02-10")),
],
pd.Timestamp("2015-02-10"),
),
]
)
twoq_previous = pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-09", "2015-02-09")
]
# We never get estimates for S1 for 2Q ago because once Q3
# becomes our previous quarter, 2Q ago would be Q2, and we have
# no data on it.
+ [
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-02-10")),
(10, np.NaN, pd.Timestamp("2015-02-05")),
(20, 121, pd.Timestamp("2015-02-10")),
],
pd.Timestamp("2015-02-10"),
)
]
)
return {1: oneq_previous, 2: twoq_previous}
class NextEstimateWindows(WithEstimateWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_timelines(cls):
oneq_next = pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp("2015-01-09")),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-07")),
],
pd.Timestamp("2015-01-09"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-07")),
],
end_date,
)
for end_date in pd.date_range("2015-01-12", "2015-01-19")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 100, cls.window_test_start_date),
(0, 101, pd.Timestamp("2015-01-20")),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-07")),
],
pd.Timestamp("2015-01-20"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
end_date,
)
for end_date in pd.date_range("2015-01-21", "2015-01-22")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, 310, pd.Timestamp("2015-01-09")),
(10, 311, pd.Timestamp("2015-01-15")),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
end_date,
)
for end_date in pd.date_range("2015-01-23", "2015-02-05")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
end_date,
)
for end_date in pd.date_range("2015-02-06", "2015-02-09")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(0, 201, pd.Timestamp("2015-02-10")),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
pd.Timestamp("2015-02-10"),
),
]
)
twoq_next = pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-09", "2015-01-11")
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-12", "2015-01-16")
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
pd.Timestamp("2015-01-20"),
)
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-21", "2015-02-10")
]
)
return {1: oneq_next, 2: twoq_next}
class WithSplitAdjustedWindows(WithEstimateWindows):
"""
ZiplineTestCase mixin providing fixtures and a test for running a
Pipeline with an estimates loader over differently-sized windows and with
split adjustments.
"""
split_adjusted_asof_date = pd.Timestamp("2015-01-14")
@classmethod
def make_events(cls):
# Add an extra sid that has a release before the split-asof-date in
# order to test that we're reversing splits correctly in the previous
# case (without an overwrite) and in the next case (with an overwrite).
sid_30 = pd.DataFrame(
{
TS_FIELD_NAME: [
cls.window_test_start_date,
pd.Timestamp("2015-01-09"),
# For Q2, we want it to start early enough
# that we can have several adjustments before
# the end of the first quarter so that we
# can test un-adjusting & readjusting with an
# overwrite.
cls.window_test_start_date,
# We want the Q2 event date to be enough past
# the split-asof-date that we can have
# several splits and can make sure that they
# are applied correctly.
pd.Timestamp("2015-01-20"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-20"),
],
"estimate": [130.0, 131.0, 230.0, 231.0],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 30,
}
)
# An extra sid to test no splits before the split-adjusted-asof-date.
# We want an event before and after the split-adjusted-asof-date &
# timestamps for data points also before and after
# split-adjusted-asof-date (but also before the split dates, so that
# we can test that splits actually get applied at the correct times).
sid_40 = pd.DataFrame(
{
TS_FIELD_NAME: [pd.Timestamp("2015-01-09"), pd.Timestamp("2015-01-15")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-02-10"),
],
"estimate": [140.0, 240.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 40,
}
)
# An extra sid to test all splits before the
# split-adjusted-asof-date. All timestamps should be before that date
# so that we have cases where we un-apply and re-apply splits.
sid_50 = pd.DataFrame(
{
TS_FIELD_NAME: [pd.Timestamp("2015-01-09"), pd.Timestamp("2015-01-12")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-02-10"),
],
"estimate": [150.0, 250.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 50,
}
)
return pd.concat(
[
# Slightly hacky, but want to make sure we're using the same
# events as WithEstimateWindows.
cls.__base__.make_events(),
sid_30,
sid_40,
sid_50,
]
)
@classmethod
def make_splits_data(cls):
# For sid 0, we want to apply a series of splits before and after the
# split-adjusted-asof-date as well as between quarters (for the
# previous case, where we won't see any values until after the event
# happens).
sid_0_splits = pd.DataFrame(
{
SID_FIELD_NAME: 0,
"ratio": (-1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 100),
"effective_date": (
pd.Timestamp("2014-01-01"), # Filter out
# Split before Q1 event & after first estimate
pd.Timestamp("2015-01-07"),
# Split before Q1 event
pd.Timestamp("2015-01-09"),
# Split before Q1 event
pd.Timestamp("2015-01-13"),
# Split before Q1 event
pd.Timestamp("2015-01-15"),
# Split before Q1 event
pd.Timestamp("2015-01-18"),
# Split after Q1 event and before Q2 event
pd.Timestamp("2015-01-30"),
# Filter out - this is after our date index
pd.Timestamp("2016-01-01"),
),
}
)
sid_10_splits = pd.DataFrame(
{
SID_FIELD_NAME: 10,
"ratio": (0.2, 0.3),
"effective_date": (
# We want a split before the first estimate and before the
# split-adjusted-asof-date but within our calendar index so
# that we can test that the split is NEVER applied.
pd.Timestamp("2015-01-07"),
# Apply a single split before Q1 event.
pd.Timestamp("2015-01-20"),
),
}
)
# We want a sid with split dates that collide with another sid (0) to
# make sure splits are correctly applied for both sids.
sid_20_splits = pd.DataFrame(
{
SID_FIELD_NAME: 20,
"ratio": (
0.4,
0.5,
0.6,
0.7,
0.8,
0.9,
),
"effective_date": (
pd.Timestamp("2015-01-07"),
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-15"),
pd.Timestamp("2015-01-18"),
pd.Timestamp("2015-01-30"),
),
}
)
# This sid has event dates that are shifted back so that we can test
# cases where an event occurs before the split-asof-date.
sid_30_splits = pd.DataFrame(
{
SID_FIELD_NAME: 30,
"ratio": (8, 9, 10, 11, 12),
"effective_date": (
# Split before the event and before the
# split-asof-date.
pd.Timestamp("2015-01-07"),
# Split on date of event but before the
# split-asof-date.
pd.Timestamp("2015-01-09"),
# Split after the event, but before the
# split-asof-date.
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-15"),
pd.Timestamp("2015-01-18"),
),
}
)
# No splits for a sid before the split-adjusted-asof-date.
sid_40_splits = pd.DataFrame(
{
SID_FIELD_NAME: 40,
"ratio": (13, 14),
"effective_date": (
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-22"),
),
}
)
# No splits for a sid after the split-adjusted-asof-date.
sid_50_splits = pd.DataFrame(
{
SID_FIELD_NAME: 50,
"ratio": (15, 16),
"effective_date": (
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-14"),
),
}
)
return pd.concat(
[
sid_0_splits,
sid_10_splits,
sid_20_splits,
sid_30_splits,
sid_40_splits,
sid_50_splits,
]
)
class PreviousWithSplitAdjustedWindows(WithSplitAdjustedWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousSplitAdjustedEarningsEstimatesLoader(
events,
columns,
split_adjustments_loader=cls.adjustment_reader,
split_adjusted_column_names=["estimate"],
split_adjusted_asof=cls.split_adjusted_asof_date,
)
@classmethod
def make_expected_timelines(cls):
oneq_previous = pd.concat(
[
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
# Undo all adjustments that haven't happened yet.
(30, 131 * 1 / 10, pd.Timestamp("2015-01-09")),
(40, 140.0, pd.Timestamp("2015-01-09")),
(50, 150 * 1 / 15 * 1 / 16, pd.Timestamp("2015-01-09")),
],
end_date,
)
for end_date in pd.date_range("2015-01-09", "2015-01-12")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131, pd.Timestamp("2015-01-09")),
(40, 140.0, pd.Timestamp("2015-01-09")),
(50, 150.0 * 1 / 16, pd.Timestamp("2015-01-09")),
],
pd.Timestamp("2015-01-13"),
),
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131, pd.Timestamp("2015-01-09")),
(40, 140.0, pd.Timestamp("2015-01-09")),
(50, 150.0, pd.Timestamp("2015-01-09")),
],
pd.Timestamp("2015-01-14"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131 * 11, pd.Timestamp("2015-01-09")),
(40, 140.0, pd.Timestamp("2015-01-09")),
(50, 150.0, pd.Timestamp("2015-01-09")),
],
end_date,
)
for end_date in pd.date_range("2015-01-15", "2015-01-16")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, np.NaN, cls.window_test_start_date),
(20, 121 * 0.7 * 0.8, pd.Timestamp("2015-01-20")),
(30, 231, pd.Timestamp("2015-01-20")),
(40, 140.0 * 13, pd.Timestamp("2015-01-09")),
(50, 150.0, pd.Timestamp("2015-01-09")),
],
end_date,
)
for end_date in pd.date_range("2015-01-20", "2015-01-21")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, 111 * 0.3, pd.Timestamp("2015-01-22")),
(20, 121 * 0.7 * 0.8, pd.Timestamp("2015-01-20")),
(30, 231, pd.Timestamp("2015-01-20")),
(40, 140.0 * 13 * 14, pd.Timestamp("2015-01-09")),
(50, 150.0, pd.Timestamp("2015-01-09")),
],
end_date,
)
for end_date in pd.date_range("2015-01-22", "2015-01-29")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101 * 7, pd.Timestamp("2015-01-20")),
(10, 111 * 0.3, pd.Timestamp("2015-01-22")),
(20, 121 * 0.7 * 0.8 * 0.9, pd.Timestamp("2015-01-20")),
(30, 231, pd.Timestamp("2015-01-20")),
(40, 140.0 * 13 * 14, pd.Timestamp("2015-01-09")),
(50, 150.0, pd.Timestamp("2015-01-09")),
],
end_date,
)
for end_date in pd.date_range("2015-01-30", "2015-02-04")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101 * 7, pd.Timestamp("2015-01-20")),
(10, 311 * 0.3, pd.Timestamp("2015-02-05")),
(20, 121 * 0.7 * 0.8 * 0.9, pd.Timestamp("2015-01-20")),
(30, 231, pd.Timestamp("2015-01-20")),
(40, 140.0 * 13 * 14, pd.Timestamp("2015-01-09")),
(50, 150.0, pd.Timestamp("2015-01-09")),
],
end_date,
)
for end_date in pd.date_range("2015-02-05", "2015-02-09")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 201, pd.Timestamp("2015-02-10")),
(10, 311 * 0.3, pd.Timestamp("2015-02-05")),
(20, 221 * 0.8 * 0.9, pd.Timestamp("2015-02-10")),
(30, 231, pd.Timestamp("2015-01-20")),
(40, 240.0 * 13 * 14, pd.Timestamp("2015-02-10")),
(50, 250.0, pd.Timestamp("2015-02-10")),
],
pd.Timestamp("2015-02-10"),
),
]
)
twoq_previous = pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, np.NaN, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-09", "2015-01-19")
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131 * 11 * 12, pd.Timestamp("2015-01-20")),
],
end_date,
)
for end_date in pd.date_range("2015-01-20", "2015-02-09")
]
# We never get estimates for S1 for 2Q ago because once Q3
# becomes our previous quarter, 2Q ago would be Q2, and we have
# no data on it.
+ [
cls.create_expected_df_for_factor_compute(
[
(0, 101 * 7, pd.Timestamp("2015-02-10")),
(10, np.NaN, pd.Timestamp("2015-02-05")),
(20, 121 * 0.7 * 0.8 * 0.9, pd.Timestamp("2015-02-10")),
(30, 131 * 11 * 12, pd.Timestamp("2015-01-20")),
(40, 140.0 * 13 * 14, pd.Timestamp("2015-02-10")),
(50, 150.0, pd.Timestamp("2015-02-10")),
],
pd.Timestamp("2015-02-10"),
)
]
)
return {1: oneq_previous, 2: twoq_previous}
class NextWithSplitAdjustedWindows(WithSplitAdjustedWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextSplitAdjustedEarningsEstimatesLoader(
events,
columns,
split_adjustments_loader=cls.adjustment_reader,
split_adjusted_column_names=["estimate"],
split_adjusted_asof=cls.split_adjusted_asof_date,
)
@classmethod
def make_expected_timelines(cls):
oneq_next = pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 100 * 1 / 4, cls.window_test_start_date),
(10, 110, pd.Timestamp("2015-01-09")),
(20, 120 * 5 / 3, cls.window_test_start_date),
(20, 121 * 5 / 3, pd.Timestamp("2015-01-07")),
(30, 130 * 1 / 10, cls.window_test_start_date),
(30, 131 * 1 / 10, pd.Timestamp("2015-01-09")),
(40, 140, pd.Timestamp("2015-01-09")),
(50, 150.0 * 1 / 15 * 1 / 16, pd.Timestamp("2015-01-09")),
],
pd.Timestamp("2015-01-09"),
),
cls.create_expected_df_for_factor_compute(
[
(0, 100 * 1 / 4, cls.window_test_start_date),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 120 * 5 / 3, cls.window_test_start_date),
(20, 121 * 5 / 3, pd.Timestamp("2015-01-07")),
(30, 230 * 1 / 10, cls.window_test_start_date),
(40, np.NaN, pd.Timestamp("2015-01-10")),
(50, 250.0 * 1 / 15 * 1 / 16, pd.Timestamp("2015-01-12")),
],
pd.Timestamp("2015-01-12"),
),
cls.create_expected_df_for_factor_compute(
[
(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp("2015-01-09")),
#!/usr/bin/env python
# coding: utf-8
# # Introduction
#
# Previously I built XGBoost models to predict the main and sub-types of Pokemon from all 7 generations (https://www.kaggle.com/xagor1/pokemon-type-predictions-using-xgb). This was relatively successful, but often stalled at around 70% accuracy per generation, with some much worse. To gain more experience with parameter tuning and feature engineering, I decided to revisit just the 1st Generation, and see if I could improve my results.
# In[2]:
#Load various packages
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
import gc
import time
from sklearn.model_selection import train_test_split
import xgboost as xgb
from xgboost import plot_importance
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score
from sklearn import metrics
import seaborn as sns
print(os.listdir("../../../input/rounakbanik_pokemon"))
from sklearn.feature_selection import SelectFromModel
from collections import Counter
import warnings
warnings.filterwarnings("ignore")
# # Loading and Modifying Data
#
# To start with, I loaded and modified the data as in the previous kernel.
#
# In contrast to last time, I separated out the numerical and categorical data, and applied one-hot encoding to the latter. This caused the number of features to explode from 24 to 500.
#
# The original plan was to do feature engineering to improve my overall accuracy. However, thus far all my attempts have actually made the predictions worse, so I have left this aside for now.
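# The next cell is an added, hedged illustration (not from the original kernel) of why
# one-hot encoding inflates the column count: pd.get_dummies expands every listed
# categorical column into one indicator column per distinct value. The toy column names
# are made up for the example.
# In[ ]:
_toy = pd.DataFrame({"egg_group_1": ["monster", "dragon", "water1"],
                     "color_id": [1, 2, 3]})
print(pd.get_dummies(_toy, columns=["egg_group_1", "color_id"]).shape)  # (3, 6)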
# In[3]:
#Read data
path = "../../../input/rounakbanik_pokemon/"
egg_df=pd.read_csv(path+"pokemon.csv")
species_df=pd.read_csv(path+"pokemon.csv")
abilities_df=pd.read_csv(path+"pokemon.csv")
#Split duplicates off & combine back
egg2_df=pd.DataFrame.copy(egg_df)
egg2_df=egg_df.loc[egg_df['species_id'].duplicated(), :]
egg_df.drop_duplicates('species_id',inplace=True)
merged = egg_df.merge(egg2_df,on="species_id",how='outer')
merged.fillna(0,inplace=True)
#Rename columns to simpler form.
merged.rename(index=str,columns={"egg_group_id_x":"egg_group_1"},inplace=True)
merged.rename(index=str,columns={"egg_group_id_y":"egg_group_2"},inplace=True)
#Drop last 6 columns
merged.drop(merged.tail(6).index,inplace=True)
#Rename species_id to pokedex_number
merged.rename(index=str,columns={"species_id":"pokedex_number"},inplace=True)
#Make a new smaller dataframe
species_trim_df=pd.DataFrame()
species_trim_df["pokedex_number"]=species_df['id']
species_trim_df["color_id"]=species_df['color_id']
species_trim_df["shape_id"]=species_df['shape_id']
species_trim_df["habitat_id"]=species_df['habitat_id']
species_trim_df.drop(species_trim_df.tail(6).index,inplace=True)
#Trim everything above Magearna (#801) off
abilities_df = abilities_df[abilities_df.pokemon_id < 802]
#Make 3 new columns
abilities_df["Ability1"]=0
abilities_df["Ability2"]=0
abilities_df["Ability3"]=0
#Assign values to the 3 columns based on the ability slot (1-3)
abilities_df["Ability1"] = abilities_df.ability_id.where(abilities_df.slot == 1,0)
abilities_df["Ability2"] = abilities_df.ability_id.where(abilities_df.slot == 2,0)
abilities_df["Ability3"] = abilities_df.ability_id.where(abilities_df.slot == 3,0)
#Split duplicates off into new dataframes
#3 abilities on some means it needs to be split twice
#I'm sure there's an easier way to do this
abilities_df2=pd.DataFrame.copy(abilities_df)
abilities_df2=abilities_df.loc[abilities_df['pokemon_id'].duplicated(), :]
abilities_df.drop_duplicates('pokemon_id',inplace=True)
abilities_df3=pd.DataFrame.copy(abilities_df2)
abilities_df3=abilities_df2.loc[abilities_df2['pokemon_id'].duplicated(), :]
abilities_df2.drop_duplicates('pokemon_id',inplace=True)
#Drop extra columns
abilities_df.drop(['ability_id','is_hidden','slot'],axis=1,inplace=True)
abilities_df2.drop(['ability_id','is_hidden','slot'],axis=1,inplace=True)
abilities_df3.drop(['ability_id','is_hidden','slot'],axis=1,inplace=True)
#Combine everything back
abilities_df=abilities_df.set_index('pokemon_id').add(abilities_df2.set_index('pokemon_id'),fill_value=0).reset_index()
abilities_df=abilities_df.set_index('pokemon_id').add(abilities_df3.set_index('pokemon_id'),fill_value=0).reset_index()
#Rename pokemon_id to pokedex number to allow for merging.
abilities_df.rename(index=str,columns={"pokemon_id":"pokedex_number"},inplace=True)
#Read Kaggle data
path = "../../../input/rounakbanik_pokemon/"
pokemon_df = pd.read_csv(path+"pokemon.csv")
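# Hedged sketch (assumption): the engineered frames are joined onto the Kaggle table on
# pokedex_number, the merge key used throughout this kernel; the 'left' joins and the
# reuse of the pokemon_df name are illustrative choices, not taken from the original.
pokemon_df = pokemon_df.merge(merged, on="pokedex_number", how="left")
pokemon_df = pokemon_df.merge(species_trim_df, on="pokedex_number", how="left")
pokemon_df = pokemon_df.merge(abilities_df, on="pokedex_number", how="left")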
'''
MIT License
Copyright (c) 2020 MINCIENCIA
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import json
from urllib import request
import sys
import pandas as pd
from datetime import datetime
class traffic:
def __init__(self, user, token):
self.user = user
self.token = token
self.df_clones = pd.DataFrame(columns=['timestamp','count','uniques'])
self.df_views = pd.DataFrame(columns=['timestamp','count','uniques'])
self.now = pd.to_datetime(datetime.now()).strftime('%Y-%m-%d')
self.df_referring = pd.DataFrame(columns=['timestamp','referrer','count','uniques'])
self.df_popular = pd.DataFrame(columns=['timestamp','path','count','uniques'])
def lambda_handler(self):
base_url = "https://api.github.com"
user = self.user
print("Getting repositories for %s" % user)
req = request.Request(
"%s/users/%s/repos" % (base_url, user), headers={"Authorization": "token %s" % self.token}
)
response = request.urlopen(req)
req = json.load(response)
ur = [(r["name"]) for r in req]
print("Found %s repositories" % len(ur))
print(" ")
for repo_name in ur:
print("Getting views for %s" % repo_name)
req = request.Request(
"%s/repos/%s/%s/traffic/views" % (base_url, user, repo_name), headers={"Authorization": "token %s" % self.token}
)
response = request.urlopen(req)
req = json.load(response)
for i in req['views']:
dfi = pd.Series(i)
print(dfi)
self.df_views = self.df_views.append(dfi,ignore_index=True)
print("Getting clones for %s" % repo_name)
req = request.Request(
"%s/repos/%s/%s/traffic/clones" % (base_url, user, repo_name), headers={"Authorization": "token %s" % self.token}
)
response = request.urlopen(req)
req = json.load(response)
for i in req['clones']:
dfi = pd.Series(i)
print(dfi)
self.df_clones = self.df_clones.append(dfi,ignore_index=True)
print("Getting referral data for %s" % repo_name)
referrals = request.Request(
"%s/repos/%s/%s/traffic/popular/referrers" % (base_url, user, repo_name), headers={"Authorization": "token %s" % self.token}
)
referrals = request.urlopen(referrals)
referrals = json.load(referrals)
if len(referrals) > 0:
for ref in referrals:
referred = {
"timestamp": self.now,
"referrer": ref["referrer"],
"count": ref["count"],
"uniques": ref["uniques"],
}
dfi = pd.Series(referred)
print(dfi)
self.df_referring = self.df_referring.append(dfi,ignore_index=True)
print("Getting top referral path data for %s" % repo_name)
paths = request.Request(
"%s/repos/%s/%s/traffic/popular/paths" % (base_url, user, repo_name), headers={"Authorization": "token %s" % self.token}
)
paths = request.urlopen(paths)
paths = json.load(paths)
if len(paths) > 0:
for ref in paths:
paths = {
"timestamp": self.now,
"path": ref["path"],
"count": ref["count"],
"uniques": ref["uniques"],
}
dfi = pd.Series(paths)
print(dfi)
self.df_popular = self.df_popular.append(dfi, ignore_index=True)
def save(self):
#views
self.df_views['timestamp'] = pd.to_datetime(self.df_views['timestamp'], format='%Y-%m-%d').dt.date
self.df_views.to_csv('../input/Traffic/views.csv',mode='a',index=False,header=False)
#clones
self.df_clones['timestamp'] = pd.to_datetime(self.df_clones['timestamp'], format='%Y-%m-%d').dt.date
self.df_clones.to_csv('../input/Traffic/clones.csv',mode='a',index=False,header=False)
#referrers
self.df_referring.to_csv('../input/Traffic/referrers.csv',mode='a', index=False,header=False)
#popular
self.df_popular.to_csv('../input/Traffic/popular.csv',mode='a', index=False,header=False)
def publish(self):
views = pd.read_csv('../input/Traffic/views.csv')
views.sort_values(by=['count'], inplace=True)
views.drop_duplicates(subset=['timestamp'], keep='last', inplace=True)
views.sort_values(by=['timestamp'], inplace=True)
clones = | pd.read_csv('../input/Traffic/clones.csv') | pandas.read_csv |
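# Note on the row accumulation above: DataFrame.append was deprecated in pandas 1.4
# and removed in pandas 2.0. A sketch of the same accumulation with pd.concat,
# using hypothetical payload dicts shaped like the GitHub traffic records:
import pandas as pd

records = [
    {"timestamp": "2020-06-01", "count": 5, "uniques": 2},
    {"timestamp": "2020-06-02", "count": 7, "uniques": 3},
]
rows = [pd.Series(r) for r in records]
df_views = pd.concat(rows, axis=1).T.reset_index(drop=True)
# equivalently, since the payloads are plain dicts: df_views = pd.DataFrame(records)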
# -*- coding: utf-8 -*-
from __future__ import print_function, division
"""
.. note::
These are the database functions for SPLAT
"""
# imports: internal
import base64
import copy
import csv
import glob
import os
import re
import requests
from shutil import copyfile
import time
# imports: external
import astropy
import numpy
import pandas
from astropy.io import ascii, fits # for reading in spreadsheet
from astropy.table import Column, Table, join, vstack # for reading in table files
from astropy.time import Time # for reading in table files
from astropy.coordinates import SkyCoord
from astropy import units as u # standard units
from astroquery.simbad import Simbad
from astroquery.vizier import Vizier
from astroquery.nist import Nist
from astroquery.xmatch import XMatch
#from astroquery.gaia import Gaia
# splat requirements
import splat
import splat.plot as splot
from splat.initialize import *
from splat.utilities import *
from splat.empirical import estimateDistance, typeToColor
#from splat import DB_SOURCES, DB_SPECTRA
#import splat as spl
# Python 2->3 fix for input
try: input=raw_input
except NameError: pass
# set timeout limits to 1 minute
Simbad.TIMEOUT = 60
Vizier.TIMEOUT = 60
Nist.TIMEOUT = 60
XMatch.TIMEOUT = 180
#####################################################
########### DATABASE QUERY AND ACCESS ###########
#####################################################
def prepDB(db_init,force=False):
'''
Prep a pandas database for DESIGNATION join
Populates RA, DEC, DESIGNATION and SHORTNAME columns if not present
Requires RA, DEC or DESIGNATION to be present
'''
db = copy.deepcopy(db_init)
if 'RA' not in list(db.columns) or 'DEC' not in list(db.columns):
if 'DESIGNATION' not in list(db.columns):
raise ValueError('Database must have columns RA and DEC, or DESIGNATION')
else:
db['COORDINATES'] = [splat.designationToCoordinate(d) for d in db['DESIGNATION']]
if not isinstance(db['RA'].iloc[0],float):
db['RA'] = [c.ra.degree for c in db['COORDINATES']]
db['DEC'] = [c.dec.degree for c in db['COORDINATES']]
if 'DESIGNATION' not in list(db.columns):
db['DESIGNATION'] = [splat.coordinateToDesignation([db['RA'].iloc[i],db['DEC'].iloc[i]]) for i in range(len(db))]
if 'COORDINATES' not in list(db.columns):
db['COORDINATES'] = [splat.designationToCoordinate(d) for d in db['DESIGNATION']]
# if 'SHORTNAME' not in list(db.columns):
# db['SHORTNAME'] = [splat.designationToShortName(d) for d in db['DESIGNATION']]
# force COORDINATES, RA, DEC if desired
if force == True:
db['COORDINATES'] = [splat.designationToCoordinate(d) for d in db['DESIGNATION']]
db['RA'] = [c.ra.degree for c in db['COORDINATES']]
db['DEC'] = [c.dec.degree for c in db['COORDINATES']]
# db['SHORTNAME'] = [splat.designationToShortName(d) for d in db['DESIGNATION']]
return db
def fetchDatabase(*args, **kwargs):
'''
:Purpose: Get the SpeX Database from either online repository or local drive
'''
    filename = 'db_spexprism.txt' # temporary original database file for backwards compatibility
if len(args) > 0:
filename = args[0]
kwargs['filename'] = kwargs.get('filename',filename)
kwargs['filename'] = kwargs.get('file',kwargs['filename'])
kwargs['folder'] = kwargs.get('folder',SPLAT_PATH+DB_FOLDER)
url = kwargs.get('url',SPLAT_URL)+kwargs['folder']
local = kwargs.get('local',True)
online = kwargs.get('online',not local and checkOnline())
local = not online
kwargs['local'] = local
kwargs['online'] = online
kwargs['model'] = True
# determine format of file
delimiter = kwargs.get('delimiter','')
fmt = kwargs.get('format','')
fmt = kwargs.get('fmt',fmt)
if delimiter == ',' or delimiter == 'comma' or delimiter == 'csv' or kwargs.get('comma',False) == True or ('.csv' in kwargs['filename']):
delimiter = ','
fmt = 'csv'
if delimiter == '\t' or delimiter == 'tab' or kwargs.get('tab',False) == True or ('.txt' in kwargs['filename']):
delimiter = '\t'
fmt = 'tab'
if fmt == '':
raise NameError('\nCould not determine the file format of '+kwargs['filename']+'; please specify using format or delimiter keywords\n\n')
# check that folder/set is present either locally or online
# if not present locally but present online, switch to this mode
# if not present at either raise error
folder = checkLocal(kwargs['folder'])
if folder=='':
folder = checkOnlineFile(kwargs['folder'])
if folder=='':
raise NameError('\nCould not find '+kwargs['folder']+' locally or on SPLAT website\n\n')
else:
kwargs['folder'] = folder
kwargs['local'] = False
kwargs['online'] = True
else:
kwargs['folder'] = folder
# locally:
if kwargs['local']:
# print('Reading local')
infile = checkLocal(kwargs['filename'])
if infile=='':
infile = checkLocal(kwargs['folder']+'/'+kwargs['filename'])
if infile=='':
raise NameError('\nCould not find '+kwargs['filename']+' locally\n\n')
else:
try:
data = ascii.read(os.path.normpath(infile), delimiter=delimiter,fill_values='-99.',format=fmt)
# data = ascii.read(infile, delimiter='\t',fill_values='-99.',format='tab')
except:
raise NameError('\nCould not load {}: this may be a decoding error\n'.format(infile))
# check if file is present; if so, read it in, otherwise go to interpolated
# online:
if kwargs['online']:
# print('Reading online')
infile = checkOnlineFile(kwargs['filename'])
if infile=='':
infile = checkOnlineFile(kwargs['folder']+'/'+kwargs['filename'])
if infile=='':
raise NameError('\nCould not find '+kwargs['filename']+' on the SPLAT website\n\n')
try:
# open(os.path.basename(TMPFILENAME), 'wb').write(urllib2.urlopen(url+infile).read())
open(os.path.basename(TMPFILENAME), 'wb').write(requests.get(url+infile).content)
            kwargs['filename'] = os.path.basename(TMPFILENAME)
data = ascii.read(os.path.basename(TMPFILENAME), delimiter=delimiter,fill_values='-99.',format=fmt)
os.remove(os.path.basename(TMPFILENAME))
except:
raise NameError('\nHaving a problem reading in '+kwargs['filename']+' on the SPLAT website\n\n')
return data
#####################################################
########### ADDING NEW SPECTRA TO SPLAT ##########
#####################################################
def addUserSpectra(folder='./',instrument='SPEX-PRISM',mode='update',repeat='retain',radius_repeat=10.*u.arcsec,input_file='input.txt',search_str='*.fits',sources_data_file=DB_SOURCES_FILE,spectra_data_file=DB_SPECTRA_FILE,verbose=True,*args):
'''
:Purpose:
Adds in local spectral data to the underlying SPLAT library
This program is currently UNDER DEVELOPMENT
'''
# program constants
optional_spectra_columns = ['PUBLISHED','DATA_BIBCODE','PROGRAM_PI','OBSERVATION_DATE','OBSERVATION_MJD','OBSERVATION_TIME','OBSERVER','AIRMASS']
optional_sources_columns = ['NAME','DESIGNATION','RA','DEC','COORDINATES','DISCOVERY_REF','SPT','SPT_REF','SPT_OPT','SPT_OPT_REF','SPT_NIR','SPT_NIR_REF','SPT_LIT','SPT_LIT_REF','LUMINOSITY_CLASS','METALLICITY_CLASS','GRAVITY_CLASS_OPTICAL','GRAVITY_CLASS_OPTICAL_REF','GRAVITY_CLASS_NIR','GRAVITY_CLASS_NIR_REF','CLUSTER','CLUSTER_REF','BINARY','BINARY_TYPE','BINARY_REF','SBINARY','SBINARY_REF','COMPANION_NAME','COMPANION_REF']
header_spectra_columns = {
'OBSERVATION_DATE': ['OBS_DATE','OBS-DATE','UT-DATE'],
'OBSERVATION_TIME': ['OBS_TIME','OBS-TIME','UT-TIME'],
'OBSERVER': [],
'AIRMASS': ['Z'],
'SLIT': ['APERTURE'],
'DISPERSER': ['GRATING','GRISM','DISPERSE'],
}
header_sources_columns = {
'NAME': ['OBJECT','SOURCE','TARGET'],
'RA': ['RA-D','RADEG'],
'DEC': ['DEC-D','DECDEG'],
}
dataset_number_factor = 1e6
now = time.localtime()
nowstr = str(now.tm_year)+str(now.tm_mon)+str(now.tm_mday)
if len(args) > 0:
folder = args[0]
if len(args) > 1:
instrument = args[1]
##### STOPPED HERE #####
# check if this has already been read in
# if folder in DATA_FOLDERS:
# n =
# check instrument
inst = splat.checkInstrument(instrument)
if inst != False: instrument = inst
# check mode and repeat
mode_labels = ['new','append','refresh','update']
if mode.lower() not in mode_labels:
if verbose==True: print('\nDo not recognize mode = {}; should be one of {}; reverting to update'.format(mode,mode_labels))
mode = 'update'
repeat_labels = ['replace','assert','retain','keep']
if repeat.lower() not in repeat_labels:
if verbose==True: print('\nDo not recognize repeat = {}; should be one of {}; reverting to retain'.format(repeat,repeat_labels))
repeat = 'retain'
# check the folder is correctly specified
if not os.path.exists(folder):
        print('\nCould not find folder {} in local directory structure; skipping'.format(folder))
return
# check if spectra data file is present; if not, you'll need to generate a new one
if spectra_data_file not in os.listdir(folder):
if verbose == True: print('\nCannot find spectral data file {}; generating a new one from input files'.format(spectra_data_file))
mode = 'new'
# STAGE 1: SET UP A NEW FOLDER OF DATA
if mode.lower() == 'new':
# check if input file is in place; if not, make one
if input_file not in os.listdir(folder):
files = glob.glob(folder+'/'+search_str)
files = [os.path.basename(f) for f in files]
for f in [input_file,sources_data_file,spectra_data_file]:
if f in files: files.remove(f)
# turn into preliminary input.txt file
input_db = pandas.DataFrame()
input_db['DATA_FILE'] = files
input_db['INSTRUMENT'] = [instrument]*len(files)
if '.txt' in input_file: input_db.to_csv(folder+'/'+input_file,sep='\t',index=False)
elif '.csv' in input_file: input_db.to_csv(folder+'/'+input_file,sep=',',index=False)
elif '.xls' in input_file: input_db.to_excel(folder+'/'+input_file,index=False)
else: raise ValueError('\nDo not recognize file format for {}'.format(input_file))
# prompt to continue?
# read in input file and start building spectral database
if '.txt' in input_file: input_db = pandas.read_csv(folder+'/'+input_file,delimiter='\t')
elif '.csv' in input_file: input_db = | pandas.read_csv(folder+'/'+input_file,delimiter=',') | pandas.read_csv |
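# Sketch of the extension-based dispatch used for the input tables above, pulled out
# into a hypothetical helper (read_input_table is not part of splat):
import pandas

def read_input_table(path):
    if path.endswith('.txt'):
        return pandas.read_csv(path, delimiter='\t')
    elif path.endswith('.csv'):
        return pandas.read_csv(path, delimiter=',')
    elif path.endswith('.xls') or path.endswith('.xlsx'):
        return pandas.read_excel(path)  # requires an Excel engine such as openpyxl
    raise ValueError('Do not recognize file format for {}'.format(path))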
import pandas as pd
from simple_network_sim import network_of_populations, sampleUseOfModel, hdf5_to_csv
from tests.utils import create_baseline
def test_cli_run(base_data_dir):
try:
sampleUseOfModel.main(["-c", str(base_data_dir / "config.yaml")])
h5_file = base_data_dir / "output" / "simple_network_sim" / "outbreak-timeseries" / "data.h5"
csv_file = base_data_dir / "output" / "simple_network_sim" / "outbreak-timeseries" / "data.csv"
hdf5_to_csv.main([str(h5_file), str(csv_file)])
baseline = create_baseline(csv_file)
test_df = pd.read_csv(csv_file)
baseline_df = pd.read_csv(baseline)
pd.testing.assert_frame_equal(
test_df.set_index(["date", "node", "age", "state"]),
baseline_df.set_index(["date", "node", "age", "state"]),
check_like=True,
)
finally:
# TODO; remove this once https://github.com/ScottishCovidResponse/data_pipeline_api/issues/12 is done
(base_data_dir / "access.log").unlink()
h5_file.unlink()
csv_file.unlink()
def test_stochastic_cli_run(base_data_dir):
try:
sampleUseOfModel.main(["-c", str(base_data_dir / "config_stochastic.yaml")])
h5_file = base_data_dir / "output" / "simple_network_sim" / "outbreak-timeseries" / "data.h5"
csv_file = base_data_dir / "output" / "simple_network_sim" / "outbreak-timeseries" / "data.csv"
hdf5_to_csv.main([str(h5_file), str(csv_file)])
baseline = create_baseline(csv_file)
test_df = pd.read_csv(csv_file)
baseline_df = pd.read_csv(baseline)
pd.testing.assert_frame_equal(
test_df.set_index(["date", "node", "age", "state"]),
baseline_df.set_index(["date", "node", "age", "state"]),
check_like=True,
)
finally:
# TODO; remove this once https://github.com/ScottishCovidResponse/data_pipeline_api/issues/12 is done
(base_data_dir / "access.log").unlink()
h5_file.unlink()
csv_file.unlink()
def test_stochastic_seed_sequence(data_api_stochastic):
network, _ = network_of_populations.createNetworkOfPopulation(
data_api_stochastic.read_table("human/compartment-transition", "compartment-transition"),
data_api_stochastic.read_table("human/population", "population"),
data_api_stochastic.read_table("human/commutes", "commutes"),
data_api_stochastic.read_table("human/mixing-matrix", "mixing-matrix"),
data_api_stochastic.read_table("human/infectious-compartments", "infectious-compartments"),
data_api_stochastic.read_table("human/infection-probability", "infection-probability"),
data_api_stochastic.read_table("human/initial-infections", "initial-infections"),
pd.DataFrame({"Value": [2]}),
data_api_stochastic.read_table("human/start-end-date", "start-end-date"),
data_api_stochastic.read_table("human/movement-multipliers", "movement-multipliers"),
| pd.DataFrame({"Value": [True]}) | pandas.DataFrame |
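# Illustration of the check_like=True comparison relied on in the tests above:
# it ignores row and column order, so equally-labelled frames compare equal even
# when the output ordering differs (toy frames, not pipeline output).
import pandas as pd

a = pd.DataFrame({"x": [1, 2], "y": [3, 4]}, index=["n1", "n2"])
b = pd.DataFrame({"y": [4, 3], "x": [2, 1]}, index=["n2", "n1"])
pd.testing.assert_frame_equal(a, b, check_like=True)  # passes
# pd.testing.assert_frame_equal(a, b)                 # would raise: ordering differs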
# ActivitySim
# See full license in LICENSE.txt.
import logging
import numpy as np
import orca
import pandas as pd
from activitysim.core import simulate as asim
from activitysim.core import tracing
from activitysim.core import pipeline
logger = logging.getLogger(__name__)
@orca.table()
def households(store, households_sample_size, trace_hh_id):
df_full = store["households"]
# if we are tracing hh exclusively
if trace_hh_id and households_sample_size == 1:
# df contains only trace_hh (or empty if not in full store)
df = tracing.slice_ids(df_full, trace_hh_id)
# if we need sample a subset of full store
elif households_sample_size > 0 and len(df_full.index) > households_sample_size:
# take the requested random sample
df = asim.random_rows(df_full, households_sample_size)
# if tracing and we missed trace_hh in sample, but it is in full store
if trace_hh_id and trace_hh_id not in df.index and trace_hh_id in df_full.index:
# replace first hh in sample with trace_hh
logger.debug("replacing household %s with %s in household sample" %
(df.index[0], trace_hh_id))
df_hh = tracing.slice_ids(df_full, trace_hh_id)
df = pd.concat([df_hh, df[1:]])
else:
df = df_full
logger.info("loaded households %s" % (df.shape,))
# replace table function with dataframe
orca.add_table('households', df)
pipeline.get_rn_generator().add_channel(df, 'households')
if trace_hh_id:
tracing.register_traceable_table('households', df)
tracing.trace_df(df, "households", warn_if_empty=True)
return df
# this assigns a chunk_id to each household so we can iterate over persons by whole households
@orca.column("households", cache=True)
def chunk_id(households):
# FIXME - pathological knowledge of name of chunk_id column used by hh_chunked_choosers
chunk_ids = pd.Series(range(len(households)), households.index)
return chunk_ids
@orca.column('households')
def work_tour_auto_time_savings(households):
# FIXME - fix this variable from auto ownership model
return pd.Series(0, households.index)
# this is the placeholder for all the columns to update after the
# workplace location choice model
@orca.table()
def households_cdap(households):
return | pd.DataFrame(index=households.index) | pandas.DataFrame |
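# Toy sketch of the "swap the traced household into the sample" step above,
# with made-up household ids (555 stands in for trace_hh_id):
import pandas as pd

sample = pd.DataFrame({"income": [10, 20, 30]}, index=[101, 102, 103])
traced = pd.DataFrame({"income": [99]}, index=[555])
sample = pd.concat([traced, sample[1:]])
# index is now [555, 102, 103]: the first sampled household was replaced by the traced one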
# standard lib
from collections.abc import Sequence
import json
# 3rd party lib
import pandas as pd
# local lib
from .base_rule import BaseRule, SAMPLE_RULES
@SAMPLE_RULES.register_module('sequential')
class Sequential(BaseRule):
def __init__(self, rules):
self._rules = rules
def _apply(self, obj):
for r in self._rules:
obj = r(obj)
return obj
def __repr__(self):
return self.__class__.__name__ + '([\n\t' + \
',\n\t'.join([repr(v) for v in self._rules]) + '\n])'
if __name__ == '__main__':
import numpy as np
from pprint import pprint
from .eval_rule import EvalRule
from .parallel import Parallel
meta = {
'scale': np.arange(0, 1000, 100),
'depth': np.arange(2, 22, 2),
'latency': np.linspace(0, 20, 10),
}
a = | pd.DataFrame(meta) | pandas.DataFrame |
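# The dict above maps three column names to equal-length (10-element) arrays, which is
# what pd.DataFrame requires; a quick check of the resulting shape:
import numpy as np
import pandas as pd

meta = {
    "scale": np.arange(0, 1000, 100),   # 10 values
    "depth": np.arange(2, 22, 2),       # 10 values
    "latency": np.linspace(0, 20, 10),  # 10 values
}
assert pd.DataFrame(meta).shape == (10, 3)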
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
from sklearn.metrics import precision_recall_fscore_support
from sklearn.metrics import roc_curve, auc
from sklearn.preprocessing import LabelBinarizer
def classification_report(y_true, y_pred, y_score=None, average='micro'):
"""
Params:
--------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
y_score : nd array-like with the probabilities of the classes.
average : str. either 'micro' or 'macro', for more details
of how they are computed see:
http://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html#multiclass-settings
Return:
--------
pd.DataFrame : contains the classification report as pandas.DataFrame
Example:
---------
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.datasets import make_classification
X, y = make_classification(n_samples=5000, n_features=10,
n_informative=5, n_redundant=0,
n_classes=10, random_state=0,
shuffle=False)
X_train, X_test, y_train, y_test = train_test_split(X, y)
model = RandomForestClassifier(max_depth=2, random_state=0)
model.fit(X_train, y_train)
sk_report = classification_report(
digits=6,
y_true=y_test,
y_pred=model.predict(X_test))
report_with_auc = class_report(
y_true=y_test,
y_pred=model.predict(X_test),
y_score=model.predict_proba(X_test))
print(sk_report)
Out:
precision recall f1-score support
0 0.267101 0.645669 0.377880 127
1 0.361905 0.290076 0.322034 131
2 0.408451 0.243697 0.305263 119
3 0.345455 0.327586 0.336283 116
4 0.445652 0.333333 0.381395 123
5 0.413793 0.095238 0.154839 126
6 0.428571 0.474820 0.450512 139
7 0.446809 0.169355 0.245614 124
8 0.302703 0.466667 0.367213 120
9 0.373333 0.448000 0.407273 125
avg / total 0.379944 0.351200 0.335989 1250
print(report_with_auc)
Out:
precision recall f1-score support pred AUC
0 0.267101 0.645669 0.377880 127.0 307.0 0.810550
1 0.361905 0.290076 0.322034 131.0 105.0 0.777579
2 0.408451 0.243697 0.305263 119.0 71.0 0.823277
3 0.345455 0.327586 0.336283 116.0 110.0 0.844390
4 0.445652 0.333333 0.381395 123.0 92.0 0.811389
5 0.413793 0.095238 0.154839 126.0 29.0 0.654790
6 0.428571 0.474820 0.450512 139.0 154.0 0.876458
7 0.446809 0.169355 0.245614 124.0 47.0 0.777237
8 0.302703 0.466667 0.367213 120.0 185.0 0.799735
9 0.373333 0.448000 0.407273 125.0 150.0 0.825959
avg / total 0.379944 0.351200 0.335989 1250.0 1250.0 0.800534
"""
if y_true.shape != y_pred.shape:
print("Error! y_true %s is not the same shape as y_pred %s" % (
y_true.shape,
y_pred.shape)
)
return
lb = LabelBinarizer()
if len(y_true.shape) == 1:
lb.fit(y_true)
# Value counts of predictions
labels, cnt = np.unique(
y_pred,
return_counts=True)
n_classes = len(labels)
all_labels = set(labels).union(np.unique(y_true))
pred_cnt = | pd.Series(cnt, index=labels) | pandas.Series |
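# The np.unique(..., return_counts=True) -> pd.Series step above is just a
# value count of the predictions; a toy check with made-up labels:
import numpy as np
import pandas as pd

y_pred = np.array([0, 1, 1, 2, 2, 2])
labels, cnt = np.unique(y_pred, return_counts=True)
pred_cnt = pd.Series(cnt, index=labels)
by_value_counts = pd.Series(y_pred).value_counts().sort_index()
assert list(pred_cnt) == list(by_value_counts) == [1, 2, 3]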
#%%
# import labraries
import matplotlib.pyplot as plt
import pandas as pd
import time
import os
#%%
# preprocess time of active power
def preprocess_active_power(df_act):
start_idx, end_idx = 0, 0
for i in range(0, int(df_act.shape[0]/2)):
if df_act['Item005'][i]==0 and df_act['Item005'][i+1]!=0:
start_idx = i
break
for i in range(df_act.shape[0]-1, int(df_act.shape[0]/2), -1):
if df_act['Item005'][i]==0 and df_act['Item005'][i-1]!=0:
end_idx = i
break
df_act['DataSavedTime'][:end_idx-start_idx+1] = df_act['DataSavedTime'][start_idx:end_idx+1]
df_act['Item005'][:end_idx-start_idx+1] = df_act['Item005'][start_idx:end_idx+1]
df_result = pd.concat([df_act['DataSavedTime'][:end_idx-start_idx+1], df_act['Item005'][:end_idx-start_idx+1]], axis=1)
return df_result
#%%
# read labeled data
label_directories = [f for f in os.listdir('./elevator_label/') if not '.' in f] # labels
label_cur, label_act = {}, {}
for i, l in enumerate(label_directories):
label_data = [f for f in os.listdir('./elevator_label/'+l+'/') if not 'png' in f] # get only csv files
act_csv = [f for f in label_data if 'active' in f]
cur_csv = [f for f in label_data if not 'active' in f and not '.ini' in f]
print(l, " ==> ", len(act_csv)==len(cur_csv))
label_cur[l] = cur_csv
label_act[l] = act_csv
#%%
# find max time duration
cur_max_duration, act_max_duration = {}, {}
cur_min_duration, act_min_duration = {}, {} # to check error data
error_data = ''
for i, l in enumerate(label_directories):
path = './elevator_label/'+l+'/'
max_cur, max_act = 0, 0
min_cur, min_act = 500, 500
temp = ''
for j in range(len(label_cur[l])):
df_cur = pd.read_csv(path+label_cur[l][j])
df_act = pd.read_csv(path+label_act[l][j])
if df_cur.shape[1]<3:
df_cur.columns = ['DataSavedTime', 'Item005']
else:
df_cur.columns = ['Index', 'DataSavedTime', 'Item005']
cur_time_duration = df_cur.shape[0]
act_time_duration = df_act.shape[0]
if max_cur<cur_time_duration:
max_cur = cur_time_duration
temp = path+label_cur[l][j]
if max_act<act_time_duration:
max_act = act_time_duration
if min_cur>=cur_time_duration:
min_cur = cur_time_duration
error_data = path+label_cur[l][j]
if min_act>=act_time_duration:
min_act = act_time_duration
cur_max_duration[l] = max_cur
act_max_duration[l] = max_act
cur_min_duration[l] = min_cur
act_min_duration[l] = min_act
print('cur_max_duration', '='*20, '\n', cur_max_duration)
print('cur_min_duration', '='*20, '\n', cur_min_duration)
print('act_max_duration', '='*20, '\n', act_max_duration)
print('act_min_duration', '='*20, '\n', act_min_duration)
#%%
# plot graphs of current in one figure
target_label = ['012', '052']
for i, l in enumerate(label_directories):
if not l in target_label:
continue
path = './elevator_label/'+l+'/'
plt.figure()
for j in range(len(label_cur[l])):
df_cur = | pd.read_csv(path+label_cur[l][j]) | pandas.read_csv |
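# Rough sketch of the leading/trailing-zero trim that preprocess_active_power does
# with explicit loops, rewritten here with nonzero positions on a toy series
# (not data from the elevator CSVs):
import numpy as np
import pandas as pd

power = pd.Series([0, 0, 0, 5, 7, 6, 0, 0])
nz = np.flatnonzero(power.to_numpy())
start_idx, end_idx = nz[0] - 1, nz[-1] + 1  # keep one bounding zero on each side
trimmed = power.iloc[start_idx:end_idx + 1].reset_index(drop=True)
# trimmed -> [0, 5, 7, 6, 0]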