prompt (stringlengths 19–1.03M) | completion (stringlengths 4–2.12k) | api (stringlengths 8–90) |
---|---|---|
"""
Output/model results file interfaces.
All defined classes are attached to project and run instances as
propertyplugins that return a pandas.DataFrame. For files to be read from the
SWIM project, a from_project method needs to be defined. To read the data from
a run instance, a method referring to the extension of a file saved as a
run file needs to be defined (e.g. from_csv), or from_run to override the
file selection.
Conventions
-----------
- class and method names should be lowercase, words separated by _ and
descriptions should be singular (subbasin rather than subbasins)
- name word order: spatial domain (catchment, subbasin, hydrotope, station
etc.), timestep adjective (daily, monthly, annually, average), variable
and/or other descriptions. Pattern: domain_timestep_variable[_description...]
- all read ``from_*`` methods should pass any keyword on to the pandas read call
"""
import os.path as osp
from glob import glob
import datetime as dt
import calendar
import warnings
import inspect
import numpy as np
import pandas as pd
from modelmanager.utils import propertyplugin
from modelmanager.plugins.pandas import ProjectOrRunData
from swimpy import utils, plot, hydro
from swimpy.plot import plot_function as _plot_function
from swimpy.grass import _subbasin_or_hydrotope_values_to_raster
RESDIR = 'output/Res'
GISDIR = 'output/GIS'
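# Illustration only (not part of swimpy): a minimal sketch of the interface
# convention described in the module docstring, i.e. a ProjectOrRunData
# subclass with a ``from_project`` reader for the SWIM output file and a
# ``from_csv`` reader for files saved with a run. The file name
# 'example_output.csv' and its 'time' column are hypothetical; the sketch is
# kept commented out so it is not picked up as a real interface.
#
# class subbasin_daily_example(ProjectOrRunData):
#     path = osp.join(RESDIR, 'example_output.csv')
#
#     @staticmethod
#     def from_project(path, **readkwargs):
#         # pass any keyword on to the pandas read call (module convention)
#         df = pd.read_csv(path, **readkwargs)
#         df.index = pd.PeriodIndex(pd.to_datetime(df.pop('time')),
#                                   freq='d', name='time')
#         return df
#
#     @staticmethod
#     def from_csv(path, **readkwargs):
#         df = pd.read_csv(path, index_col=0, parse_dates=[0], **readkwargs)
#         df.index = df.index.to_period(freq='d')
#         return df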
class station_daily_discharge(ProjectOrRunData):
"""
Daily discharge of selected stations.
"""
path = osp.join(RESDIR, 'Q_gauges_sel_sub_routed_m3s.csv')
plugin = ['plot', 'plot_regime', 'plot_flow_duration_polar']
@staticmethod
def from_project(path, **readkwargs):
readkwargs.setdefault("skipinitialspace", True)
df = pd.read_csv(path, **readkwargs)
dtms = [dt.date(y, 1, 1) + dt.timedelta(d - 1)
for y, d in zip(df.pop('YEAR'), df.pop('DAY'))]
df.index = pd.PeriodIndex(dtms, freq='d', name='time')
return df
@staticmethod
def from_csv(path, **readkwargs):
df = pd.read_csv(path, index_col=0, parse_dates=[0], **readkwargs)
df.index = df.index.to_period(freq='d')
return df
def _default_stations(self, stations=None):
if stations is None:
dstations = self.columns[1:] # first column is observed
else:
assert type(stations) == str or len(stations) > 0
dstations = [stations] if type(stations) == str else stations
return dstations
@_plot_function
def plot(self, stations=None, freq='d', minmax=False,
observed=False, ax=None, runs=None, output=None, **linekw):
"""Line plot of daily discharge of selected stations.
Arguments
---------
stations : None | str | iterable
Only show single (str) or subset (iterable) of stations. If None,
show all found in file.
freq : <pandas frequency>
Any pandas frequency to aggregate to.
observed : bool
Add line for observed discharge. stations.daily_discharge_observed
must be configured.
**linekw :
Pass any keyword on to the line plot function.
"""
stations = self._default_stations(stations)
data = utils.aggregate_time(self[stations], freq=freq)
# plot observed
if observed:
obs = utils.aggregate_time(
(self.project.stations.daily_discharge_observed
.loc[self.index, stations]), freq=freq)
clrs = plot.default_colors(len(stations), linekw.get('colors', []))
for c, s in zip(clrs, stations):
plot.plot_discharge(obs[s], ax, linestyle='--', color=c)
for s in stations:
# add label if multiple runs
if runs and len(runs[0]) > 1:
qs, i = runs
lab = '%s ' % qs[i] + ('' if len(stations) == 1 else str(s))
linekw['label'] = lab
line = plot.plot_discharge(data[s], ax, **linekw)
return line
@_plot_function
def plot_regime(self, stations=None, freq='d', minmax=False,
observed=False, ax=None, runs=None, output=None, **linekw):
"""Line plot of daily discharge of selected stations.
Arguments
---------
stations : None | str | iterable
Only show single (str) or subset (iterable) of stations. If None,
show all found in file.
freq : str
Regime frequency, d (daily) or m (monthly).
minmax : bool | dict
Show the min-max range. May be a dictionary of kwargs passed to
ax.fill_between.
observed : bool
Add line for observed discharge. stations.daily_discharge_observed
must be configured.
**linekw :
Pass any keyword on to the line plot function.
"""
stations = self._default_stations(stations)
data = {}
for st in ['mean'] + (['min', 'max'] if minmax else []):
data[st] = utils.aggregate_time(self[stations], regime=True,
freq=freq, regime_method=st)
# show range first if required
if minmax:
for s in stations:
fbkw = minmax if type(minmax) == dict else {}
fbkw.setdefault("alpha", 0.5)
ax.fill_between(data['min'][s].index, data['min'][s], data['max'][s], **fbkw)
# plot observed
if observed:
obs = utils.aggregate_time(
(self.project.stations.daily_discharge_observed
.loc[self.index, stations]), regime=True, freq=freq)
clrs = plot.default_colors(len(stations), linekw.get('colors', []))
for c, s in zip(clrs, stations):
plot.plot_discharge(obs[s], ax, linestyle='--', color=c)
for s in stations:
# add label if multiple runs
if runs and len(runs[0]) > 1:
qs, i = runs
lab = '%s ' % qs[i] + ('' if len(stations) == 1 else str(s))
linekw['label'] = lab
line = plot.plot_discharge(data['mean'][s], ax, **linekw)
xlabs = {'d': 'Day of year', 'm': 'Month'}
ax.set_xlabel(xlabs[freq])
if freq == 'm':
ax.set_xticklabels([s[0] for s in calendar.month_abbr[1:]])
ax.set_xticks(range(1, 12+1))
ax.set_xlim(1, 12)
elif freq == 'd':
nd = np.array(calendar.mdays).cumsum()
nd[:-1] += 1
ax.set_xticks(nd)
ax.set_xlim(1, 365)
return line
@_plot_function
def plot_flow_duration(self, stations=None, ax=None, runs=None,
output=None, **linekw):
stations = self._default_stations(stations)
lines = []
for s in stations:
fd = hydro.flow_duration(self[s])
line = plot.plot_flow_duration(fd, ax=ax, **linekw)
lines.append(line)
return lines
@_plot_function
def plot_flow_duration_polar(self, station, percentilestep=10, freq='m',
colormap='jet_r', ax=None, runs=None,
output=None, **barkw):
"""Plot flow duration on a wheel of month or days of year.
Arguments
---------
station : str
A single station label (not possible for multiple stations).
percentilestep : number <= 50
Width of the flow duration intervals in percent (of 100 %).
freq : 'm' | 'd'
Duration per month or day of year.
colormap : str
Name of the Matplotlib colormap to use for the colour shading.
"""
if runs:
assert len(runs[0]) == 1
ax = plot.plot_flow_duration_polar(self[station], freq=freq, ax=ax,
percentilestep=percentilestep,
colormap=colormap, **barkw)
return ax
def peak_over_threshold(self, percentile=1, threshold=None, maxgap=None,
stations=None):
"""Identify peaks over a threshold, return max, length, date and recurrence.
Arguments
---------
percentile : number
The percentile threshold of discharge q, e.g. 1 means Q1.
threshold : number, optional
Absolute threshold to use for peak identification.
maxgap : int, optional
Largest gap (in number of timesteps) between two threshold exceedance
periods that still counts as a single flood event. If not given, every
exceedance is counted as an individual flood event.
stations : stationID | list of stationIDs
Return subset of stations. Default all.
Returns
-------
pd.DataFrame :
Dataframe ordered by peak discharge with an order index and columns for
peak q, length, peak date and recurrence; the columns get a MultiIndex if
more than one station is selected.
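Example
-------
A hypothetical call (the station label is an assumption) returning Q1
floods, merging exceedances separated by up to three timesteps::

    project.station_daily_discharge.peak_over_threshold(
        percentile=1, maxgap=3, stations='gauge1')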
"""
stations = self._default_stations(stations)
kw = dict(percentile=percentile, threshold=threshold, maxgap=maxgap)
pot = [hydro.peak_over_threshold(self[s], **kw) for s in stations]
return pot[0] if len(stations) == 1 else pd.concat(pot, keys=stations)
def obs_sim_overlap(self, warmupyears=1):
"""Return overlapping obs and sim dataframes excluding warmup period.
Arguments
---------
warmupyears : int
Number of years to skip at the beginning as warm-up period.
Returns
-------
(pd.DataFrame, pd.DataFrame) : observed and simulated discharge.
"""
obs = self.project.stations.daily_discharge_observed
# exclude warmup period
sim = self[str(self.index[0].year+warmupyears):]
obsa, sima = obs.align(sim, join='inner')
# obsa can still have columns with only NAs
obsa.dropna(how='all', axis=1, inplace=True)
return obsa, sima[obsa.columns]
@property
def NSE(self):
"""pandas.Series of Nash-Sutcliff efficiency excluding warmup year."""
obs, sim = self.obs_sim_overlap()
return pd.Series({s: hydro.NSE(obs[s], sim[s]) for s in obs.columns})
@property
def rNSE(self):
"""pandas.Series of reverse Nash-Sutcliff efficiency (best = 0)"""
return 1 - self.NSE
@property
def pbias(self):
"""pandas.Series of percent bias excluding warmup year."""
obs, sim = self.obs_sim_overlap()
return pd.Series({s: hydro.pbias(obs[s], sim[s]) for s in obs.columns})
@property
def pbias_abs(self):
"""pandas.Series of absolute percent bias excluding warmup year."""
return self.pbias.abs()
class subbasin_daily_waterbalance(ProjectOrRunData):
path = osp.join(RESDIR, 'subd.prn')
plugin = ['to_raster']
@staticmethod
def from_project(path, **readkwargs):
def parse_time(y, d):
dto = dt.date(int(y), 1, 1) + dt.timedelta(int(d) - 1)
return pd.Period(dto, freq='d')
d = pd.read_csv(path, delim_whitespace=True, date_parser=parse_time,
parse_dates=[[0, 1]], index_col=[0, 1], **readkwargs)
d.index.names = ['time', 'subbasinID']
return d
@staticmethod
def from_csv(path, **readkwargs):
df = pd.read_csv(path, index_col=[0, 1], parse_dates=[0],
date_parser=pd.Period, **readkwargs)
return df
def to_raster(self, variable, timestep=None, prefix=None, name=None,
strds=True, mapset=None):
# extra argument
"""variable : str
Selected variable (will be appended to default prefix).
"""
prefix = prefix or self.__class__.__name__ + '_' + variable.lower()
_subbasin_or_hydrotope_values_to_raster(
self.project, self[variable].unstack(),
self.project.subbasins.reclass, timestep=timestep, name=name,
prefix=prefix, strds=strds, mapset=mapset)
return
to_raster.__doc__ = (_subbasin_or_hydrotope_values_to_raster.__doc__ +
to_raster.__doc__)
class subbasin_monthly_waterbalance(subbasin_daily_waterbalance):
path = osp.join(RESDIR, 'subm.prn')
def from_project(self, path, **readkwargs):
styr = self.project.config_parameters['iyr']
def parse_time(y, m):
return pd.Period('%04i-%02i' % (styr+int(y)-1, int(m)), freq='m')
with open(path) as f:
header = f.readline().split()
df = pd.read_csv(f, delim_whitespace=True, skiprows=1, header=None,
index_col=[0, 1], date_parser=parse_time,
parse_dates=[[0, 1]], names=header, **readkwargs)
df.index.names = ['time', 'subbasinID']
return df
class subbasin_daily_discharge(ProjectOrRunData):
path = osp.join(RESDIR, 'Q_gauges_all_sub_routed_m3s.csv')
@staticmethod
def from_project(path, **readkwargs):
df = pd.read_csv(path, delim_whitespace=True, index_col=[0, 1],
**readkwargs)
dtms = [dt.date(y, 1, 1) + dt.timedelta(d - 1) for y, d in df.index]
df.index = pd.PeriodIndex(dtms, freq='d', name='time')
df.columns = df.columns.astype(int)
return df
@staticmethod
def from_csv(path, **readkwargs):
df = pd.read_csv(path, index_col=0, parse_dates=[0], **readkwargs)
df.index = df.index.to_period(freq='d')
df.columns = df.columns.astype(int)
return df
class subbasin_daily_runoff(subbasin_daily_discharge):
path = osp.join(RESDIR, 'Q_gauges_all_sub_mm.csv')
class catchment_daily_waterbalance(ProjectOrRunData):
path = osp.join(RESDIR, 'bad.prn')
@staticmethod
def from_project(path, **readkwargs):
df = pd.read_csv(path, delim_whitespace=True, **readkwargs)
dtms = [dt.date(y, 1, 1) + dt.timedelta(d - 1)
for y, d in zip(df.pop('YR'), df.pop('DAY'))]
df.index = pd.PeriodIndex(dtms, freq='d', name='time')
return df
@staticmethod
def from_csv(path, **readkwargs):
df = pd.read_csv(path, index_col=0, parse_dates=[0], **readkwargs)
df.index = df.index.to_period(freq='d')
return df
class catchment_monthly_waterbalance(ProjectOrRunData):
path = osp.join(RESDIR, 'bam.prn')
@staticmethod
def from_project(path, **readkwargs):
with open(path, 'r') as f:
iyr = int(f.readline().strip().split('=')[1])
df = pd.read_csv(f, delim_whitespace=True, index_col=False,
**readkwargs)
df.dropna(inplace=True) # exclude Year = ...
df = df.drop(df.index[range(12, len(df), 12+1)])  # exclude repeated header rows
dtms = ['%04i-%02i' % (iyr+int((i-1)/12.), m)
for i, m in enumerate(df.pop('MON').astype(int))]
df.index = pd.PeriodIndex(dtms, freq='m', name='time')
return df.astype(float)
@staticmethod
def from_csv(path, **readkwargs):
df = pd.read_csv(path, index_col=0, parse_dates=[0], **readkwargs)
df.index = df.index.to_period(freq='m')
return df
class catchment_annual_waterbalance(ProjectOrRunData):
path = osp.join(RESDIR, 'bay.prn')
plugin = ['plot_mean', 'print_mean']
@staticmethod
def from_project(path, **readkwargs):
df = pd.read_csv(path, delim_whitespace=True, index_col=0,
parse_dates=[0], **readkwargs)
df.index = df.index.to_period(freq='a')
return df
@staticmethod
def from_csv(path, **readkwargs):
df = pd.read_csv(path, index_col=0, parse_dates=[0], **readkwargs)
df.index = df.index.to_period(freq='a')
return df
@plot.plot_function
def plot_mean(self, ax=None, runs=None, output=None, **barkw):
bars = plot.plot_waterbalance(self.mean(), ax=ax, **barkw)
return bars
def print_mean(self):
mean = self.mean().to_string()
print(mean)
return
@property
def runoff_coefficient(self):
(_, p), (_, r) = self[['PREC', '3Q']].items()
return r/p
class subcatch_annual_waterbalance(ProjectOrRunData):
path = osp.join(RESDIR, 'bay_sc.csv')
plugin = ['print_mean']
@staticmethod
def from_project(path, **readkwargs):
df = pd.read_csv(path, index_col=[0, 1], parse_dates=[1],
skipinitialspace=True, **readkwargs)
api = df.index.levels[1].to_period(freq='a')
df.index = df.index.set_levels(api, level=1)
return df
@staticmethod
def from_csv(path, **readkwargs):
df = pd.read_csv(path, index_col=[0, 1], parse_dates=[1], **readkwargs)
api = df.index.levels[1].to_period(freq='a')
df.index = df.index.set_levels(api, level=1)
return df
@property
def runoff_coefficient(self):
(_, p), (_, r) = self[['PREC', '3Q']].items()
return (r/p).unstack(level=0)
def print_mean(self, catchments=None):
"""Print average values. Selected catchments or all (default)."""
df = self.loc[catchments] if catchments else self
ml = 0 if hasattr(df.index, 'levels') else None
mdf = df.mean(level=ml).T
print(mdf.to_string())
return mdf
class hydrotope_daily_waterbalance(ProjectOrRunData):
path = osp.join(RESDIR, 'htp.prn')
@staticmethod
def from_project(path, **readkwargs):
args = dict(
delim_whitespace=True, index_col=[0, 1, 2], parse_dates=[[0, 1]],
date_parser=lambda y, d: dt.datetime.strptime(y+'-'+d, '%Y-%j'))
args.update(readkwargs)
htp = pd.read_csv(path, **args)
htp.index = (htp.index.set_levels(htp.index.levels[0].to_period(), 0)
.reorder_levels([1, 2, 0]))
htp.index.names = ['subbasinID', 'hydrotope', 'time']
return htp
@staticmethod
def from_csv(path, **readkw):
df = pd.read_csv(path, index_col=[0, 1, 2], parse_dates=[2], **readkw)
df.index = df.index.set_levels(df.index.levels[2].to_period(), 2)
return df
class hydrotope_daily_crop_indicators(ProjectOrRunData):
path = osp.join(RESDIR, 'crop.out')
plugin = []
column_names = ['doy', 'water_stress', 'temp_stress', 'maturity',
'biomass', 'lai', 'root_depth']
def from_project(self, path, **readkwargs):
iyr = self.project.config_parameters['iyr']
df = pd.read_csv(path, delim_whitespace=True, header=None,
names=self.column_names)
prds, hydid = [], []
idoy, iy, ihyd = 0, iyr-1, 0
for d in df.pop('doy'):
ihyd = 1 if d != idoy else ihyd+1
idoy = d
hydid += [ihyd]
iy = iy+1 if d == 1 and ihyd == 1 else iy
prds += [dt.date(iy, 1, 1)+dt.timedelta(d-1)]
pix = pd.PeriodIndex(prds, freq='d')
df.index = pd.MultiIndex.from_arrays([pix, hydid],
names=['time', 'hydrotope'])
return df
@staticmethod
def from_csv(path, **readkwargs):
df = pd.read_csv(path, index_col=[0, 1], parse_dates=[0],
date_parser=pd.Period, **readkwargs)
return df
class subbasin_annual_crop_yield(ProjectOrRunData):
path = osp.join(RESDIR, 'cryld.prn')
plugin = []
column_names = ['cropID', 'year', 'subbasinID', 'soilID', 'yield', 'area']
column_seperators = ['Crp=', 'Yr=', 'Sub=', 'Sol=', 'Yld=', 'Area=']
def from_project(self, path, **readkwargs):
df = pd.read_csv(path, sep="|".join(self.column_seperators),
engine='python', header=None, names=self.column_names)
# index
df.set_index(self.column_names[:4], inplace=True)
# clean units
df['yield'] = np.array([y.replace('dt/ha', '') for y in df['yield']],
dtype=float)
df['area'] = np.array([y.replace('ha', '') for y in df['area']],
dtype=float)
return df
@staticmethod
def from_csv(path, **readkwargs):
df = pd.read_csv(path, index_col=[0, 1, 2, 3], **readkwargs)
return df
class gis_files(object):
"""Management plugin to dynamically add GIS file propertyplugins."""
file_names = {'eva-gis': 'annual_evapotranspiration_actual',
'gwr-gis': 'annual_groundwater_recharge',
'pre-gis': 'annual_precipitation',
'run-gis': 'annual_runoff',
'evamon-gis': "monthly_evapotranspiration_actual"
}
class _gis_file(ProjectOrRunData):
"""Generic file interface. path will be assigned through dynamic
subclassing in gis_files._create_propertyplugins.
"""
plugin = ['to_raster']
def from_project(self, path, **readkwargs):
return self.project.gis_files.read(path, **readkwargs)
def from_csv(self, path, **readkwargs):
df = pd.read_csv(path, index_col=0, parse_dates=[0], **readkwargs)
df.columns = df.columns.astype(int)
if len(df.index) > 1:
df.index = df.index.to_period()
return df
def to_raster(self, timestep=None, prefix=None, name=None, strds=True,
mapset=None):
"""Outsourced for reuse to grass.py."""
_subbasin_or_hydrotope_values_to_raster(
self.project, self, self.project.hydrotopes.reclass, name=name,
timestep=timestep, prefix=prefix, strds=strds, mapset=mapset)
return
to_raster.__doc__ = _subbasin_or_hydrotope_values_to_raster.__doc__
def __init__(self, project):
self.project = project
self.gisdir = osp.join(project.projectdir, GISDIR)
self.interfaces = self._create_propertyplugins()
self.project.settings(**self.interfaces)
return
def read(self, pathorname, **readkwargs):
"""Read a SWIM GIS file by full path or by the filename."""
namepath = osp.join(self.gisdir, pathorname)
path = namepath if osp.exists(namepath) else pathorname
df = pd.read_csv(path, delim_whitespace=True, usecols=[0, 2],
header=None, names=['id', 'value'], **readkwargs)
# make 2D array (timesteps, hydrotopes)
nhyd = df.id.max()
dfrs = df.value.T.values.reshape(-1, nhyd)
ids = list(range(1, nhyd+1))
nsteps = dfrs.shape[0]
if nsteps > 1:
ix = self._guess_gis_file_index(nsteps)
dat = pd.DataFrame(dfrs, columns=ids, index=ix)
else:
conf = self.project.config_parameters
name = '%s:%s' % (conf.start_date, conf.end_date)
dat = pd.DataFrame(dfrs, columns=ids, index=[name])
return dat
def _guess_gis_file_index(self, nsteps):
nbyr, iyr = self.project.config_parameters('nbyr', 'iyr')
ixkw = dict(start=str(iyr), periods=nsteps, name='time')
if nsteps == nbyr:
ix = pd.period_range(freq='a', **ixkw)
elif nsteps == nbyr*12:
ix =
|
pd.period_range(freq='m', **ixkw)
|
pandas.period_range
|
import time
import datetime
import gdax
import csv
import pandas as pd
import matplotlib.pyplot as plt
import quandl
import numpy as np
class Data:
# This is a dataframe stored as a CSV
_file = 'data.csv'
_passphrase = '<KEY>'
_API_Key = '<KEY>'
_API_Secret = '<KEY>'
def __init__(self, csv_file='', ticker_name='', start_unix=1420070400, end_unix=int(time.time()), resample_s=1):
if csv_file != '':
df = pd.read_csv(csv_file, parse_dates=False)
df.to_csv(self._file, index=False)
self.csv_reformat(resample_s)
elif ticker_name != '':
auth_client = gdax.AuthenticatedClient(self._API_Key, self._API_Secret, self._passphrase)
data = open(self._file, 'a') # w is for write, a is for append
i = end_unix
missed_data_count = 0
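# Walk backwards from end_unix in ~200-minute windows: each request asks the
# exchange API for one-minute candles covering [i, i + 199 * 60] seconds,
# then the cursor steps back by 12000 s (200 minutes) per iteration.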
while i > start_unix:
reply_from_server = auth_client.get_product_historic_rates(ticker_name, str(
datetime.datetime.utcfromtimestamp(i).isoformat() + '+00:00'),
str(datetime.datetime.utcfromtimestamp(
i + 199 * 60).isoformat() + '+00:00'),
'60')
for candle_data in reply_from_server:
if candle_data != 'message':
candle_data = str(candle_data).replace('[', '')
candle_data = str(candle_data).replace(']', '')
data.write('\n' + str(candle_data))
else:
missed_data_count += 1
print('missed Data')
i -= 12000
print(i)
time.sleep(0.5)
data.close()
print(missed_data_count)
else:
df = pd.DataFrame(columns=['Time', 'Low', 'High', 'Open', 'Close', 'Volume'])
df.to_csv(self._file, index=False)
# Downsamples data to selected resample period (in seconds) and creates a new file
def csv_reformat(self, resample_s=1):
df = pd.read_csv(self._file, parse_dates=False)
df_resampled = pd.DataFrame(columns=['Time', 'Low', 'High', 'Open', 'Close', 'Volume'])
df = df.sort_values(by='Time').reset_index(drop=True)
if resample_s != 1:
curr_time = df.loc[0, 'Time']
for i in range(len(df)):
if abs(curr_time - df.loc[i, 'Time']) >= resample_s:
curr_time = df.loc[i, 'Time']
df_resampled = df_resampled.append(df.iloc[[i]])
df = df_resampled
df.to_csv(self._file, index=False)
# Plots a matplotlib graph of the data object
def plot_graph(self, close=False, minim=False, maxim=False, min_value=False, max_value=False, buy_sell=False,
rsi=False):
df = pd.read_csv(self._file, parse_dates=False)
ax1 = plt.subplot2grid((6, 1), (0, 0), rowspan=5, colspan=1)
ax2 = plt.subplot2grid((6, 1), (5, 0), rowspan=5, colspan=1, sharex=ax1)
if 'Close' in df and close:
ax1.plot(df['Time'], df['Close'])
if 'Min' in df and minim:
ax1.scatter(df.index, df['Min'], c='r')
if 'Max' in df and maxim:
ax1.scatter(df.index, df['Max'], c='g')
if 'MinValue' in df and min_value:
ax2.plot(df['Time'], df['MinValue'], c='r')
if 'MaxValue' in df and max_value:
ax2.plot(df['Time'], df['MaxValue'], c='g')
if 'Buy/Sell' in df and buy_sell:
ax2.plot(df['Time'], df['Buy/Sell'])
if 'RSI' in df and rsi:
ax2.plot(df['Time'], df['RSI'])
plt.show()
# Iterates over data and returns the number of missing dates
def find_missing_data(self, time_step=1000):
df = pd.read_csv(self._file, parse_dates=False)
curr_unix_time = time.time()
missing_entry_count = 0
entry_found = False
for i in range(len(df)):
if abs(df.loc[i, 'Time']-curr_unix_time) > time_step:
curr_unix_time = df.loc[i, 'Time']
if not entry_found:
missing_entry_count += 1
else:
entry_found = False
else:
entry_found = True
return missing_entry_count
# Calculates the rsi and adds a column for it, only works with old to recent times
def calc_rsi(self, n=14, based_on='Close'):
df = pd.read_csv(self._file, parse_dates=False)
df['RSI'] = 0.5
df['Change'] = 0
df['Change'] = df[based_on] - df[based_on].shift(-1)
init_total_gain = 0
init_total_loss = 0
for j in range(n):
if df.loc[n - j, 'Change'] > 0:
init_total_gain += df.loc[n - j, 'Change']
else:
init_total_loss -= df.loc[n - j, 'Change']
init_average_gain = init_total_gain / n
init_average_loss = init_total_loss / n
if init_average_loss == 0:
init_rsi = 1
else:
init_rs = init_average_gain / init_average_loss
init_rsi = (100 - (100 / (1 + init_rs))) / 100
df.loc[n, 'RSI'] = init_rsi
prev_avrg_gain = init_average_gain
prev_avrg_loss = init_average_loss
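# Wilder-style smoothing: each new average gain/loss keeps (n - 1)/n of the
# previous average and adds 1/n of the current change before the RSI is
# recomputed from their ratio.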
for i in range(n + 1, len(df)):
if df.loc[i, 'Change'] > 0:
prev_avrg_gain = ((prev_avrg_gain * (n-1)) + df.loc[i, 'Change']) / n
prev_avrg_loss = ((prev_avrg_loss * (n-1)) / n)
else:
prev_avrg_gain = ((prev_avrg_gain * (n-1)) / n)
prev_avrg_loss = ((prev_avrg_loss * (n-1)) - df.loc[i, 'Change']) / n
rs = prev_avrg_gain / prev_avrg_loss
rsi = (100 - (100 / (1 + rs))) / 100
df.loc[i, 'RSI'] = rsi
del df['Change']
df.to_csv(self._file, index=False)
# Adds columns RSI and Buy/Sell, Buy/Sell = 1 is buy all, =-1 is sell all
def rsi_bot(self, n=14, buy_rsi=0.3, sell_rsi=0.7, init_holding_fiat=True):
df = pd.read_csv(self._file, parse_dates=False)
self.calc_rsi(n=n)
df['Buy/Sell'] = 0
holding_fiat = init_holding_fiat
for i in range(len(df)):
if (not holding_fiat) and df.loc[i, 'RSI'] > sell_rsi:
df.loc[i, 'Buy/Sell'] = -1.0
holding_fiat = True
elif holding_fiat and df.loc[i, 'RSI'] < buy_rsi:
df.loc[i, 'Buy/Sell'] = 1.0
holding_fiat = False
df.to_csv(self._file, index=False)
# Back-tests an algorithm and returns the assets held at the end of the test
def back_test(self, init_crypto=0, init_fiat=100):
df =
|
pd.read_csv(self._file, parse_dates=False)
|
pandas.read_csv
|
import numpy as np
import pandas as pd
from datetime import datetime, timedelta
from croniter import croniter, CroniterBadCronError
class DagRunEventsExtractor(object):
def __init__(self, airflow_db, base_date=None, n_last_runs=5):
self.airflow_db = airflow_db
# avoid a datetime.now() default argument (it would be evaluated only once,
# at definition time); resolve the base date at call time instead
self.base_date = base_date if base_date is not None else datetime.now()
self.n_last_runs = n_last_runs
def _estimate_dag_exec_time(self):
"""
Estimate execution time of the future DAG run.
Takes an average of last 5 runs by default.
"""
df_dag_run = self.airflow_db.load_dag_run_metadata()
df_dag_run.start_date = pd.to_datetime(df_dag_run.start_date)
df_dag_run.end_date =
|
pd.to_datetime(df_dag_run.end_date)
|
pandas.to_datetime
|
"""
Author: <NAME>
Created: 4/12/2020 10:20 AM
"""
import pandas as pd
import numpy as np
import os
import ksl_env
# add basgra nz functions
ksl_env.add_basgra_nz_path()
from basgra_python import run_basgra_nz
from supporting_functions.plotting import plot_multiple_results
from Climate_Shocks.get_past_record import get_restriction_record, get_vcsn_record
from Pasture_Growth_Modelling.basgra_parameter_sets import get_params_doy_irr, create_days_harvest, \
create_matrix_weather
from Pasture_Growth_Modelling.calculate_pasture_growth import calc_pasture_growth
import matplotlib.pyplot as plt
mode = 'irrigated'
def create_irrigation_abandomnet_data(base_name, params, reseed_trig=-1, reseed_basal=1, site='eyrewell'):
out = {}
weather = get_vcsn_record(site=site)
rest = get_restriction_record()
p, doy_irr = get_params_doy_irr(mode)
matrix_weather = create_matrix_weather(mode, weather, rest, fix_leap=False)
restrict = 1 - matrix_weather.loc[:, 'max_irr'] / 5
matrix_weather.loc[:, 'max_irr'] = 5
days_harvest = create_days_harvest(mode, matrix_weather, site, fix_leap=False)
# set reseed days harvest
idx = days_harvest.doy == 152
days_harvest.loc[idx, 'reseed_trig'] = reseed_trig
days_harvest.loc[idx, 'reseed_basal'] = reseed_basal
# run models
matrix_weather_new = matrix_weather.copy(deep=True)
matrix_weather_new.loc[restrict >= 0.9999, 'max_irr'] = 0
temp = run_basgra_nz(params, matrix_weather_new, days_harvest, doy_irr, verbose=False)
temp.loc[:, 'f_rest'] = 1 - matrix_weather_new.loc[:, 'max_irr'] / 5
temp.loc[:, 'pg'] = calc_pasture_growth(temp, days_harvest, 'from_yield', '1D', resamp_fun='mean')
out['{}_no_rest'.format(base_name)] = temp
matrix_weather_new = matrix_weather.copy(deep=True)
matrix_weather_new.loc[:, 'max_irr'] *= (1 - restrict)
temp = run_basgra_nz(params, matrix_weather_new, days_harvest, doy_irr, verbose=False)
temp.loc[:, 'f_rest'] = 2 - matrix_weather_new.loc[:, 'max_irr'] / 5
temp.loc[:, 'pg'] = calc_pasture_growth(temp, days_harvest, 'from_yield', '1D', resamp_fun='mean')
out['{}_partial_rest'.format(base_name)] = temp
matrix_weather_new = matrix_weather.copy(deep=True)
matrix_weather_new.loc[:, 'max_irr'] *= (restrict <= 0).astype(int)
temp = run_basgra_nz(params, matrix_weather_new, days_harvest, doy_irr, verbose=False)
temp.loc[:, 'f_rest'] = 3 - matrix_weather_new.loc[:, 'max_irr'] / 5
temp.loc[:, 'pg'] = calc_pasture_growth(temp, days_harvest, 'from_yield', '1D', resamp_fun='mean')
out['{}_full_rest'.format(base_name)] = temp
out['{}_mixed_rest'.format(base_name)] = (out['{}_full_rest'.format(base_name)].transpose() *
restrict.values +
out['{}_no_rest'.format(base_name)].transpose() *
(1 - restrict.values)).transpose()
# paddock level restrictions
levels = np.arange(0, 125, 25) / 100
padock_values = {}
temp_out = []
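# Paddock-level restrictions: the restriction fraction is split into 25 %
# bands (ll, lu). For each band, days at or below the lower bound keep full
# irrigation (max_irr = 5), days at or above the upper bound get none, and
# days inside the band get max_irr interpolated across the band.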
for i, (ll, lu) in enumerate(zip(levels[0:-1], levels[1:])):
matrix_weather_new = matrix_weather.copy(deep=True)
matrix_weather_new.loc[restrict <= ll, 'max_irr'] = 5
matrix_weather_new.loc[restrict >= lu, 'max_irr'] = 0
idx = (restrict > ll) & (restrict < lu)
matrix_weather_new.loc[idx, 'max_irr'] = 5 * ((restrict.loc[idx] - ll) / 0.25)
temp = run_basgra_nz(params, matrix_weather_new, days_harvest, doy_irr, verbose=False)
temp_out.append(temp.values)
temp.loc[:, 'per_PAW'] = temp.loc[:, 'PAW'] / temp.loc[:, 'MXPAW']
temp.loc[:, 'pg'] = calc_pasture_growth(temp, days_harvest, 'from_yield', '1D', resamp_fun='mean')
temp.loc[:, 'f_rest'] = restrict
temp.loc[:, 'RESEEDED'] += i
padock_values['l:{} u:{}'.format(ll, lu)] = temp
temp = np.mean(temp_out, axis=0)
temp2 = out['{}_mixed_rest'.format(base_name)].copy(deep=True).drop(columns=['f_rest', 'pg'])
temp2.loc[:, :] = temp
temp2.loc[:, 'f_rest'] = restrict + 3
temp2.loc[:, 'pg'] = calc_pasture_growth(temp2, days_harvest, 'from_yield', '1D', resamp_fun='mean')
out['{}_paddock_rest'.format(base_name)] = temp2
for k in out:
out[k].loc[:, 'per_PAW'] = out[k].loc[:, 'PAW'] / out[k].loc[:, 'MXPAW']
return out, padock_values
if __name__ == '__main__':
save = False
params, doy = get_params_doy_irr(mode)
outdir = ksl_env.shared_drives(r"Z2003_SLMACC\pasture_growth_modelling\irrigation_tuning")
if not os.path.exists(outdir):
os.makedirs(outdir)
out, paddock_values = create_irrigation_abandomnet_data('b', params, reseed_trig=0.691, reseed_basal=0.722,
site='eyrewell')
# run some stats
outkeys = ['BASAL', 'f_rest', 'IRRIG', 'per_PAW', 'pg']
for k, v in out.items():
v.loc[:, 'month'] = v.index.month
rest = get_restriction_record()
rest = rest.groupby(['year', 'month']).sum()
idxs = pd.MultiIndex.from_product([
|
pd.unique(out['b_paddock_rest'].loc[:, 'year'])
|
pandas.unique
|
# -*- coding: utf-8 -*-
"""
This module contains functionality to comfortably create plots.
"""
from math import floor, ceil, pi
from itertools import islice, chain, cycle, repeat
from collections.abc import Iterable, Mapping
from typing import Union
from warnings import warn
import pandas as pd
import pandas.api.types as pd_types
import numpy as np
from scipy import interpolate
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.colors as plt_colors
import matplotlib.cm as plt_cm
import matplotlib.lines as mlines
import mpl_toolkits.axes_grid1 as axg1
from configparser import ConfigParser
from IPython.display import HTML, display
from tabulate import tabulate
_col_labels = {
'count': 'Anzahl'
}
def spec_col_labels(**kwargs):
"""
Specify labels for column names to be automatically used in plots.
:param kwargs: A map of column names and labels.
"""
_col_labels.update(kwargs)
def spec_col_file(filename):
"""
Specify an INI file with column names to be automatically used in plots.
The column-label-pairs must be placed under the INI section `[Columns]`.
:param filename: A path to the INI file.
"""
cfg = ConfigParser()
cfg.read(filename, encoding='utf8')
_col_labels.update(cfg['Columns'])
def _col_label(label, column):
if label is not None:
return label
if column in _col_labels:
return _col_labels[column]
return column
def table(data: pd.DataFrame, columns=None, labels=None,
with_index=True, index_label=None, limit=None):
"""
Displays an HTML table with the given data.
A subset of columns can be selected with `columns`.
The labels in the header can be explicitly specified with `labels`.
Does not support multi-indexes.
Calls `IPython.display.display()` to present the HTML table.
:param data: A Pandas DataFrame
:param columns: An iterable with column names. (optional)
:param labels: An iterable with column labels. (optional)
Must be the same size as the columns.
:param with_index: A switch to include or exclude the index. (optional)
:param index_label: A string or an iterable with labels for the index.
(optional)
:param limit: A maximum number of rows to display. (optional)
"""
if data.empty:
display(HTML('<p>No Entries</p>'))
return
columns = columns or data.columns
if labels:
headers = labels
else:
headers = [_col_labels[c] if c in _col_labels else c for c in columns]
if with_index:
headers.insert(0, index_label or 'index')
def cells(r):
return chain((r[0],), (getattr(r, c) for c in columns))
else:
def cells(r):
return (getattr(r, c) for c in columns)
rows = map(cells, data.itertuples())
if limit:
rows = islice(rows, limit)
display(HTML(tabulate(rows, tablefmt='html', headers=headers)))
def _default_figure_handler(subplot, fig, ax=None,
title=None, pad=None,
file_name=None, file_dpi=None):
if not fig:
return
if not subplot:
if pad is not None:
fig.tight_layout(pad=pad)
if file_name:
fig.savefig(file_name, dpi=file_dpi)
if title:
ax = ax or fig.gca()
if ax:
ax.set_title(title)
if not subplot:
plt.show()
current_figure = None
current_grid = (1, 1)
_figure_handler = _default_figure_handler
def _finish_figure(fig=None, **kwargs):
if fig is None:
return
_figure_handler(subplot=_in_multiplot(), fig=fig, **kwargs)
def set_figure_handler(handler):
"""
Set a handler, which is called after rendering every plot.
The specified handler must accept the following keyword arguments:
- ``subplot`` A boolean flag indicating that the figure is a subplot
- ``fig`` The figure object of the plot
- ``ax`` The main axis or `None`
- ``title`` A title for the main axis or `None`
- ``pad`` A padding value for calling `tight_layout()` or `None`
- ``file_name`` The filename for the target image file or `None`
- ``file_dpi`` The dpi value for the target image file or `None`
:param handler: The figure handler to use for future plots
"""
global _figure_handler
_figure_handler = handler
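# Illustration only: a hypothetical handler matching the keyword signature
# documented above. It saves files but never calls plt.show(), which can be
# useful for batch rendering; activate it with
# ``set_figure_handler(_example_silent_handler)``.
def _example_silent_handler(subplot, fig, ax=None, title=None, pad=None,
                            file_name=None, file_dpi=None):
    if not fig:
        return
    if title:
        (ax or fig.gca()).set_title(title)
    if not subplot:
        if pad is not None:
            fig.tight_layout(pad=pad)
        if file_name:
            fig.savefig(file_name, dpi=file_dpi)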
def reset_figure_handler():
"""
Reset the handler, which is called after rendering every plot,
to the default.
"""
global _figure_handler
_figure_handler = _default_figure_handler
def begin(figsize=(10, 5), grid=(1, 1)):
"""
Begins a figure with multiple subplots.
:param figsize: A tuple with the figure size in inches (width, height).
(optional)
:param grid: The grid size to place the subplots in (rows, columns).
(optional)
"""
global current_figure, current_grid
if current_figure is not None:
warn("There is already an open figure. Did you use end()?")
current_figure = plt.figure(figsize=figsize)
current_grid = grid
def end(pad=1.5, w_pad=None, h_pad=None,
file_name=None, file_dpi=300):
"""
Finalizes a figure with multiple subplots.
:param pad: Padding around the figure. (optional)
:param w_pad: Horizontal space between subplots. (optional)
See `matplotlib.pyplot.tight_layout()`.
:param h_pad: Vertical space between subplots. (optional)
See `matplotlib.pyplot.tight_layout()`.
:param file_name: A path to a file to save the plot in. (optional)
:param file_dpi: A resolution to render the saved plot. (optional)
"""
global current_figure
if current_figure is None:
raise Exception("No current figure. Did you use begin()?")
if pad is not None:
plt.tight_layout(pad=pad, h_pad=h_pad, w_pad=w_pad)
elif h_pad is not None or w_pad is not None:
plt.tight_layout(h_pad=h_pad, w_pad=w_pad)
fig = current_figure
current_figure = None
_finish_figure(
fig=fig, pad=None,
file_name=file_name, file_dpi=file_dpi)
def _in_multiplot():
global current_figure
return current_figure is not None
def _plt(figsize=(10, 4), pos=(0, 0), rowspan=1, colspan=1):
global current_figure, current_grid
if current_figure:
ax = plt.subplot2grid(current_grid, pos,
rowspan=rowspan, colspan=colspan)
return (current_figure, ax)
else:
fig = plt.figure(figsize=figsize)
return (fig, plt.gca())
def subplot(pos=(0, 0), rowspan=1, colspan=1):
"""
Prepares a sub-plot inside the current figure between calls
of `begin()` and `end()`.
This method is useful, if a custom plot must be integrated
into a multiplot created with `mastersign.datascience.plot`.
:param pos: The position in the grid of a multiplot. (optional)
:param rowspan: The number of rows to span in the grid
of a multiplot. (optional)
:param colspan: The number of columns to span in the grid
of a multiplot. (optional)
:return: A tuple with Matplotlib figure and axes: ``(fig, ax)``.
"""
if not _in_multiplot():
raise Exception("No current figure. Did you use begin()?")
return _plt(pos=pos, rowspan=rowspan, colspan=colspan)
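# Typical multiplot usage (illustration only; the DataFrame ``df`` and its
# columns are assumptions):
#
#     begin(figsize=(10, 8), grid=(2, 1))
#     pie_groups(df, column='category', pos=(0, 0), title='Categories')
#     bar(df, value_column='count', label_column='category', pos=(1, 0))
#     end(file_name='overview.png')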
def _build_key_colors(keys, color):
if isinstance(color, str):
return repeat(color, len(keys))
elif isinstance(color, Mapping):
return [color.get(k, None) or next(plt.gca()._get_lines.prop_cycler)['color']
for k in keys]
elif isinstance(color, Iterable):
return cycle(color)
else:
return [next(plt.gca()._get_lines.prop_cycler)['color'] for k in keys]
def pie(data: Union[pd.DataFrame, pd.Series],
column=None, label_column=None,
color_column=None, color=None,
startangle=180, counterclock=False,
sort_by=None, title=None, pct=True,
figsize=(4, 4), pad=1, pos=(0, 0), rowspan=1, colspan=1,
file_name=None, file_dpi=300):
"""
Display a pie chart with values from a column in a DataFrame
or a Series.
:param data: A Pandas DataFrame or Series.
:param column: The column to use if `data` is a DataFrame.
:param label_column: A column to use for the labels. (optional)
By default the index is used.
:param color_column: A column with color names or RGB hex values.
(optional)
:param color: A list or dict for the colors in the pie.
(optional)
If it is a dict the keys are the labels.
Gets overridden by `color_column`.
:param sort_by: The sort mode `None`, `"label"`, or `"value"`
(optional)
:param startangle: The start angle in degrees. (optional)
:param counterclock: A switch to control the angular order. (optional)
:param title: The title of the plot. (optional)
:param pct: A switch to display percentages. (optional)
:param figsize: The figure size in inches. (optional)
:param pad: Padding around the figure. (optional)
:param pos: The position in the grid of a multiplot. (optional)
:param rowspan: The number of rows to span in the grid
of a multiplot. (optional)
:param colspan: The number of columns to span in the grid
of a multiplot. (optional)
:param file_name: A path to a file to save the plot in. (optional)
:param file_dpi: A resolution to render the saved plot. (optional)
"""
if isinstance(data, pd.DataFrame):
# data is a DataFrame
if column is None:
raise TypeError("If data is a DataFrame, column must be specified.")
if sort_by:
data = data.sort_values(by=label_column) \
if label_column else data.sort_index()
if sort_by == 'value':
data.sort_values(by=column, ascending=False, inplace=True)
x = data[column]
labels = data[label_column] if label_column else data.index
else:
# data is assumed to be a Series
if sort_by:
data = data.sort_index()
if sort_by == 'value':
data.sort_values(ascending=False, inplace=True)
x = data
labels = data.index
color_column = None # ignore color_column for Series
(fig, ax) = _plt(figsize=figsize, pos=pos,
rowspan=rowspan, colspan=colspan)
if color_column:
colors = data[color_column]
elif isinstance(color, Mapping):
colors = [color.get(l) or next(plt.gca()._get_lines.prop_cycler)['color']
for l in labels]
elif color:
colors = color
else:
colors = None
if pct:
ax.pie(x, labels=labels, colors=colors,
startangle=startangle, counterclock=counterclock,
autopct='%1.1f%%')
else:
ax.pie(x, labels=labels, colors=colors,
startangle=startangle, counterclock=counterclock)
ax.axis('equal')
_finish_figure(
fig=fig, ax=ax, title=title, pad=pad,
file_name=file_name, file_dpi=file_dpi)
def pie_groups(data: Union[pd.DataFrame, pd.Series],
column=None, sort_by=None,
startangle=180, counterclock=False,
title=None, pct=True, color=None,
figsize=(4, 4), pad=1, pos=(0, 0), rowspan=1, colspan=1,
file_name=None, file_dpi=300):
"""
Display a pie chart by counting rows according to a column value
from a DataFrame or values from a Series.
:param data: A Pandas DataFrame or Series.
:param column: The column to use for grouping.
:param sort_by: The sort mode `None`, `"label"`, or `"value"`
:param startangle: The start angle in degrees. (optional)
:param counterclock: A switch to control the angular order. (optional)
:param title: The title of the plot.
:param pct: A switch to display percentages.
:param color: A list or dict for the colors in the pie.
(optional)
If it is a dict the groups are the labels.
:param figsize: The figure size in inches. (optional)
:param pad: Padding around the figure. (optional)
:param pos: The position in the grid of a multiplot. (optional)
:param rowspan: The number of rows to span in the grid
of a multiplot. (optional)
:param colspan: The number of columns to span in the grid
of a multiplot. (optional)
:param file_name: A path to a file to save the plot in. (optional)
:param file_dpi: A resolution to render the saved plot. (optional)
"""
if isinstance(data, pd.DataFrame):
groups = data.groupby(column, sort=False).size()
else:
groups = data.groupby(by=data, sort=False).size()
group_data = pd.DataFrame({'value': groups}, index=groups.index)
pie(group_data, 'value', sort_by=sort_by,
startangle=startangle, counterclock=counterclock,
title=title, pct=pct, color=color,
figsize=figsize, pad=pad, pos=pos, rowspan=rowspan, colspan=colspan,
file_name=file_name, file_dpi=file_dpi)
def bar(data: Union[pd.DataFrame, pd.Series],
value_column=None, label_column=None,
color_column=None, cmap=None, color=None,
xlabel=None, ylabel=None, title=None,
figsize=(10, 4), pad=1, pos=(0, 0), rowspan=1, colspan=1,
file_name=None, file_dpi=300):
"""
Display a bar chart from columns in a DataFrame or a Series.
:param data: A Pandas DataFrame or Series.
:param value_column: The column with the values for the bars height.
:param label_column: The column with the labels for the bars. (optional)
:param color_column: The column with a numeric value for choosing
a color from a color map or strings
for explicit colors. (optional)
:param cmap: The name of a color map to use with `color_column`.
(optional)
:param color: A color for all bars or a list with colors. (optional)
`color_column` supersedes `color`.
:param xlabel: The label for the X axis. (optional)
:param ylabel: The label for the Y axis. (optional)
:param title: The title of the plot. (optional)
:param figsize: The figure size in inches. (optional)
:param pad: Padding around the figure. (optional)
:param pos: The position in the grid of a multiplot. (optional)
:param rowspan: The number of rows to span in the grid
of a multiplot. (optional)
:param colspan: The number of columns to span in the grid
of a multiplot. (optional)
:param file_name: A path to a file to save the plot in. (optional)
:param file_dpi: A resolution to render the saved plot. (optional)
"""
if isinstance(data, pd.DataFrame):
all_columns = [value_column, label_column, color_column]
# keep order, drop None/duplicates; .loc rejects set indexers in newer pandas
columns = [c for c in dict.fromkeys(all_columns) if c]
data = data.loc[:, columns].dropna()
values = data[value_column]
if label_column:
labels = data[label_column]
else:
labels = values.index
else:
values = data
labels = data.index
color_column = None # ignore color_column for Series
(fig, ax) = _plt(figsize=figsize, pos=pos,
rowspan=rowspan, colspan=colspan)
bars = ax.bar(labels, values)
if color_column:
colors = data[color_column]
if
|
pd_types.is_numeric_dtype(colors.dtype)
|
pandas.api.types.is_numeric_dtype
|
from __future__ import division #brings in Python 3.0 mixed type calculation rules
import datetime
import inspect
import numpy as np
import numpy.testing as npt
import os.path
import pandas as pd
import sys
from tabulate import tabulate
import unittest
print("Python version: " + sys.version)
print("Numpy version: " + np.__version__)
# #find parent directory and import model
# parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
# sys.path.append(parent_dir)
from ..trex_exe import Trex
test = {}
class TestTrex(unittest.TestCase):
"""
Unit tests for T-Rex model.
"""
print("trex unittests conducted at " + str(datetime.datetime.today()))
def setUp(self):
"""
Setup routine for trex unit tests.
:return:
"""
pass
# setup the test as needed
# e.g. pandas to open trex qaqc csv
# Read qaqc csv and create pandas DataFrames for inputs and expected outputs
def tearDown(self):
"""
Teardown routine for trex unit tests.
:return:
"""
pass
# teardown called after each test
# e.g. maybe write test results to some text file
def create_trex_object(self):
# create empty pandas dataframes to create empty object for testing
df_empty = pd.DataFrame()
# create an empty trex object
trex_empty = Trex(df_empty, df_empty)
return trex_empty
def test_app_rate_parsing(self):
"""
unittest for function app_rate_testing:
method extracts the first and maximum application rate from each list in a series of lists of app rates
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([], dtype="object")
result = pd.Series([], dtype="object")
expected_results = [[0.34, 0.78, 2.34], [0.34, 3.54, 2.34]]
try:
trex_empty.app_rates = pd.Series([[0.34], [0.78, 3.54], [2.34, 1.384, 2.22]], dtype='object')
# trex_empty.app_rates = ([[0.34], [0.78, 3.54], [2.34, 1.384, 2.22]])
# parse app_rates Series of lists
trex_empty.app_rate_parsing()
result = [trex_empty.first_app_rate, trex_empty.max_app_rate]
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_conc_initial(self):
"""
unittest for function conc_initial:
conc_0 = (app_rate * self.frac_act_ing * food_multiplier)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = [12.7160, 9.8280, 11.2320]
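# e.g. first element: 0.34 (first app rate) * 0.34 (frac_act_ing) * 110.
# (food multiplier) = 12.716, per conc_0 = app_rate * frac_act_ing * food_multiplier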
try:
# specify an app_rates Series (that is a series of lists, each list representing
# a set of application rates for 'a' model simulation)
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='float')
trex_empty.food_multiplier_init_sg = pd.Series([110., 15., 240.], dtype='float')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
for i in range(len(trex_empty.frac_act_ing)):
result[i] = trex_empty.conc_initial(i, trex_empty.app_rates[i][0], trex_empty.food_multiplier_init_sg[i])
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_conc_timestep(self):
"""
unittest for function conc_timestep:
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = [6.25e-5, 0.039685, 7.8886e-30]
try:
trex_empty.foliar_diss_hlife = pd.Series([.25, 0.75, 0.01], dtype='float')
conc_0 = pd.Series([0.001, 0.1, 10.0])
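# the expected values are consistent with first-order foliar decay over one
# timestep, conc_0 * 0.5 ** (1. / foliar_diss_hlife), e.g. 0.001 * 0.5**4 = 6.25e-5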
for i in range(len(conc_0)):
result[i] = trex_empty.conc_timestep(i, conc_0[i])
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_percent_to_frac(self):
"""
unittest for function percent_to_frac:
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([.04556, .1034, .9389], dtype='float')
try:
trex_empty.percent_incorp = pd.Series([4.556, 10.34, 93.89], dtype='float')
result = trex_empty.percent_to_frac(trex_empty.percent_incorp)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_inches_to_feet(self):
"""
unittest for function inches_to_feet:
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([0.37966, 0.86166, 7.82416], dtype='float')
try:
trex_empty.bandwidth = pd.Series([4.556, 10.34, 93.89], dtype='float')
result = trex_empty.inches_to_feet(trex_empty.bandwidth)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_at_bird(self):
"""
unittest for function at_bird:
adjusted_toxicity = self.ld50_bird * (aw_bird / self.tw_bird_ld50) ** (self.mineau_sca_fact - 1)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = pd.Series([69.17640, 146.8274, 56.00997], dtype='float')
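# e.g. first element: 100. * (15. / 175.) ** (1.15 - 1) = 69.1764 per the
# adjusted_toxicity formula in the docstring above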
try:
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
# following variable is unique to at_bird and is thus sent via arg list
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
for i in range(len(trex_empty.aw_bird_sm)):
result[i] = trex_empty.at_bird(i, trex_empty.aw_bird_sm[i])
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_at_bird1(self):
"""
unittest for function at_bird1; alternative approach using more vectorization:
adjusted_toxicity = self.ld50_bird * (aw_bird / self.tw_bird_ld50) ** (self.mineau_sca_fact - 1)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = pd.Series([69.17640, 146.8274, 56.00997], dtype='float')
try:
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
# for i in range(len(trex_empty.aw_bird_sm)):
# result[i] = trex_empty.at_bird(i, trex_empty.aw_bird_sm[i])
result = trex_empty.at_bird1(trex_empty.aw_bird_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_fi_bird(self):
"""
unittest for function fi_bird:
food_intake = (0.648 * (aw_bird ** 0.651)) / (1 - mf_w_bird)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([4.19728, 22.7780, 59.31724], dtype='float')
try:
#?? 'mf_w_bird_1' is a constant (i.e., not an input whose value changes per model simulation run); thus it should
#?? be specified here as a constant and not a pd.series -- if this is correct then go ahead and change next line
trex_empty.mf_w_bird_1 = pd.Series([0.1, 0.8, 0.9], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.fi_bird(trex_empty.aw_bird_sm, trex_empty.mf_w_bird_1)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_sc_bird(self):
"""
unittest for function sc_bird:
m_s_a_r = ((self.app_rate * self.frac_act_ing) / 128) * self.density * 10000 # maximum seed application rate=application rate*10000
risk_quotient = m_s_a_r / self.noaec_bird
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([6.637969, 77.805, 34.96289, np.nan], dtype='float')
try:
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4], [3.]], dtype='object')
trex_empty.app_rate_parsing() #get 'first_app_rate' per model simulation run
trex_empty.frac_act_ing = pd.Series([0.15, 0.20, 0.34, np.nan], dtype='float')
trex_empty.density = pd.Series([8.33, 7.98, 6.75, np.nan], dtype='float')
trex_empty.noaec_bird = pd.Series([5., 1.25, 12., np.nan], dtype='float')
result = trex_empty.sc_bird()
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_sa_bird_1(self):
"""
# unit test for function sa_bird_1
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result_sm = pd.Series([], dtype = 'float')
result_md = pd.Series([], dtype = 'float')
result_lg = pd.Series([], dtype = 'float')
expected_results_sm = pd.Series([0.228229, 0.704098, 0.145205], dtype = 'float')
expected_results_md = pd.Series([0.126646, 0.540822, 0.052285], dtype = 'float')
expected_results_lg = pd.Series([0.037707, 0.269804, 0.01199], dtype = 'float')
try:
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='float')
trex_empty.app_rate_parsing() #get 'first_app_rate' per model simulation run
trex_empty.density = pd.Series([8.33, 7.98, 6.75], dtype='float')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
trex_empty.aw_bird_md = pd.Series([115., 120., 130.], dtype='float')
trex_empty.aw_bird_lg = pd.Series([1015., 1020., 1030.], dtype='float')
#reiterate constants here (they have been set in 'trex_inputs'; repeated here for clarity)
trex_empty.mf_w_bird_1 = 0.1
trex_empty.nagy_bird_coef_sm = 0.02
trex_empty.nagy_bird_coef_md = 0.1
trex_empty.nagy_bird_coef_lg = 1.0
result_sm = trex_empty.sa_bird_1("small")
npt.assert_allclose(result_sm,expected_results_sm,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_md = trex_empty.sa_bird_1("medium")
npt.assert_allclose(result_md,expected_results_md,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_lg = trex_empty.sa_bird_1("large")
npt.assert_allclose(result_lg,expected_results_lg,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab_sm = [result_sm, expected_results_sm]
tab_md = [result_md, expected_results_md]
tab_lg = [result_lg, expected_results_lg]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab_sm, headers='keys', tablefmt='rst'))
print(tabulate(tab_md, headers='keys', tablefmt='rst'))
print(tabulate(tab_lg, headers='keys', tablefmt='rst'))
return
def test_sa_bird_2(self):
"""
# unit test for function sa_bird_2
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result_sm = pd.Series([], dtype = 'float')
result_md = pd.Series([], dtype = 'float')
result_lg = pd.Series([], dtype = 'float')
expected_results_sm =pd.Series([0.018832, 0.029030, 0.010483], dtype = 'float')
expected_results_md = pd.Series([2.774856e-3, 6.945353e-3, 1.453192e-3], dtype = 'float')
expected_results_lg =pd.Series([2.001591e-4, 8.602729e-4, 8.66163e-5], dtype = 'float')
try:
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.app_rate_parsing() #get 'first_app_rate' per model simulation run
trex_empty.density = pd.Series([8.33, 7.98, 6.75], dtype='float')
trex_empty.max_seed_rate = pd.Series([33.19, 20.0, 45.6])
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
trex_empty.aw_bird_md = pd.Series([115., 120., 130.], dtype='float')
trex_empty.aw_bird_lg = pd.Series([1015., 1020., 1030.], dtype='float')
#reiterate constants here (they have been set in 'trex_inputs'; repeated here for clarity)
trex_empty.nagy_bird_coef_sm = 0.02
trex_empty.nagy_bird_coef_md = 0.1
trex_empty.nagy_bird_coef_lg = 1.0
result_sm = trex_empty.sa_bird_2("small")
npt.assert_allclose(result_sm,expected_results_sm,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_md = trex_empty.sa_bird_2("medium")
npt.assert_allclose(result_md,expected_results_md,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_lg = trex_empty.sa_bird_2("large")
npt.assert_allclose(result_lg,expected_results_lg,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab_sm = [result_sm, expected_results_sm]
tab_md = [result_md, expected_results_md]
tab_lg = [result_lg, expected_results_lg]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab_sm, headers='keys', tablefmt='rst'))
print(tabulate(tab_md, headers='keys', tablefmt='rst'))
print(tabulate(tab_lg, headers='keys', tablefmt='rst'))
return
def test_sa_mamm_1(self):
"""
# unit test for function sa_mamm_1
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result_sm = pd.Series([], dtype = 'float')
result_md = pd.Series([], dtype = 'float')
result_lg = pd.Series([], dtype = 'float')
expected_results_sm =pd.Series([0.022593, 0.555799, 0.010178], dtype = 'float')
expected_results_md = pd.Series([0.019298, 0.460911, 0.00376], dtype = 'float')
expected_results_lg =pd.Series([0.010471, 0.204631, 0.002715], dtype = 'float')
try:
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.app_rate_parsing() #get 'first_app_rate' per model simulation run
trex_empty.density = pd.Series([8.33, 7.98, 6.75], dtype='float')
# following parameter values are needed for the internal call to "at_mamm"
trex_empty.tw_mamm = pd.Series([350., 225., 390.], dtype='float')
trex_empty.ld50_mamm = pd.Series([321., 100., 400.], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
trex_empty.aw_mamm_md = pd.Series([35., 45., 25.], dtype='float')
trex_empty.aw_mamm_lg = pd.Series([1015., 1020., 1030.], dtype='float')
# reiterate constants here (they have been set in 'trex_inputs'; repeated here for clarity)
trex_empty.mf_w_bird_1 = 0.1
trex_empty.nagy_mamm_coef_sm = 0.015
trex_empty.nagy_mamm_coef_md = 0.035
trex_empty.nagy_mamm_coef_lg = 1.0
result_sm = trex_empty.sa_mamm_1("small")
npt.assert_allclose(result_sm,expected_results_sm,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_md = trex_empty.sa_mamm_1("medium")
npt.assert_allclose(result_md,expected_results_md,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_lg = trex_empty.sa_mamm_1("large")
npt.assert_allclose(result_lg,expected_results_lg,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab_sm = [result_sm, expected_results_sm]
tab_md = [result_md, expected_results_md]
tab_lg = [result_lg, expected_results_lg]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab_sm, headers='keys', tablefmt='rst'))
print(tabulate(tab_md, headers='keys', tablefmt='rst'))
print(tabulate(tab_lg, headers='keys', tablefmt='rst'))
return
def test_sa_mamm_2(self):
"""
# unit test for function sa_mamm_2
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result_sm = pd.Series([], dtype = 'float')
result_md = pd.Series([], dtype = 'float')
result_lg = pd.Series([], dtype = 'float')
expected_results_sm =pd.Series([2.46206e-3, 3.103179e-2, 1.03076e-3], dtype = 'float')
expected_results_md = pd.Series([1.304116e-3, 1.628829e-2, 4.220702e-4], dtype = 'float')
expected_results_lg =pd.Series([1.0592147e-4, 1.24391489e-3, 3.74263186e-5], dtype = 'float')
try:
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.app_rate_parsing() #get 'first_app_rate' per model simulation run
trex_empty.density = pd.Series([8.33, 7.98, 6.75], dtype='float')
trex_empty.max_seed_rate = pd.Series([33.19, 20.0, 45.6])
# following parameter values are needed for the internal call to "at_mamm"
trex_empty.tw_mamm = pd.Series([350., 225., 390.], dtype='float')
trex_empty.ld50_mamm = pd.Series([321., 100., 400.], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
trex_empty.aw_mamm_md = pd.Series([35., 45., 25.], dtype='float')
trex_empty.aw_mamm_lg = pd.Series([1015., 1020., 1030.], dtype='float')
# reiterate constants here (they have been set in 'trex_inputs'; repeated here for clarity)
trex_empty.mf_w_mamm_1 = 0.1
trex_empty.nagy_mamm_coef_sm = 0.015
trex_empty.nagy_mamm_coef_md = 0.035
trex_empty.nagy_mamm_coef_lg = 1.0
result_sm = trex_empty.sa_mamm_2("small")
npt.assert_allclose(result_sm,expected_results_sm,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_md = trex_empty.sa_mamm_2("medium")
npt.assert_allclose(result_md,expected_results_md,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_lg = trex_empty.sa_mamm_2("large")
npt.assert_allclose(result_lg,expected_results_lg,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab_sm = [result_sm, expected_results_sm]
tab_md = [result_md, expected_results_md]
tab_lg = [result_lg, expected_results_lg]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab_sm, headers='keys', tablefmt='rst'))
print(tabulate(tab_md, headers='keys', tablefmt='rst'))
print(tabulate(tab_lg, headers='keys', tablefmt='rst'))
return
def test_sc_mamm(self):
"""
# unit test for function sc_mamm
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result_sm = pd.Series([], dtype = 'float')
result_md = pd.Series([], dtype = 'float')
result_lg = pd.Series([], dtype = 'float')
expected_results_sm =pd.Series([2.90089, 15.87995, 8.142130], dtype = 'float')
expected_results_md = pd.Series([2.477926, 13.16889, 3.008207], dtype = 'float')
expected_results_lg =pd.Series([1.344461, 5.846592, 2.172211], dtype = 'float')
try:
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.app_rate_parsing() #get 'first_app_rate' per model simulation run
trex_empty.density = pd.Series([8.33, 7.98, 6.75], dtype='float')
# following parameter values are needed for the internal call to "anoael_mamm" (adjusted NOAEL)
trex_empty.tw_mamm = pd.Series([350., 225., 390.], dtype='float')
trex_empty.noael_mamm = pd.Series([2.5, 3.5, 0.5], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
trex_empty.aw_mamm_md = pd.Series([35., 45., 25.], dtype='float')
trex_empty.aw_mamm_lg = pd.Series([1015., 1020., 1030.], dtype='float')
# reiterate constants here (they have been set in 'trex_inputs'; repeated here for clarity)
trex_empty.mf_w_mamm_1 = 0.1
trex_empty.nagy_mamm_coef_sm = 0.015
trex_empty.nagy_mamm_coef_md = 0.035
trex_empty.nagy_mamm_coef_lg = 1.0
result_sm = trex_empty.sc_mamm("small")
npt.assert_allclose(result_sm,expected_results_sm,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_md = trex_empty.sc_mamm("medium")
npt.assert_allclose(result_md,expected_results_md,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_lg = trex_empty.sc_mamm("large")
npt.assert_allclose(result_lg,expected_results_lg,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab_sm = [result_sm, expected_results_sm]
tab_md = [result_md, expected_results_md]
tab_lg = [result_lg, expected_results_lg]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab_sm, headers='keys', tablefmt='rst'))
print(tabulate(tab_md, headers='keys', tablefmt='rst'))
print(tabulate(tab_lg, headers='keys', tablefmt='rst'))
return
def test_ld50_rg_bird(self):
"""
# unit test for function ld50_rg_bird (LD50ft-2 for Row/Band/In-furrow granular birds)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = pd.Series([346.4856, 25.94132, np.nan], dtype='float')
try:
# following parameter values are unique for ld50_rg_bird
trex_empty.application_type = pd.Series(['Row/Band/In-furrow-Granular',
'Row/Band/In-furrow-Granular',
'Row/Band/In-furrow-Liquid'], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.app_rate_parsing() #get 'max app rate' per model simulation run
trex_empty.frac_incorp = pd.Series([0.25, 0.76, 0.05], dtype= 'float')
trex_empty.bandwidth = pd.Series([2., 10., 30.], dtype = 'float')
trex_empty.row_spacing = pd.Series([20., 32., 50.], dtype = 'float')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.ld50_rg_bird(trex_empty.aw_bird_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0,
equal_nan=True, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_ld50_rg_bird1(self):
"""
# unit test for function ld50_rg_bird1 (LD50ft-2 for Row/Band/In-furrow granular birds)
this is a duplicate of the 'test_ld50_rg_bird' method using a more vectorized approach to the
calculations; if desired, other routines could be modified similarly
-- comparing this method with 'test_ld50_rg_bird', both appear to run in about the same time for this test,
-- but that is unlikely to hold when hundreds of model simulation runs are executed (and only a small
-- number of the application_types apply to this method); we therefore continue to use the non-vectorized
-- approach -- this should be revisited when a large run needs to be executed
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = pd.Series([346.4856, 25.94132, np.nan], dtype='float')
try:
# following parameter values are unique for ld50_rg_bird1
trex_empty.application_type = pd.Series(['Row/Band/In-furrow-Granular',
'Row/Band/In-furrow-Granular',
'Row/Band/In-furrow-Liquid'], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.app_rate_parsing() #get 'max app rate' per model simulation run
trex_empty.frac_incorp = pd.Series([0.25, 0.76, 0.05], dtype= 'float')
trex_empty.bandwidth = pd.Series([2., 10., 30.], dtype = 'float')
trex_empty.row_spacing = pd.Series([20., 32., 50.], dtype = 'float')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.ld50_rg_bird1(trex_empty.aw_bird_sm)
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, equal_nan=True, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_ld50_bl_bird(self):
"""
# unit test for function ld50_bl_bird (LD50ft-2 for broadcast liquid birds)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([46.19808, 33.77777, np.nan], dtype='float')
try:
# following parameter values are unique for ld50_bl_bird
trex_empty.application_type = pd.Series(['Broadcast-Liquid', 'Broadcast-Liquid',
'Non-Broadcast'], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.ld50_bl_bird(trex_empty.aw_bird_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0,
err_msg='', verbose=True, equal_nan=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_ld50_bg_bird(self):
"""
# unit test for function ld50_bg_bird (LD50ft-2 for broadcast granular)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([46.19808, np.nan, 0.4214033], dtype='float')
try:
# following parameter values are unique for ld50_bg_bird
trex_empty.application_type = pd.Series(['Broadcast-Granular', 'Broadcast-Liquid',
'Broadcast-Granular'], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.ld50_bg_bird(trex_empty.aw_bird_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0,
err_msg='', verbose=True, equal_nan=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_ld50_rl_bird(self):
"""
# unit test for function ld50_rl_bird (LD50ft-2 for Row/Band/In-furrow liquid birds)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([np.nan, 2.20701, 0.0363297], dtype='float')
try:
# following parameter values are unique for ld50_rl_bird
trex_empty.application_type = pd.Series(['Broadcast-Granular', 'Row/Band/In-furrow-Liquid',
'Row/Band/In-furrow-Liquid'], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.frac_incorp = pd.Series([0.25, 0.76, 0.05], dtype= 'float')
trex_empty.bandwidth = pd.Series([2., 10., 30.], dtype = 'float')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.ld50_rl_bird(trex_empty.aw_bird_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0,
err_msg='', verbose=True, equal_nan=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_at_mamm(self):
"""
unittest for function at_mamm:
adjusted_toxicity = self.ld50_mamm * ((self.tw_mamm / aw_mamm) ** 0.25)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = pd.Series([705.5036, 529.5517, 830.6143], dtype='float')
try:
trex_empty.ld50_mamm = pd.Series([321., 275., 432.], dtype='float')
trex_empty.tw_mamm = pd.Series([350., 275., 410.], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
for i in range(len(trex_empty.ld50_mamm)):
result[i] = trex_empty.at_mamm(i, trex_empty.aw_mamm_sm[i])
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_anoael_mamm(self):
"""
unittest for function anoael_mamm:
adjusted_toxicity = self.noael_mamm * ((self.tw_mamm / aw_mamm) ** 0.25)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([5.49457, 9.62821, 2.403398], dtype='float')
try:
trex_empty.noael_mamm = pd.Series([2.5, 5.0, 1.25], dtype='float')
trex_empty.tw_mamm = pd.Series([350., 275., 410.], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.anoael_mamm(trex_empty.aw_mamm_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_fi_mamm(self):
"""
unittest for function fi_mamm:
food_intake = (0.621 * (aw_mamm ** 0.564)) / (1 - mf_w_mamm)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([3.17807, 16.8206, 42.28516], dtype='float')
try:
trex_empty.mf_w_mamm_1 = pd.Series([0.1, 0.8, 0.9], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.fi_mamm(trex_empty.aw_mamm_sm, trex_empty.mf_w_mamm_1)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_ld50_bl_mamm(self):
"""
# unit test for function ld50_bl_mamm (LD50ft-2 for broadcast liquid)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([4.52983, 9.36547, np.nan], dtype='float')
try:
# following parameter values are unique for ld50_bl_mamm
trex_empty.application_type = pd.Series(['Broadcast-Liquid', 'Broadcast-Liquid',
'Non-Broadcast'], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
# following parameter values are needed for internal call to "test_at_mamm"
# results from "test_at_mamm" test using these values are [705.5036, 529.5517, 830.6143]
trex_empty.ld50_mamm = pd.Series([321., 275., 432.], dtype='float')
trex_empty.tw_mamm = pd.Series([350., 275., 410.], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.ld50_bl_mamm(trex_empty.aw_mamm_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='',
verbose=True, equal_nan=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_ld50_bg_mamm(self):
"""
# unit test for function ld50_bg_mamm (LD50ft-2 for broadcast granular)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([4.52983, 9.36547, np.nan], dtype='float')
try:
# following parameter values are unique for ld50_bg_mamm
trex_empty.application_type = pd.Series(['Broadcast-Granular', 'Broadcast-Granular',
'Broadcast-Liquid'], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
# following parameter values are needed for internal call to "at_mamm"
# results from "test_at_mamm" test using these values are [705.5036, 529.5517, 830.6143]
trex_empty.ld50_mamm = pd.Series([321., 275., 432.], dtype='float')
trex_empty.tw_mamm = pd.Series([350., 275., 410.], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.ld50_bg_mamm(trex_empty.aw_mamm_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0,
err_msg='', verbose=True, equal_nan=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_ld50_rl_mamm(self):
"""
# unit test for function ld50_rl_mamm (LD50ft-2 for Row/Band/In-furrow liquid mammals)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([np.nan, 0.6119317, 0.0024497], dtype='float')
try:
# following parameter values are unique for ld50_rl_mamm
trex_empty.application_type = pd.Series(['Broadcast-Granular',
'Row/Band/In-furrow-Liquid',
'Row/Band/In-furrow-Liquid',], dtype='object')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.frac_incorp = pd.Series([0.25, 0.76, 0.05], dtype= 'float')
# following parameter values are needed for internal call to "at_mamm"
# results from "test_at_mamm" test using these values are [705.5036, 529.5517, 830.6143]
trex_empty.ld50_mamm = pd.Series([321., 275., 432.], dtype='float')
trex_empty.tw_mamm = pd.Series([350., 275., 410.], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
trex_empty.bandwidth = pd.Series([2., 10., 30.], dtype = 'float')
result = trex_empty.ld50_rl_mamm(trex_empty.aw_mamm_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0,
err_msg='', verbose=True, equal_nan=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_ld50_rg_mamm(self):
"""
# unit test for function ld50_rg_mamm
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([33.9737, 7.192681, np.nan], dtype='float')
try:
# following parameter values are unique for ld50_rg_mamm
trex_empty.application_type = pd.Series(['Row/Band/In-furrow-Granular',
'Row/Band/In-furrow-Granular',
'Row/Band/In-furrow-Liquid',], dtype='object')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.frac_incorp = pd.Series([0.25, 0.76, 0.05], dtype= 'float')
trex_empty.bandwidth = pd.Series([2., 10., 30.], dtype = 'float')
trex_empty.row_spacing = pd.Series([20., 32., 50.], dtype = 'float')
# following parameter values are needed for internal call to "at_mamm"
# results from "test_at_mamm" test using these values are [705.5036, 529.5517, 830.6143]
trex_empty.ld50_mamm = pd.Series([321., 275., 432.], dtype='float')
trex_empty.tw_mamm = pd.Series([350., 275., 410.], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.ld50_rg_mamm(trex_empty.aw_mamm_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0,
err_msg='', verbose=True, equal_nan=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_eec_diet_max(self):
"""
combined unit test for methods eec_diet_max & eec_diet_timeseries;
* this test calls eec_diet_max, which in turn calls eec_diet_timeseries (which produces
concentration timeseries), which in turn calls conc_initial and conc_timestep
* eec_diet_max processes the timeseries and extracts the maximum values
* this test tests both eec_diet_max & eec_diet_timeseries together (ok, so this violates the exact definition
* of 'unittest', get over it)
* the assertion check is that the maximum values from the timeseries match expectations
* this assumes that for the maximums to be 'as expected' then the timeseries are as well
* note: the 1st application day ('day_out') for the 2nd model simulation run is set to 0 here
* to make sure the timeseries processing works when an application occurs on 1st day of year
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([1.734, 145.3409, 0.702], dtype='float')
num_app_days = pd.Series([], dtype='int')
try:
#specifying 3 different application scenarios of 1, 4, and 2 applications
trex_empty.app_rates = pd.Series([[0.34], [0.78, 11.34, 3.54, 1.54], [2.34, 1.384]], dtype='object')
trex_empty.day_out = pd.Series([[5], [0, 10, 20, 50], [150, 250]], dtype='object')
for i in range(len(trex_empty.app_rates)):
trex_empty.num_apps[i] = len(trex_empty.app_rates[i])
num_app_days[i] = len(trex_empty.day_out[i])
assert (trex_empty.num_apps[i] == num_app_days[i]), 'series of app-rates and app_days do not match'
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02])
trex_empty.food_multiplier_init_sg = 15.
trex_empty.foliar_diss_hlife = pd.Series([25., 5., 45.])
result = trex_empty.eec_diet_max(trex_empty.food_multiplier_init_sg)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_eec_dose_bird(self):
"""
unit test for function eec_dose_bird;
internal call to 'eec_diet_max' --> 'eec_diet_timeseries' --> 'conc_initial' and 'conc_timestep' are included;
internal call to 'fi_bird' included
unit tests of this routine include the following approach:
* this test verifies that the logic & calculations performed within the 'eec_dose_bird' are correctly implemented
* methods called inside of 'eec_dose_bird' are not retested/recalculated
* only the correct passing of variables/values is verified (calculations having been verified in previous unittests)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([7.763288, 2693.2339, 22.20837], dtype='float')
num_app_days = pd.Series([], dtype='int')
try:
trex_empty.app_rates = pd.Series([[0.34], [0.78, 11.34, 3.54, 1.54], [2.34, 1.384]], dtype='object')
trex_empty.day_out = pd.Series([[5], [5, 10, 20, 50], [150, 250]], dtype='object')
for i in range(len(trex_empty.app_rates)):
trex_empty.num_apps[i] = len(trex_empty.app_rates[i])
num_app_days[i] = len(trex_empty.day_out[i])
assert (trex_empty.num_apps[i] == num_app_days[i]), 'list of app-rates and app_days do not match'
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02])
trex_empty.food_multiplier_init_sg = 240.
trex_empty.foliar_diss_hlife = pd.Series([25., 5., 45.])
# variables for 'fi_bird' (values reflect unittest for 'at_bird')
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
trex_empty.mf_w_bird_1 = pd.Series([0.1, 0.8, 0.9], dtype='float')
result = trex_empty.eec_dose_bird(trex_empty.aw_bird_sm, trex_empty.mf_w_bird_1,
trex_empty.food_multiplier_init_sg)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_arq_dose_bird(self):
"""
unit test for function arq_dose_bird;
internal calls to 'eec_diet_max' --> 'eec_diet_timeseries' --> 'conc_initial' and 'conc_timestep' are included
unit tests of this routine include the following approach:
* this test verifies that the logic & calculations performed within the 'arq_dose_bird' are correctly implemented
* methods called inside of 'arq_dose_bird' are not retested/recalculated
* only the correct passing of variables/values is verified (calculations having been verified in previous unittests)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([0.007014, 1.146429, 0.02478172], dtype='float')
num_app_days = pd.Series([], dtype='int')
try:
#specifying 3 different application scenarios of 1, 4, and 2 applications
trex_empty.app_rates = pd.Series([[0.34], [0.78, 11.34, 3.54, 1.54], [2.34, 1.384]], dtype='object')
trex_empty.day_out = pd.Series([[5], [5, 10, 20, 50], [150, 250]], dtype='object')
for i in range(len(trex_empty.app_rates)):
trex_empty.num_apps[i] = len(trex_empty.app_rates[i])
num_app_days[i] = len(trex_empty.day_out[i])
assert (trex_empty.num_apps[i] == num_app_days[i]), 'list of app-rates and app_days do not match'
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02])
trex_empty.food_multiplier_init_sg = 15.
trex_empty.foliar_diss_hlife = pd.Series([25., 5., 45.])
# variables for 'at_bird' (values reflect unittest for 'fi_bird')
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
trex_empty.mf_w_bird_1 = pd.Series([0.1, 0.8, 0.9], dtype='float')
result = trex_empty.arq_dose_bird(trex_empty.aw_bird_sm, trex_empty.mf_w_bird_1,
trex_empty.food_multiplier_init_sg)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_arq_diet_bird(self):
"""
unit test for function arq_diet_bird;
internal calls to 'eec_diet_max' --> 'eec_diet_timeseries' --> 'conc_initial' and 'conc_timestep' are included
unit tests of this routine include the following approach:
* this test verifies that the logic & calculations performed within the 'arq_diet_bird' are correctly implemented
* methods called inside of 'arq_diet_bird' are not retested/recalculated
* only the correct passing of variables/values is verified (calculations having been verified in previous unittests)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([0.019563, 1.509543, 0.0046715], dtype='float')
result = pd.Series([], dtype = 'float')
num_app_days = pd.Series([], dtype='int')
try:
#specifying 3 different application scenarios of 1, 4, and 2 applications
trex_empty.app_rates = pd.Series([[0.34], [0.78, 11.34, 3.54, 1.54], [2.34, 1.384]], dtype='object')
trex_empty.day_out = pd.Series([[5], [5, 10, 20, 50], [150, 250]], dtype='object')
for i in range(len(trex_empty.app_rates)):
trex_empty.num_apps[i] = len(trex_empty.app_rates[i])
num_app_days[i] = len(trex_empty.day_out[i])
assert (trex_empty.num_apps[i] == num_app_days[i]), 'list of app-rates and app_days do not match'
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
#trex_empty.food_multiplier_init_sg = pd.Series([110., 15., 240.], dtype='float')
trex_empty.food_multiplier_init_sg = 110.
trex_empty.foliar_diss_hlife = pd.Series([25., 5., 45.], dtype='float')
trex_empty.lc50_bird = pd.Series([650., 718., 1102.], dtype='float')
#for i in range (len(trex_empty.food_multiplier_init_sg)):
# result[i] = trex_empty.arq_diet_bird(trex_empty.food_multiplier_init_sg[i])
result = trex_empty.arq_diet_bird(trex_empty.food_multiplier_init_sg)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_crq_diet_bird(self):
"""
unit test for function crq_diet_bird;
internal calls to 'eec_diet_max' --> 'eec_diet_timeseries' --> 'conc_initial' and 'conc_timestep' are included
unit tests of this routine include the following approach:
* this test verifies that the logic & calculations performed within the 'crq_diet_bird' are correctly implemented
* methods called inside of 'crq_diet_bird' are not retested/recalculated
* only the correct passing of variables/values is verified (calculations having been verified in previous unittests)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([2.5432, 60.214, 0.050471], dtype='float')
num_app_days = pd.Series([], dtype='int')
try:
#specifying 3 different application scenarios of 1, 4, and 2 applications
trex_empty.app_rates = pd.Series([[0.34], [0.78, 11.34, 3.54, 1.54], [2.34, 1.384]], dtype='object')
trex_empty.day_out = pd.Series([[5], [5, 10, 20, 50], [150, 250]], dtype='object')
for i in range(len(trex_empty.app_rates)):
trex_empty.num_apps[i] = len(trex_empty.app_rates[i])
num_app_days[i] = len(trex_empty.day_out[i])
assert (trex_empty.num_apps[i] == num_app_days[i]), 'list of app-rates and app_days do not match'
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02])
trex_empty.food_multiplier_init_sg = 110.
trex_empty.foliar_diss_hlife = pd.Series([25., 5., 45.])
trex_empty.noaec_bird = pd.Series([5., 18., 102.])
result = trex_empty.crq_diet_bird(trex_empty.food_multiplier_init_sg)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_eec_dose_mamm(self):
"""
unit test for function eec_dose_mamm;
internal calls to 'eec_diet_max' --> 'eec_diet_timeseries' --> 'conc_initial' and 'conc_timestep' are included
unit tests of this routine include the following approach:
* this test verifies that the logic & calculations performed within the 'eec_dose_mamm' are correctly implemented
* methods called inside of 'eec_dose_mamm' are not retested/recalculated
* only the correct passing of variables/values is verified (calculations having been verified in previous unittests)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([0.36738, 124.3028, 0.989473], dtype='float')
num_app_days = pd.Series([], dtype='int')
try:
#specifying 3 different application scenarios of 1, 4, and 2 applications
trex_empty.app_rates = pd.Series([[0.34], [0.78, 11.34, 3.54, 1.54], [2.34, 1.384]], dtype='object')
trex_empty.day_out = pd.Series([[5], [5, 10, 20, 50], [150, 250]], dtype='object')
for i in range(len(trex_empty.app_rates)):
trex_empty.num_apps[i] = len(trex_empty.app_rates[i])
num_app_days[i] = len(trex_empty.day_out[i])
assert (trex_empty.num_apps[i] == num_app_days[i]), 'list of app-rates and app_days do not match'
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02])
trex_empty.food_multiplier_init_sg = 15.
trex_empty.foliar_diss_hlife = pd.Series([25., 5., 45.])
trex_empty.mf_w_mamm_1 = pd.Series([0.1, 0.8, 0.9], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.eec_dose_mamm(trex_empty.aw_mamm_sm, trex_empty.mf_w_mamm_1,
trex_empty.food_multiplier_init_sg)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_arq_dose_mamm(self):
"""
unit test for function arq_dose_mamm;
internal calls to 'eec_diet_max' --> 'eec_diet_timeseries' --> 'conc_initial' and 'conc_timestep' are included
unit tests of this routine include the following approach:
* this test verifies that the logic & calculations performed within the 'arq_dose_mamm' are correctly implemented
* methods called inside of 'arq_dose_mamm' are not retested/recalculated
* only the correct passing of variables/values is verified (calculations having been verified in previous unittests)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([0.0083319, 3.755716, 0.01906], dtype='float')
num_app_days = pd.Series([], dtype='int')
try:
#specifying 3 different application scenarios of 1, 4, and 2 applications
trex_empty.app_rates = pd.Series([[0.34], [0.78, 11.34, 3.54, 1.54], [2.34, 1.384]], dtype='object')
trex_empty.day_out = pd.Series([[5], [5, 10, 20, 50], [150, 250]], dtype='object')
for i in range(len(trex_empty.app_rates)):
trex_empty.num_apps[i] = len(trex_empty.app_rates[i])
num_app_days[i] = len(trex_empty.day_out[i])
assert (trex_empty.num_apps[i] == num_app_days[i]), 'list of app-rates and app_days do not match'
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02])
trex_empty.food_multiplier_init_sg = 240.
trex_empty.foliar_diss_hlife = pd.Series([25., 5., 45.])
trex_empty.noael_mamm = pd.Series([2.5, 5.0, 1.25], dtype='float')
trex_empty.tw_mamm = pd.Series([350., 275., 410.], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
trex_empty.ld50_mamm = pd.Series([321., 275., 432.], dtype='float')
trex_empty.mf_w_mamm_1 =
|
pd.Series([0.1, 0.8, 0.9], dtype='float')
|
pandas.Series
|
import asyncio
from concurrent.futures import ThreadPoolExecutor
from concurrent.futures._base import TimeoutError
import datetime
import os
import shutil
import sys
import time
import pandas as pd
import pytest
import requests
from .test_utils import *
from glide import *
from glide.utils import join
def test_placeholder_node(rootdir):
nodes = PlaceholderNode("extract") | CSVLoad("load")
glider, infile, outfile = file_glider(rootdir, "csv", nodes)
glider["extract"] = CSVExtract("extract")
with open(outfile, "w") as f:
glider.consume([infile], extract=dict(chunksize=10, nrows=20), load=dict(f=f))
def test_return_value():
glider = Glider(Return("load"))
val = glider.consume(range(0, 10))
assert val == list(range(0, 10))
def test_invalid_node_name():
with pytest.raises(AssertionError):
glider = Glider(PlaceholderNode("data") | Print("load"))
def test_shell_node(rootdir):
nodes = CSVExtract("extract") | CSVLoad("load") | Shell("shell") | Print("print")
glider, infile, outfile = file_glider(rootdir, "csv", nodes)
with open(outfile, "w") as f:
glider.consume(
[infile],
extract=dict(chunksize=10, nrows=20),
load=dict(f=f),
shell=dict(cmd="ls -lt %s" % outfile, shell=True, capture_output=True),
)
def test_profiler_node(rootdir):
nodes = Profile("profile") | CSVExtract("extract") | CSVLoad("load")
glider, infile, outfile = file_glider(rootdir, "csv", nodes)
with open(outfile, "w") as f:
glider.consume([infile], extract=dict(chunksize=10, nrows=20), load=dict(f=f))
def test_filter_node(rootdir):
nodes = (
CSVExtract("extract")
| Filter("filter", func=lambda n, d: len(d) == 5)
| Reduce("reduce", flatten=True)
| LenPrint("len")
| CSVLoad("load")
| AssertFunc("length_check", func=lambda n, d: len(d) == 5)
)
glider, infile, outfile = file_glider(rootdir, "csv", nodes)
with open(outfile, "w") as f:
glider.consume([infile], extract=dict(chunksize=10, nrows=15), load=dict(f=f))
def test_file_copy(rootdir):
nodes = FileCopy("copy") | Print("print")
glider, infile, outfile = file_glider(rootdir, "csv", nodes)
if os.path.isfile(outfile):
os.remove(outfile)
glider.consume([infile], copy=dict(f_out=outfile))
assert os.path.isfile(outfile)
def test_file_concat(rootdir):
nodes = FileConcat("concat") | Print("print")
glider, infile, outfile = file_glider(rootdir, "csv", nodes)
infile1 = "%s/%s.1.csv" % (test_config["OutputDirectory"], TEST_DATA_NAME)
infile2 = "%s/%s.2.csv" % (test_config["OutputDirectory"], TEST_DATA_NAME)
copyfile(infile, infile1)
copyfile(infile, infile2)
infiles = [infile1, infile2]
try:
if os.path.isfile(outfile):
os.remove(outfile)
glider.consume([infiles], concat=dict(f_out=outfile))
assert os.path.isfile(outfile)
finally:
for infile in infiles:
rmfile(infile)
def test_join():
l1 = [
dict(a=1, b=2, c="test1"),
dict(a=2, b=4, c="test2"),
dict(a=3, b=6, c="test3"),
]
l2 = [
dict(a=1, b=2, c="test1", d=5.1),
dict(a=2, b=5, c="test2"),
dict(a=3, b=7, c="test3"),
dict(a=4, b=9, c="test4"),
]
l3 = [dict(a=1, b=2, d=6), dict(a=2, b=11, d=7)]
data = [l1, l2, l3]
on = "a"
dfs = [
|
pd.DataFrame.from_records(l, index=on)
|
pandas.DataFrame.from_records
|
from cProfile import label
import pickle
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.metrics import silhouette_score
# load data
df =
|
pd.read_csv(r"C:\Users\Lenovo\Desktop\Project\data.csv")
|
pandas.read_csv
|
# coding: utf-8
import pymysql
import numpy as np
import pandas as pd
import csv
import xgboost as xgb
from numpy import loadtxt
from xgboost import XGBClassifier
from xgboost import plot_importance
from xgboost import plot_tree
# other required python files
import feature
###################### DB connect
db = pymysql.connect(host="", port=3306, user="", passwd="",db="")
### train_set - skeleton
def make_train_set():
SQL = "SELECT order_id, user_id, order_dow, order_hour_of_day FROM orders"
orders_df = pd.read_sql(SQL, db)
SQL = "SELECT order_id FROM order_products__train"
train_df = pd.read_sql(SQL, db)
print("make train set - basic start")
# ------------------ find the user matching each train order_id, then look up the products that user bought most recently
# drop duplicate order_ids >> looks like a count, but it is only used to deduplicate
train_df= train_df.groupby("order_id").aggregate("count").reset_index()
# find the user_id for each order_id and merge
train_df = pd.merge(train_df, orders_df, how="inner", on="order_id")
# merge with prior
# product list matching each user and order_id
train_df = pd.merge(train_df, feature.latest_order(), how="inner", on="user_id")
# take only product_id, aisle (sub-category), and department (major category) from the products table and merge
# products_df = pd.read_csv( "products.csv", usecols=["product_id", "aisle_id", "department_id"])
SQL = "SELECT product_id, aisle_id, department_id FROM products"
products_df = pd.read_sql(SQL, db)
train_df =
|
pd.merge(train_df, products_df, how="inner", on="product_id")
|
pandas.merge
|
import streamlit as st
import pandas as pd
import numpy as np
import plotly.express as px
from plotly.subplots import make_subplots
import plotly.graph_objects as go
import matplotlib.pyplot as plt
import datetime
from PIL import Image
import pickle
#intro
# st.sidebar.write("This is an application for predicting COVID cases around the country!")
# st.sidebar.button("Predict")
from HTML_snippets import Title_html
st.markdown(Title_html, unsafe_allow_html=True) #Title rendering
st.markdown("The dashboard will visualize the Covid-19 cases worldwide")
st.markdown("Coronavirus disease (COVID-19) is an infectious disease caused by a newly discovered coronavirus. Most people infected with the COVID-19 virus will experience mild to moderate respiratory illness and recover without requiring special treatment. This app gives you the real-time predicted daily new cases of COVID-19")
st.sidebar.title("Visualization Selector")
st.sidebar.markdown("Select the Charts/Plots accordingly:")
# DATA_URL=('E:\Data science Projects\NIELIT project\covid_19_world.csv')
# For different use case where the data does not change often
# @st.cache(persist=True)
def load_data():
data=pd.read_csv("./../2020-08-01_2020-08-04_predictions_example.csv")
# data = pd.read_csv("https://github.com/OxCGRT/covid-policy-tracker/blob/master/data/OxCGRT_latest.csv")
return data
df=load_data()
if st.checkbox('Show dataframe'):
st.write(df)
# st.sidebar.checkbox("Show Analysis by Country", True, key=1)
select = st.sidebar.selectbox('Select a Country',df['CountryName'].unique())
#get the country selected in the selectbox
country_data = df[df['CountryName'] == select]
if st.sidebar.checkbox("Show Analysis by Country", True, key=2):
st.markdown("## **Country level analysis**")
if not st.checkbox('Hide Graph', False, key=1):
country_total_graph = px.line(
country_data,
x='Date',
y='PredictedDailyNewCases',
labels={
'PredictedDailyNewCases':'<b>Number of Cases (per 100k?)</b>',
'Date':'<b>Date</b>'
},
title=f'<b>Overall Predicted Daily New Cases in {select}</b>')
#color='Date')
country_total_graph.update_layout(
xaxis_tickformat="%b %d %Y",
xaxis_nticks=len(list(country_data['Date'])),
yaxis_range = [0,max(list(country_data['PredictedDailyNewCases']))]
)
country_total_graph.update_yaxes(tick0 = 0)
st.plotly_chart(country_total_graph)
#st.write(country_data)
stringency = st.slider('Select the level of stringency for NPIs', 0, 9)
prescribe_df = pd.read_csv('all_2021q1_test_task.csv')
prescribe_df = prescribe_df[prescribe_df['CountryName'] == select] #select the country
prescribe_df = prescribe_df[pd.to_datetime(prescribe_df['Date']) >= datetime.datetime.today()] #select today and future dates
prescribe_df = prescribe_df[prescribe_df['PrescriptionIndex'] == stringency] #select the relevant prescription index
st.write(prescribe_df)
# all_npis = ['C1_School closing', 'C2_Workplace closing', 'C3_Cancel public events', 'C4_Restrictions on gatherings',
# 'C5_Close public transport', 'C6_Stay at home requirements', 'C7_Restrictions on internal movement',
# 'C8_International travel controls', 'H1_Public information campaigns', 'H2_Testing policy',
# 'H3_Contact tracing', 'H6_Facial Coverings', 'Date', 'CountryName', 'RegionName', 'PrescriptionIndex']
# `npis` is in reverse order of `all_npis` because of the way the matrix ends up when it's transposed
# npis = ['H6_Facial Coverings', 'H3_Contact tracing', 'H2_Testing policy',
# 'H1_Public information campaigns', 'C8_International travel controls',
# 'C7_Restrictions on internal movement', 'C6_Stay at home requirements',
# 'C5_Close public transport', 'C4_Restrictions on gatherings', 'C3_Cancel public events',
# 'C2_Workplace closing', 'C1_School closing']
first_date = datetime.datetime.today() - datetime.timedelta(days=1)
last_date =
|
pd.to_datetime(prescribe_df['Date'].values[-1])
|
pandas.to_datetime
|
# category: ["region","city","parent_category_name","category_name","user_type","image_top_1","param_1","param_2","param_3"]("user_id"?)
# 1. category base count features
# 2. category embedding.
from utils import *
import pandas as pd
import gc
train = pd.read_csv("../input/train.csv", parse_dates = ["activation_date"]).drop(["deal_probability", "image", "image_top_1"],axis=1)
test = pd.read_csv("../input/test.csv", parse_dates = ["activation_date"]).drop(["image", "image_top_1"],axis=1)
train_active = pd.read_csv("../input/train_active.csv", parse_dates = ["activation_date"])
test_active = pd.read_csv("../input/test_active.csv", parse_dates = ["activation_date"])
all_df = pd.concat([train, test, train_active, test_active])
del train_active, test_active;gc.collect()
all_df["dayofweek"] = all_df.activation_date.dt.weekday
train["dayofweek"] = train.activation_date.dt.weekday
test["dayofweek"] = test.activation_date.dt.weekday
all_df["one"] = 1
print("Done Reading All Data")
def get_category_df(col):
tmp = pd.DataFrame()
all_df[col] = all_df[col].fillna("NAN")
tmp["{}_count".format(col)] = all_df.groupby(col)["one"].sum()
tmp["{}_unique_user_count".format(col)] = all_df[["user_id", col]].groupby(col).agg(pd.Series.nunique)
tmp["{}_price_median".format(col)] = all_df[[col, "price"]].groupby(col).agg(np.median)
tmp["{}_price_std".format(col)] = all_df[[col, "price"]].groupby(col).agg(np.std)
tmp["{}_price_max".format(col)] = all_df[[col, "price"]].groupby(col).agg(np.max)
tmp["{}_price_min".format(col)] = all_df[[col, "price"]].groupby(col).agg(np.min)
tmp["latest_date"] = all_df[[col, "activation_date"]].groupby(col).max()
tmp["first_date"] = all_df[[col, "activation_date"]].groupby(col).min()
tmp["{}_diff".format(col)] = (tmp["latest_date"] - tmp["first_date"]).dt.days
tmp["{}_average_period".format(col)] = tmp["{}_diff".format(col)] / tmp["{}_count".format(col)]
tmp.drop(["latest_date", "first_date"], axis=1, inplace=True)
return tmp.reset_index()
print("Categorical Features...")
region = get_category_df("region")
city = get_category_df("city")
parent_category_name = get_category_df("parent_category_name")
category_name = get_category_df("category_name")
user_type = get_category_df("user_type")
param_1 = get_category_df("param_1")
param_2 = get_category_df("param_2")
param_3 = get_category_df("param_3")
category = {"region":region, "city":city, "parent_category_name":parent_category_name
,"category_name":category_name,"user_type":user_type, "param_1":param_1
, "param_2":param_2, "param_3":param_3}
cate_col = list(category.keys())
train = train[cate_col]
test = test[cate_col]
for col, d in category.items():
train = pd.merge(train, d, on=col, how="left")
test = pd.merge(test, d, on=col, how="left")
train.drop(cate_col, axis=1, inplace=True)
test.drop(cate_col, axis=1, inplace=True)
to_parquet(train, "../features/fe_categorical_base_features_train.parquet")
to_parquet(test, "../features/fe_categorical_base_features_test.parquet")
# weekday
def get_category_weekday_df(col):
all_df[col] = all_df[col].fillna("NAN")
tmp =
|
pd.DataFrame()
|
pandas.DataFrame
|
from data_set_preprocessor.classes.processing_method.processing_method import ProcessingMethod
import pandas
class OneHotEncoderProcessingMethod(ProcessingMethod):
def process_data_set(self, data_set):
processed_data_set = data_set.copy()
categorical_columns = self.get_non_numerical_columns(processed_data_set)
if categorical_columns:
processed_data_set = self.encode_dataset(categorical_columns, processed_data_set)
return processed_data_set
def get_non_numerical_columns(self, processed_data_set):
numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
numerical_columns = list(processed_data_set.select_dtypes(include=numerics).columns)
categorical_columns = list(set(processed_data_set.columns.tolist()) - set(numerical_columns))
return categorical_columns
def encode_dataset(self, categorical_columns, processed_data_set):
dummies = pandas.get_dummies(processed_data_set[categorical_columns])
processed_data_set =
|
pandas.concat([processed_data_set, dummies], axis=1)
|
pandas.concat
|
# -*- coding: utf-8 -*-
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from decorator import decorator
# Moving Average
def MA(ds, n):
MA = pd.Series(ds.rolling(n).mean(), name = 'MA_' + str(n))
return MA
# difference between short MA and long MA
def diffMA(ds, l=60, s=5):
"""
ds: dataset, a pandas Series
"""
ma_l = ds.rolling(l, min_periods=l).mean()
ma_s = ds.rolling(s, min_periods=s).mean()
return (ma_s/ma_l)-1
# Linear Regression
import statsmodels.formula.api as smf
def liner_regression(x,y):
model = smf.OLS(y,x)
results = model.fit()
b = results.params
R = results.rsquared
pvalue = results.pvalues
t='Y=%0.4fX --- R2=%0.2f%% --- p-value=%0.4f' %(b[0], R*100, pvalue[0])
return b,t
# slope of MA
def slopeMA(ds, m=60, dw=5):
ma = ds.rolling(m, min_periods=1).mean()
slope = ma.copy()
x = np.arange(1,dw+1)/100.0
for t in range(dw,len(slope)):
y = ma[t-dw+1:t+1] / ma[t-dw+1:t+1].mean() - 1
slope[t], _ = liner_regression(x,y)
return slope
# garch
def addGARCH(ds, hln=200):
ts = 100*ds.to_returns().dropna()
hts = ts[:hln].values
var = []
# rolling estimate var
while (len(hts)<len(ts)):
f_var, _ = forecast_var_from_garch(hts[-hln:])
var.append(f_var)
hts = np.append(hts, ts.iloc[len(hts)])
print(max(var), min(var))
var = np.append(np.zeros([len(ds)-len(var),1]), var)
return var
# historical var
def addVAR(ds, hln=200):
ts = 100*ds.to_returns().dropna()
hts = ts[:hln].values
var = []
# rolling estimate var
while (len(hts)<len(ts)):
f_var, _ = forecast_var_from_constant_mean(hts[-hln:])
var.append(f_var)
hts = np.append(hts, ts.iloc[len(hts)])
#print(max(var), min(var))
var = np.append(np.zeros([len(ds)-len(var),1]), var)
return var
# historical cov
def addCOV(ds1, ds2, hln=200):
ts1 = ds1.to_returns().dropna().values
ts2 = ds2.to_returns().dropna().values
cov = []
#cov.append(np.nan) # add 1 when dropna at prices->returns
for t in range(hln):
cov.append(np.nan)
for t in range(hln, len(ts1)+1):
f_cov = np.cov(ts1[t-hln:t], ts2[t-hln:t])
cov.append(f_cov[0][1]*10000)
return cov
# Seek Best Garch Model
import statsmodels.tsa.api as smt
def seek_garch_model(TS):
"""
TS is returns of a price-series
numpy array or array
# Seek Best GARCH Model
res_tup = seek_garch_model(ts)
order = res_tup[1]
p_ = order[0]
o_ = order[1]
q_ = order[2]
# Using student T distribution usually provides better fit
am = arch_model(ts, p=p_, o=o_, q=q_, dist='StudentsT')
res = am.fit(update_freq=5, disp='off')
fig = res.plot(annualize='D')
print(res.summary())
ts_plot(res.resid, lags=30)
"""
best_aic = np.inf
best_order = None
best_mdl = None
pq_rng = range(5) # [0,1,2,3,4]
d_rng = range(2) # [0,1]
for i in pq_rng:
for d in d_rng:
for j in pq_rng:
try:
tmp_mdl = smt.ARIMA(TS, order=(i,d,j)).fit(
method='mle', trend='nc'
)
tmp_aic = tmp_mdl.aic
if tmp_aic < best_aic:
best_aic = tmp_aic
best_order = (i, d, j)
best_mdl = tmp_mdl
except: continue
print('aic: {:6.5f} | order: {}'.format(best_aic, best_order))
return best_aic, best_order, best_mdl
#under arch model scheme
@decorator
def forecast_var(model_est_var, *args, **kwargs):
"""
Use historical data (0 to t) to forecast variance at t+1
via the model (defined in arch)
Args:
* args[0]: returns (numpy array or array): Returns for security.
Returns:
forecast variance: float
residuals: array
"""
if len(args)<1:
raise Exception("Not Enough Parameters")
m = model_est_var(*args, **kwargs)
res = m.fit(update_freq=5, disp='off')
return res.forecast().variance.values[-1][0], res.resid
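# typical use of the decorated estimators defined below (see addVAR/addGARCH above):
#   f_var, resid = forecast_var_from_constant_mean(historical_returns)
#   f_var, resid = forecast_var_from_garch(historical_returns)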
from arch.univariate import ConstantMean
@forecast_var
def forecast_var_from_constant_mean(returns):
"""
returns is historical returns
"""
return ConstantMean(returns)
from arch import arch_model
@forecast_var
def forecast_var_from_garch(returns):
"""
returns is historical returns
"""
return arch_model(returns, vol='Garch', p=1, o=0, q=1, dist='Normal')
@forecast_var
def forecast_var_from_best(returns):
"""
returns is historical returns
"""
from pyetf.algos import seek_garch_model
from arch import arch_model
res_tup = seek_garch_model(returns)
order = res_tup[1]
p_ = order[0]
o_ = order[1]
q_ = order[2]
return arch_model(returns, p=p_, o=o_, q=q_, dist='StudentsT')
# future mean and var
def future_mean_var(p, negative=False):
"""
p is numpy and prices series in future m dates
negative is True: calculate if p(t) < p(0)
negative is False: calculate all p(t)
"""
m = len(p)
dr = []
if negative:
for d in range(1,m):
if p[d]<p[0]:
dr.append((p[d]/p[0])**(1/d)-1)
if len(dr) == 0:
dr.append(0.)
else:
for d in range(1,m):
dr.append((p[d]/p[0])**(1/d)-1)
mean = np.mean(dr)
var = np.var(dr)
return mean, var
# future mean and var
def future_covar(p1, p2=None):
"""
p1 and p2 are numpy and prices series in future fm(30) dates
+ historical hm(200-fm) dates
p1 = p2: calculate var
"""
r1 = np.diff(p1)/p1[0:len(p1)-1]
if p2 is None:
return np.var(r1)
else:
r2 = np.diff(p2)/p2[0:len(p2)-1]
return np.cov(r1, r2)
# under keras model scheme
def strucutre_keras_model(train_model, addFeatures, addTarget, prices, prices_two=None, model_path="\\keras_model\\"):
"""
* prices: pandas series (or dataframe) with date index and prices
* function will save model estimated by keras
to a h5 file named 'est_var(_ticker_).h5'
* load model
from keras.models import load_model
model_load = load_model('est_var(_ticker_).h5')
"""
# 1. Data Process
if prices_two is None:
# 1.1 initial data
dataset, model_filename = initData(prices, model_path)
# 1.2 process data
x_dataset, y_dataset = processData(addFeatures, addTarget, dataset)
else:
dataset, model_filename = initData_two(prices, prices_two, model_path)
x_dataset, y_dataset = processData_two(addFeatures, addTarget, dataset)
# 1.3 split train set and test set
x_train, y_train, x_test, y_test = splitDataset(x_dataset, y_dataset)
# 1.4 shuffle train set
x_train, y_train = shuffleDataset(x_train, y_train)
# 2. Build Model
# 2.1 setup model
# 2.2 train model
model = train_model(x_train, y_train)
# 2.3 save model
model.save(model_filename)
# 3 evaluation
trainScore = model.evaluate(x_train, y_train)
testScore = model.evaluate(x_test, y_test)
print(f"Train Score Loss: {trainScore[0]:0.4f}")
print(f"Test Score Loss: {testScore[0]:0.4f}")
# 4. Plot Results
plt.figure(figsize=(10, 8))
#plt.plot(y_dataset)
#plt.plot(y_predict)
plt.plot(y_test)
plt.plot(model.predict(x_test))
plt.show()
from keras.models import load_model
def load_keras_model(prices, model_path="\\keras_model\\"):
# 1. Data Process
# 1.1 initial data
dataset, model_filename = initData(prices, model_path)
model = load_model(model_filename)
return dataset, model
# structure X and Y from dataset
def buildXY(dataset, pastDays=30):
"""
Result -> numpy
"""
m = pastDays
x_dataset = dataset.drop(columns='y').values
y_dataset = dataset['y'].values
dataX, dataY = [], []
for t in range(0, len(dataset)-m+1):
dataX.append(x_dataset[t:(t+m)])
dataY.append(y_dataset[t+m-1])
return np.array(dataX), np.array(dataY)
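# Shape sketch for buildXY (illustrative): for a dataframe with n rows, k feature
# columns and a 'y' column, X has shape (n - pastDays + 1, pastDays, k) and
# Y has shape (n - pastDays + 1,).
#
#   import numpy as np
#   import pandas as pd
#   demo = pd.DataFrame({"f1": np.arange(40.), "y": 2 * np.arange(40.)})
#   X, Y = buildXY(demo, pastDays=30)   # X.shape == (11, 30, 1), Y.shape == (11,)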
# structure X from dataset to forecast
def buildX(dataset, pastDays=30):
"""
Result -> numpy
"""
m = pastDays
x_dataset = dataset.values
dataX = []
for t in range(0, len(dataset)-m+1):
dataX.append(x_dataset[t:(t+m)])
return np.array(dataX)
# normalize dataset
from sklearn.preprocessing import MinMaxScaler
def normalise_windows(window_data):
scaler = MinMaxScaler(feature_range=(0, 1))
normalised_data = []
for window in window_data:
normalised_window = scaler.fit_transform(window)
normalised_data.append(normalised_window)
return normalised_data
# split dataset to train and test
def splitDataset(x_dataset, y_dataset, train_size_ratio=0.6):
train_size = int(len(x_dataset) * train_size_ratio)
x_train, x_test = x_dataset[0:train_size], x_dataset[train_size:len(x_dataset)]
y_train, y_test = y_dataset[0:train_size], y_dataset[train_size:len(y_dataset)]
return np.array(x_train), np.array(y_train), np.array(x_test), np.array(y_test)
# random train dataset
def shuffleDataset(x, y):
np.random.seed(10)
randomList = np.arange(x.shape[0])
np.random.shuffle(randomList)
return x[randomList], y[randomList]
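# Minimal sketch combining the split and shuffle helpers above (illustrative):
#
#   import numpy as np
#   X = np.arange(100).reshape(50, 2)
#   y = np.arange(50)
#   x_train, y_train, x_test, y_test = splitDataset(X, y, train_size_ratio=0.6)  # 30 / 20 rows
#   x_train, y_train = shuffleDataset(x_train, y_train)  # X and y stay aligned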
# initial Data and model name
def initData(prices, model_path, model_name='est_var'):
if isinstance(prices, pd.core.series.Series):
e = prices.name
dataset = pd.DataFrame(prices)
else:
e = prices.columns[0]
dataset = prices.copy()
print(f"{e}")
dataset = dataset.rename({e:'price'}, axis=1)
model_path = os.getcwd() + model_path
model_filename = model_path + model_name + '(' + e + ').h5'
return dataset, model_filename
# initial Data and model name
def initData_two(prices_one, prices_two, model_path, model_name='est_cov'):
if isinstance(prices_one, pd.core.series.Series):
e1 = prices_one.name
dataset =
|
pd.DataFrame(prices_one)
|
pandas.DataFrame
|
#!/usr/bin/env python
"""
Parsing GO Accession from a table file produced by InterProScan and mapping to GOSlim.
(c) <NAME> 2018 / MIT Licence
kinomoto[AT]sakura[DOT]idv[DOT]tw
"""
from __future__ import print_function
from os import path
import sys
import pandas as pd
from goatools.obo_parser import GODag
from goatools.mapslim import mapslim
from joblib import Parallel, delayed
import optparse
p = optparse.OptionParser("%prog [options] <eggnog_diamond_file> <go_obo_file>")
p.add_option("-o", "--out", dest="output_filename", help="Directory to store " "the output file [default: GO_term_annotation.txt]", action="store", type="string", default="GO_term_annotation.txt")
p.add_option("-g", "--goslim", dest="goslim_obo_file", action="store",
help="The .obo file for the most current GO Slim terms "
"[default: Null]", type="string", default=None)
p.add_option("-O", "--goslim_out", dest="goslim_output_filename", action="store", help="Directory to store the output file [default: " "GOSlim_annotation.txt]", type="string", default="GOSlim_annotation.txt")
p.add_option("-t", "--goslim_type", dest="goslim_type", action="store", type="string", default="direct", help="One of `direct` or `all`. Defines "
"whether the output should contain all GOSlim terms (all "
"ancestors) or only direct GOSlim terms (only direct "
"ancestors) [default: direct]")
p.add_option("-s", "--sort", dest="is_sort", action="store_true", default=False, help="Sort the output table [default: False]")
opts, args = p.parse_args()
# check for correct number of arguments
if len(args) != 2:
p.print_help()
sys.exit(1)
interpro_file = args[0]
assert path.exists(interpro_file), "file %s not found!" % interpro_file
obo_file = args[1]
assert path.exists(obo_file), "file %s not found!" % obo_file
# check that --goslim is set
USE_SLIM = False
if (opts.goslim_obo_file is not None):
assert path.exists(opts.goslim_obo_file), "file %s not found!" % opts.goslim_obo_file
USE_SLIM = True
# check that slim_out is either "direct" or "all" and set according flag
if opts.goslim_type.lower() == "direct":
ONLY_DIRECT = True
elif opts.goslim_type.lower() == "all":
ONLY_DIRECT = False
else:
p.print_help()
sys.exit(1)
# load InterProScan_tsv_file
interpro_table = pd.read_csv(interpro_file, sep='\t',skiprows=3,skipfooter=3,engine='python')
#interpro_go = interpro_table[['#query_name', 'GO_terms']]
all_protein=list(interpro_table['#query_name'])
gos=list(interpro_table['GO_terms'])
# load obo files
go = GODag(obo_file, load_obsolete=True)
output_hd = ['Protein Accession', 'GO Category', 'GO Accession', 'GO Description', 'GO Level']
output_table = pd.DataFrame(columns=output_hd)
def tmp_func(pro):
all_go_accs_in_a_protein = set()
go_accs = gos[pro]
output_hd = ['Protein Accession', 'GO Category', 'GO Accession', 'GO Description', 'GO Level']
output_table = pd.DataFrame(columns=output_hd)
if not pd.isnull(go_accs):
all_go_accs_in_a_protein = go_accs.split(',')
#print(pro)
if len(all_go_accs_in_a_protein) > 0:
for go_term in all_go_accs_in_a_protein:
if go_term not in go:
continue
query_term = go.query_term(go_term)
output_table = output_table.append(pd.DataFrame({'Protein Accession': [all_protein[pro]], 'GO Category': [query_term.namespace], 'GO Accession': [go_term], 'GO Description': [query_term.name], 'GO Level':[query_term.level]}), ignore_index=True)
return(output_table)
#len(all_protein)
# start to annotate
results=Parallel(n_jobs=15)(delayed(tmp_func)(pro) for pro in range(len(all_protein)))
output_hd = ['Protein Accession', 'GO Category', 'GO Accession', 'GO Description', 'GO Level']
output_table =
|
pd.DataFrame(columns=output_hd)
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
"""
module for trade class
"""
import math
import datetime as dt
import logging
import pandas as pd
from pyecharts.charts import Bar, Line
from pyecharts import options as opts
import xalpha.remain as rm
from xalpha.cons import convert_date, line_opts, myround, xirr, yesterdayobj
from xalpha.exceptions import ParserFailure, TradeBehaviorError
from xalpha.record import irecord
import xalpha.universal as xu
from xalpha.universal import get_rt
logger = logging.getLogger(__name__)
def xirrcal(cftable, trades, date, startdate=None, guess=0.01):
"""
calculate the xirr rate
:param cftable: cftable (pd.DataFrame) with date and cash columns
:param trades: list [trade1, ...], each item is a trade object,
whose shares would be sold out virtually
:param date: string of date or datetime object,
the date when virtually all holding positions being sold
:param guess: floating number, a guess at the xirr rate solution to be used
as a starting point for the numerical solution
:returns: the IRR as a single floating number
"""
date = convert_date(date)
partcftb = cftable[cftable["date"] <= date]
if len(partcftb) == 0:
return 0
if not startdate:
cashflow = [(row["date"], row["cash"]) for i, row in partcftb.iterrows()]
else:
if not isinstance(startdate, dt.datetime):
startdate = dt.datetime.strptime(
startdate.replace("-", "").replace("/", ""), "%Y%m%d"
)
start_cash = 0
for fund in trades:
start_cash += fund.briefdailyreport(startdate).get("currentvalue", 0)
cashflow = [(startdate, -start_cash)]
partcftb = partcftb[partcftb["date"] > startdate]
cashflow.extend([(row["date"], row["cash"]) for i, row in partcftb.iterrows()])
rede = 0
for fund in trades:
if not isinstance(fund, itrade):
partremtb = fund.remtable[fund.remtable["date"] <= date]
if len(partremtb) > 0:
rem = partremtb.iloc[-1]["rem"]
else:
rem = []
rede += fund.aim.shuhui(
fund.briefdailyreport(date).get("currentshare", 0), date, rem
)[1]
else:  # exchange-traded position
rede += fund.briefdailyreport(date).get("currentvalue", 0)
cashflow.append((date, rede))
return xirr(cashflow, guess)
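# Minimal usage sketch (illustrative): cftable needs "date" and "cash" columns and
# trades is a list of existing trade/itrade objects whose holdings are virtually
# liquidated on `date`; `some_trade` below is assumed to be such an object.
#
#   import pandas as pd
#   cft = pd.DataFrame({"date": pd.to_datetime(["2020-01-02", "2020-06-30"]),
#                       "cash": [-1000.0, 200.0]})
#   rate = xirrcal(cft, [some_trade], "2020-12-31")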
def bottleneck(cftable):
"""
find the max total input in the history given cftable with cash column
:param cftable: pd.DataFrame of cftable
"""
if len(cftable) == 0:
return 0
# cftable = cftable.reset_index(drop=True) # unnecessary as iloc use natural rows instead of default index
inputl = [-sum(cftable.iloc[:i].cash) for i in range(1, len(cftable) + 1)]
return myround(max(inputl))
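# Worked sketch of bottleneck (illustrative): cash flows of -100, -50, +30 give
# cumulative inputs of 100, 150 and 120, so the maximum historical input is 150.
#
#   import pandas as pd
#   cft = pd.DataFrame({"date": pd.to_datetime(["2020-01-02", "2020-02-03", "2020-03-02"]),
#                       "cash": [-100.0, -50.0, 30.0]})
#   bottleneck(cft)   # -> 150.0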
def turnoverrate(cftable, end=yesterdayobj()):
"""
calculate the annualized turnoverrate
:param cftable: pd.DataFrame of cftable
:param end: str or obj of datetime for the end date of the estimation
"""
if len(cftable) == 0:
return 0
end = convert_date(end)
start = cftable.iloc[0].date
tradeamount = sum(abs(cftable.loc[:, "cash"]))
turnover = tradeamount / bottleneck(cftable) / 2.0
if (end - start).days <= 0:
return 0
return turnover * 365 / (end - start).days
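# Worked sketch of turnoverrate (illustrative): for the cftable above the trade
# amount is 180 and the bottleneck is 150, so over the 60 days up to 2020-03-02
# the annualized turnover is 180 / 150 / 2 * 365 / 60 ≈ 3.65.
#
#   turnoverrate(cft, end="2020-03-02")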
def vtradevolume(cftable, freq="D", rendered=True):
"""
helper function for visualizing the trade summary
:param cftable: cftable (pandas.DataFrame) with at least date and cash columns
:param freq: one character string, frequency label, now supporting D for date,
W for week and M for month, namely the trade volume is shown based on the time unit
:returns: the Bar object
"""
### WARN: datazoom and time conflict, sliding till 1970..., needs a further look into pyecharts
startdate = cftable.iloc[0]["date"]
if freq == "D":
# datedata = [d.to_pydatetime() for d in cftable["date"]]
datedata = pd.date_range(startdate, yesterdayobj(), freq="D")
selldata = [
[row["date"].to_pydatetime(), row["cash"]]
for _, row in cftable.iterrows()
if row["cash"] > 0
]
buydata = [
[row["date"].to_pydatetime(), row["cash"]]
for _, row in cftable.iterrows()
if row["cash"] < 0
]
elif freq == "W":
cfmerge = cftable.groupby([cftable["date"].dt.year, cftable["date"].dt.week])[
"cash"
].sum()
# datedata = [
# dt.datetime.strptime(str(a) + "4", "(%Y, %W)%w")
# for a, _ in cfmerge.iteritems()
# ]
datedata = pd.date_range(
startdate, yesterdayobj() + pd.Timedelta(days=7), freq="W-THU"
)
selldata = [
[dt.datetime.strptime(str(a) + "4", "(%G, %V)%w"), b]
for a, b in cfmerge.iteritems()
if b > 0
]
buydata = [
[dt.datetime.strptime(str(a) + "4", "(%G, %V)%w"), b]
for a, b in cfmerge.iteritems()
if b < 0
]
# %V pandas gives iso weeknumber which is different from python original %W or %U,
# see https://stackoverflow.com/questions/5882405/get-date-from-iso-week-number-in-python for more details
# python3.6+ required for %G and %V
# but now seems no equal distance between sell and buy data, no idea why
elif freq == "M":
cfmerge = cftable.groupby([cftable["date"].dt.year, cftable["date"].dt.month])[
"cash"
].sum()
# datedata = [
# dt.datetime.strptime(str(a) + "15", "(%Y, %m)%d")
# for a, _ in cfmerge.iteritems()
# ]
datedata = pd.date_range(
startdate, yesterdayobj() + pd.Timedelta(days=31), freq="MS"
)
selldata = [
[dt.datetime.strptime(str(a) + "1", "(%Y, %m)%d"), b]
for a, b in cfmerge.iteritems()
if b > 0
]
buydata = [
[dt.datetime.strptime(str(a) + "1", "(%Y, %m)%d"), b]
for a, b in cfmerge.iteritems()
if b < 0
]
else:
raise ParserFailure("no such freq tag supporting")
buydata = [[d, round(x, 1)] for d, x in buydata]
selldata = [[d, round(x, 1)] for d, x in selldata]
bar = Bar()
datedata = list(datedata)
bar.add_xaxis(xaxis_data=datedata)
# buydata should come before selldata, since an empty list in the first series would make the output fig empty: may be a bug in pyecharts
bar.add_yaxis(series_name="买入", yaxis_data=buydata)
bar.add_yaxis(series_name="卖出", yaxis_data=selldata)
bar.set_global_opts(
tooltip_opts=opts.TooltipOpts(
is_show=True,
trigger="axis",
trigger_on="mousemove",
axis_pointer_type="cross",
),
datazoom_opts=[opts.DataZoomOpts(range_start=90, range_end=100)],
)
if rendered:
return bar.render_notebook()
else:
return bar
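# Minimal usage sketch (illustrative, using the cft frame from the sketches above):
# render the weekly trade-volume bars in a notebook, or keep the raw Bar object.
#
#   bar = vtradevolume(cft, freq="W", rendered=False)
#   bar.render("tradevolume.html")   # or vtradevolume(cft) directly in a notebook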
def vtradecost(
self, cftable, unitcost=False, start=None, end=yesterdayobj(), rendered=True
):
"""
visualization giving the average cost line together with netvalue line as well as buy and sell points
:returns: pyecharts.line
"""
funddata = []
costdata = []
pprice = self.price[self.price["date"] <= end]
pcftable = cftable
if start is not None:
pprice = pprice[pprice["date"] >= start]
pcftable = pcftable[pcftable["date"] >= start]
for _, row in pprice.iterrows():
date = row["date"]
funddata.append(row["netvalue"])
if unitcost:
cost = 0
if (date - self.cftable.iloc[0].date).days >= 0:
cost = self.unitcost(date)
costdata.append(cost)
coords = []
# pcftable = pcftable[abs(pcftable["cash"]) > threshold]
for i, r in pcftable.iterrows():
coords.append([r.date, pprice[pprice["date"] <= r.date].iloc[-1]["netvalue"]])
upper = pcftable.cash.abs().max()
lower = pcftable.cash.abs().min()
if upper == lower:
upper = 2 * lower + 1 # avoid zero in denominator
def marker_factory(x, y):
buy = pcftable[pcftable["date"] <= x].iloc[-1]["cash"]
if buy < 0:
color = "#ff7733"
else:
color = "#3366ff"
size = (abs(buy) - lower) / (upper - lower) * 5 + 5
return opts.MarkPointItem(
coord=[x.date(), y],
itemstyle_opts=opts.ItemStyleOpts(color=color),
# this nested itemstyle_opts within MarkPointItem is only supported for pyechart>1.7.1
symbol="circle",
symbol_size=size,
)
line = Line()
line.add_xaxis([d.date() for d in pprice.date])
if unitcost:
line.add_yaxis(
series_name="持仓成本", y_axis=costdata, is_symbol_show=False,
)
line.add_yaxis(
series_name="基金净值",
y_axis=funddata,
is_symbol_show=False,
markpoint_opts=opts.MarkPointOpts(data=[marker_factory(*c) for c in coords],),
)
line.set_global_opts(
datazoom_opts=[
opts.DataZoomOpts(
is_show=True, type_="slider", range_start=50, range_end=100
),
opts.DataZoomOpts(
is_show=True,
type_="slider",
orient="vertical",
range_start=50,
range_end=100,
),
],
tooltip_opts=opts.TooltipOpts(
is_show=True,
trigger="axis",
trigger_on="mousemove",
axis_pointer_type="cross",
),
)
if rendered:
return line.render_notebook()
else:
return line
class trade:
"""
Trade class with a fundinfo obj as input; its main attrs are cftable and remtable:
1. cftable: pd.DataFrame, the cash-flow table. Each row is a change date, with three columns date, cash and share
recording cash in/out and share changes for the investment target; all share figures are unadjusted values as of the trade date, and fund share conversions appear as one-off share increases/decreases in this table.
2. remtable: pd.DataFrame, the holdings table. Each row is a change date, with two columns date and rem; rem is a nested list
describing what remains of the positions bought at different times, see the remain module for details. Avoid calling this table directly unless necessary.
:param infoobj: info object as the trading aim
:param status: status table, obtained from record class
"""
def __init__(self, infoobj, status, cftable=None, remtable=None):
self.aim = infoobj
code = self.aim.code
self.code = code
self.name = self.aim.name
self.price = self.aim.price
if (cftable is not None and remtable is None) or (
cftable is None and remtable is not None
):
raise ValueError(
"You must provide both `cftable` and `remtable` for incremental trade engine"
)
# make sure the provided cftable and remtable are consistent with each other in dates, shares, etc.
if cftable is None:
self.cftable = pd.DataFrame([], columns=["date", "cash", "share"])
else:
self.cftable = cftable
if remtable is None:
self.remtable = pd.DataFrame([], columns=["date", "rem"])
else:
self.remtable = remtable
self.status = status.loc[:, ["date", code]]
self.status = self.status[self.status[code] != 0]
self._arrange()
def _arrange(self):
self.recorddate_set = set(self.status.date)
while 1:
try:
self._addrow()
except Exception as e:
if e.args[0] == "no other info to be add into cashflow table":
break
else:
raise e
def _addrow(self):
"""
Return cashflow table with one more line or raise an exception if there is no more line to add
The same logic also applies to rem table
On multiple operations for one fund on the same trading day: the case where the first historical purchase falls on a dividend day cannot be handled (in practice it does not occur), nor can multiple buys or sells within one day.
Having both a buy and a sell on the same day is unrealistic; multiple buys can only be merged into one record in the csv, which may introduce a 0.01 error in the share calculation. Buying or selling on a dividend day can be handled.
On the conversion day of structured funds purchases are closed, so the program simply ignores trades on that day; therefore multiple coexisting operations never arise.
"""
# the design of the remtable data structure is a mess; it is fragile even though it works for now
# possibly failing cases include:
# a buy/sell recorded on a holiday whose postponed date happens to be a conversion day (purchase/redemption theoretically impossible)
# or a dividend day (possibly missed because of the misalignment between date and rdate);
# or a purchase recorded on Sunday plus another on Monday: the Sunday record is booked as a Monday cash flow,
# the subsequent cash-flow update then starts from Tuesday, and the Monday record is dropped
code = self.aim.code
if len(self.cftable) == 0:
if len(self.status[self.status[code] != 0]) == 0:
raise Exception("no other info to be add into cashflow table")
i = 0
while self.status.iloc[i].loc[code] == 0:
i += 1
value = self.status.iloc[i].loc[code]
date = self.status.iloc[i].date
self.lastdate = date
if len(self.price[self.price["date"] >= date]) > 0:
date = self.price[self.price["date"] >= date].iloc[0]["date"]
else:
date = self.price[self.price["date"] <= date].iloc[-1]["date"]
# lastdate is not handled as carefully here as in the branch below; hopefully no other strange problems appear, to be revisited if a case shows up
# https://github.com/refraction-ray/xalpha/issues/47
# intuitively this spot very likely still has other issues
if value > 0:
feelabel = 100 * value - int(100 * value)
if round(feelabel, 1) == 0.5:
# binary encoding, 10000.005 is actually 10000.0050...1, see issue #59
feelabel = feelabel - 0.5
if abs(feelabel) < 1e-4:
feelabel = 0
else:
feelabel = None
value = int(value * 100) / 100
assert feelabel is None or feelabel >= 0.0, "the custom purchase fee must be non-negative"
rdate, cash, share = self.aim.shengou(value, date, fee=feelabel)
rem = rm.buy([], share, rdate)
else:
raise TradeBehaviorError("You cannot sell first when you never buy")
elif len(self.cftable) > 0:
# recorddate = list(self.status.date)
if not getattr(self, "lastdate", None):
lastdate = self.cftable.iloc[-1].date + pd.Timedelta(1, unit="d")
else:
lastdate = self.lastdate + pd.Timedelta(1, unit="d")
while (lastdate not in self.aim.specialdate) and (
(lastdate not in self.recorddate_set)
or (
(lastdate in self.recorddate_set)
and (
self.status[self.status["date"] == lastdate].loc[:, code].any()
== 0
)
)
):
lastdate += pd.Timedelta(1, unit="d")
if (lastdate - yesterdayobj()).days >= 1:
raise Exception("no other info to be add into cashflow table")
date = lastdate
# if there is no net value on that day, prefer shifting to a later date, otherwise shift to an earlier one
# recording accurate dates is still recommended, otherwise errors that cannot be fully reconciled may occur
if len(self.price[self.price["date"] >= date]) > 0:
date = self.price[self.price["date"] >= date].iloc[0]["date"]
else:
date = self.price[self.price["date"] <= date].iloc[-1]["date"]
if date != lastdate and date in list(self.status.date):
# the date was shifted onto another record date, which is very likely to cause problems!
logger.warning(
"账单日期 %s 非 %s 的净值记录日期,日期智能平移后 %s 与账单其他日期重合!交易处理极可能出现问题!! "
"靠后日期的记录被覆盖" % (lastdate, self.code, date)
)
self.lastdate = lastdate
if date > lastdate:
self.lastdate = date
# see https://github.com/refraction-ray/xalpha/issues/27, begin new date from last one in df is not reliable
label = self.aim.dividend_label  # cash dividend 0, dividend reinvestment 1
cash = 0
share = 0
rem = self.remtable.iloc[-1].rem
rdate = date
if (lastdate in self.recorddate_set) and (date not in self.aim.zhesuandate):
# deal with buy and sell, and with the fenhongzaitouru (dividend reinvestment) flag: a 0.05 marker in the original table labels dividend reinvestment
value = self.status[self.status["date"] <= lastdate].iloc[-1].loc[code]
if date in self.aim.fenhongdate:  # the 0.05 dividend-behaviour marker is only effective on dividend days
fenhongmark = round(10 * value - int(10 * value), 1)
if fenhongmark == 0.5 and label == 0:
label = 1 # fenhong reinvest
value = value - math.copysign(0.05, value)
elif fenhongmark == 0.5 and label == 1:
label = 0
value = value - math.copysign(0.05, value)
if value > 0: # value stands for purchase money
feelabel = 100 * value - int(100 * value)
if int(10 * feelabel) == 5:
feelabel = feelabel - 0.5
else:
feelabel = None
value = int(value * 100) / 100
rdate, dcash, dshare = self.aim.shengou(value, date, fee=feelabel)
rem = rm.buy(rem, dshare, rdate)
elif value < -0.005: # value stands for redemp share
feelabel = int(100 * value) - 100 * value
if int(10 * feelabel) == 5:
feelabel = feelabel - 0.5
else:
feelabel = None
value = int(value * 100) / 100
rdate, dcash, dshare = self.aim.shuhui(
-value, date, self.remtable.iloc[-1].rem, fee=feelabel
)
_, rem = rm.sell(rem, -dshare, rdate)
elif value >= -0.005 and value < 0:
# value now stands for the ratio of the remaining position to be sold; -0.005 stands for selling 100%
remainshare = sum(
self.cftable[self.cftable["date"] <= date].loc[:, "share"]
)
ratio = -value / 0.005
rdate, dcash, dshare = self.aim.shuhui(
remainshare * ratio, date, self.remtable.iloc[-1].rem, 0
)
_, rem = rm.sell(rem, -dshare, rdate)
else: # in case value=0, when specialday is in record day
rdate, dcash, dshare = date, 0, 0
cash += dcash
share += dshare
if date in self.aim.specialdate: # deal with fenhong and xiazhe
comment = self.price[self.price["date"] == date].iloc[0].loc["comment"]
if isinstance(comment, float):
if comment < 0:
dcash2, dshare2 = (
0,
sum([myround(sh * (-comment - 1)) for _, sh in rem]),
)  # xiazhe (downward conversions) are carried out separately based on the purchase date
rem = rm.trans(rem, -comment, date)
# myround(sum(cftable.loc[:,'share'])*(-comment-1))
elif comment > 0 and label == 0:
dcash2, dshare2 = (
myround(sum(self.cftable.loc[:, "share"]) * comment),
0,
)
rem = rm.copy(rem)
elif comment > 0 and label == 1:
dcash2, dshare2 = (
0,
myround(
sum(self.cftable.loc[:, "share"])
* (
comment
/ self.price[self.price["date"] == date]
.iloc[0]
.netvalue
)
),
)
rem = rm.buy(rem, dshare2, date)
cash += dcash2
share += dshare2
else:
raise ParserFailure("comments not recognized")
self.cftable = self.cftable.append(
pd.DataFrame([[rdate, cash, share]], columns=["date", "cash", "share"]),
ignore_index=True,
)
self.remtable = self.remtable.append(
pd.DataFrame([[rdate, rem]], columns=["date", "rem"]), ignore_index=True
)
def xirrrate(self, date=yesterdayobj(), startdate=None, guess=0.01):
"""
give the xirr rate for all the trade of the aim before date (virtually sold out on date)
:param date: string or obj of datetime, the virtually sell-all date
:param startdate: string or obj of datetime, the beginning date of calculation, default from first buy
"""
return xirrcal(self.cftable, [self], date, startdate, guess)
def dailyreport(self, date=yesterdayobj()):
date = convert_date(date)
partcftb = self.cftable[self.cftable["date"] <= date]
value = self.get_netvalue(date)
if len(partcftb) == 0:
reportdict = {
"基金名称": [self.name],
"基金代码": [self.code],
"当日净值": [value],
"持有份额": [0],
"基金现值": [0],
"基金总申购": [0],
"历史最大占用": [0],
"基金分红与赎回": [0],
"基金收益总额": [0],
}
df = pd.DataFrame(reportdict, columns=reportdict.keys())
return df
# totinput = myround(-sum(partcftb.loc[:,'cash']))
totinput = myround(
-sum([row["cash"] for _, row in partcftb.iterrows() if row["cash"] < 0])
)
totoutput = myround(
sum([row["cash"] for _, row in partcftb.iterrows() if row["cash"] > 0])
)
currentshare = myround(sum(partcftb.loc[:, "share"]))
currentcash = myround(currentshare * value)
btnk = bottleneck(partcftb)
turnover = turnoverrate(partcftb, date)
ereturn = myround(currentcash + totoutput - totinput)
if currentshare == 0:
unitcost = 0
else:
unitcost = round((totinput - totoutput) / currentshare, 4)
if btnk == 0:
returnrate = 0
else:
returnrate = round((ereturn / btnk) * 100, 4)
reportdict = {
"基金名称": [self.name],
"基金代码": [self.code],
"当日净值": [value],
"单位成本": [unitcost],
"持有份额": [currentshare],
"基金现值": [currentcash],
"基金总申购": [totinput],
"历史最大占用": [btnk],
"基金持有成本": [totinput - totoutput],
"基金分红与赎回": [totoutput],
"换手率": [turnover],
"基金收益总额": [ereturn],
"投资收益率": [returnrate],
}
df = pd.DataFrame(reportdict, columns=reportdict.keys())
return df
def get_netvalue(self, date=yesterdayobj()):
df = self.price[self.price["date"] <= date]
if df is None or len(df) == 0:
return 0
return df.iloc[-1].netvalue
def briefdailyreport(self, date=yesterdayobj()):
"""
quick summary of highly used attrs for trade
:param date: string or object of datetime
:returns: dict with several attrs: date, unitvalue, currentshare, currentvalue
"""
date = convert_date(date)
partcftb = self.cftable[self.cftable["date"] <= date]
if len(partcftb) == 0:
return {}
unitvalue = self.get_netvalue(date)
currentshare = myround(sum(partcftb.loc[:, "share"]))
currentvalue = myround(currentshare * unitvalue)
return {
"date": date,
"unitvalue": unitvalue,
"currentshare": currentshare,
"currentvalue": currentvalue,
}
def unitcost(self, date=yesterdayobj()):
"""
give the unitcost of fund positions
:param date: string or object of datetime
:returns: float number of unitcost
"""
partcftb = self.cftable[self.cftable["date"] <= date]
if len(partcftb) == 0:
return 0
totnetinput = myround(-sum(partcftb.loc[:, "cash"]))
currentshare = self.briefdailyreport(date).get("currentshare", 0)
# totnetinput
if currentshare > 0:
unitcost = totnetinput / currentshare
else:
unitcost = 0
return unitcost
def v_tradevolume(self, freq="D", rendered=True):
"""
visualization on trade summary
:param freq: string, "D", "W" and "M" are supported
:returns: pyecharts.charts.bar.render_notebook()
"""
return vtradevolume(self.cftable, freq=freq, rendered=rendered)
def v_tradecost(self, start=None, end=yesterdayobj(), rendered=True):
"""
visualization giving the average cost line together with netvalue line
:returns: pyecharts.line
"""
return vtradecost(
self, self.cftable, unitcost=True, start=start, end=end, rendered=rendered
)
def v_totvalue(self, end=yesterdayobj(), rendered=True, vopts=None):
"""
visualization on the total values daily change of the aim
"""
partp = self.price[self.price["date"] >= self.cftable.iloc[0].date]
# with a multi-fund record, the start date may not be the holding start date of this fund
partp = partp[partp["date"] <= end]
date = [d.date() for d in partp.date]
valuedata = [
self.briefdailyreport(d).get("currentvalue", 0) for d in partp.date
]
line = Line()
if vopts is None:
vopts = line_opts
line.add_xaxis(date)
line.add_yaxis(series_name="持仓总值", y_axis=valuedata, is_symbol_show=False)
line.set_global_opts(**vopts)
if rendered:
return line.render_notebook()
else:
return line
def __repr__(self):
return self.name + " 交易情况"
"""
combining the visualizations can follow the code below (based on the pyecharts v0.5.5 API)
from pyecharts import Overlap
overlap = Overlap()
overlap.add(self.v_tradecost())
overlap.add(self.v_tradevolume(bar_category_gap='95%'), yaxis_index=1,is_add_yaxis=True)
overlap
"""
class itrade(trade):
"""
Exchange-traded position; only the cftable cash-flow table is maintained
"""
def __init__(self, code, status, name=None):
"""
:param code: str. The code format is the same as required by :func:`xalpha.universal.get_daily`
:param status: the bookkeeping record table or an irecord instance.
:param name: Optional[str]. The name of the target can be provided.
"""
self.code = code
if isinstance(status, irecord):
self.status = status.filter(code)
else:
self.status = status[status.code == code]
# self.cftable = pd.DataFrame([], columns=["date", "cash", "share"])
try:
self.price = xu.get_daily(
self.code, start=self.status.iloc[0]["date"].strftime("%Y-%m-%d")
)
self.price["netvalue"] = self.price["close"]
except Exception as e:
logger.warning(
"%s when trade trying to get daily price of %s" % (e, self.code)
)
self.price = None
self._arrange()
if not name:
try:
self.name = get_rt(code)["name"]
except:
self.name = code
self.type_ = None
def get_type(self):
if not self.type_:
code = self.code
if (
code.startswith("SZ15900")
or code.startswith("SH5116")
or code.startswith("SH5117")
or code.startswith("SH5118")
or code.startswith("SH5119")
or code.startswith("SH5198")
):
self.type_ = "货币基金"
elif (
code.startswith("SH5")
or code.startswith("SZ16")
or code.startswith("SZ159")
):
self.type_ = "场内基金"
elif code.startswith("SH11") or code.startswith("SZ12"):
if self.name.endswith("转债"):
self.type_ = "可转债"
else:
self.type_ = "债券"
elif code.startswith("SZ399") or code.startswith("SH000"):
self.type_ = "指数"
elif (
code.startswith("SH60")
or code.startswith("SZ00")
or code.startswith("SZ20")
or code.startswith("SZ30")
):
self.type_ = "股票"
else:
self.type_ = "其他"
return self.type_
def _arrange(self):
d = {"date": [], "cash": [], "share": []}
for _, r in self.status.iterrows():
d["date"].append(r.date)
if r.share == 0:
d["cash"].append(-r.value)
d["share"].append(0)
elif r.value == 0:
d["cash"].append(0)
d["share"].append(r.share) # 直接记录总的应增加+或减少的份额数
else:
d["cash"].append(-r.value * r.share - abs(r.fee)) # 手续费总是正的,和买入同号
d["share"].append(r.share)
self.cftable =
|
pd.DataFrame(d)
|
pandas.DataFrame
|
import re
from loguru import logger
import pandas as pd
import numpy as np
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import (
MinMaxScaler,
OneHotEncoder
)
def set_df_index(
df: pd.core.frame.DataFrame,
df_index_col: str,
):
"""
Description
-----------
Sets an index of the supplied column name for the supplied DataFrame
Parameters
----------
df: pandas.core.frame.DataFrame or pandas.core.series.Series
The dataframe to be processed
df_index_col: str
The name of the column to set as the index
Returns
-------
df_out: pandas.DataFrame
The processed pandas Dataframe
Raises
------
Exception: Exception
Generic exception for logging
Examples
--------
df_out = set_df_index(
df=df,
df_index_col="col1"
)
"""
logger.info("Running set_df_index()")
try:
df_out = df.copy()
# Handle single records which are passed as a series
if isinstance(df_out, pd.core.series.Series):
df_out = (
pd.DataFrame(df_out)
.transpose()
.set_index(df_index_col, drop=True)
)
# Handle multiple records which are passed as a DataFrame
else:
df_out = df_out.set_index(df_index_col, drop=True)
return df_out
except Exception:
logger.exception("Error running set_df_index()")
def convert_to_str(
df: pd.core.frame.DataFrame,
convert_to_str_cols: list
):
"""
Description
-----------
Converts the supplied columns to strings for the supplied DataFrame
Parameters
----------
df: pandas.core.frame.DataFrame
The dataframe to be processed
convert_to_str_cols: list
The names of the columns to convert to strings
Returns
-------
df_out: pandas.DataFrame
The processed pandas Dataframe
Raises
------
Exception: Exception
Generic exception for logging
Examples
--------
df_out = convert_to_str(
df=df,
convert_to_str_cols="col1"
)
"""
logger.info("Running convert_to_str()")
try:
df_out = df.copy()
for column in convert_to_str_cols:
df_out[column] = df_out[column].astype(str)
return df_out
except Exception:
logger.exception("Error running convert_to_str()")
def drop_columns(
df: pd.core.frame.DataFrame,
drop_column_names: list,
):
"""
Description
-----------
Removes the specified columns from the supplied DataFrame
Parameters
----------
df: pandas.core.frame.DataFrame
The dataframe to be processed
drop_column_names: list
The names of the columns to remove
Returns
-------
df_out: pandas.DataFrame
The processed pandas Dataframe
Raises
------
Exception: Exception
Generic exception for logging
Examples
--------
df_out = drop_columns(
df=df,
drop_column_names=["col1", "col2"]
)
"""
logger.info("Running drop_columns()")
try:
df_out = df.drop(labels=drop_column_names, axis=1)
return df_out
except Exception:
logger.exception("Error running drop_columns()")
def create_title_cat(
df: pd.core.frame.DataFrame,
source_column: str,
dest_column: str,
title_codes: dict
):
"""
Description
-----------
Feature Engineers the title column of a pandas Dataframe by extracting the
title from the source column via regex, coding the values and creating the
dest_column.
Contains the extract title sub-function which extracts the blocks of text
and picks the group containing the title which will always be at index 1.
Parameters
----------
df: pandas.core.frame.DataFrame
The dataframe to be processed
source_column: str
The coulumn containing the data from which to extract the title.
dest_column: str
The new column to create containing the extracted title.
title_codes: dict
Dictionary containing the title values as keys (e.g. Mr, Mrs, mme etc.)
and the corresponding codes as values (e.g. gen_male, other_female etc.)
Returns
-------
df_out: pandas.DataFrame
The processed pandas Dataframe
Raises
------
Exception: Exception
Generic exception for logging
Examples
--------
df_out = create_title_cat(
df=df,
source_column="col1",
dest_column="col2",
title_codes={
"Mr": "gen_male",
"Mrs": "gen_female"
}
)
"""
logger.info("Running create_title_cat()")
# Define the extract_title function
def extract_title(
row: pd.core.series.Series,
source_column: str
):
"""
Extracts the title from the supplied specified title_source_column via
a regex. Applied to a pandas DataFrame
"""
title_search = re.search(r' ([A-Za-z]+)\.', row[source_column])
if title_search:
title = title_search.group(1)
else:
title = ""
return title
try:
# Apply the extract_title function to the dataframe
df_out = df.copy()
df_out[dest_column] = (
df_out.apply(
extract_title,
args=([source_column]),
axis=1
)
.replace(title_codes)
)
return df_out
except Exception:
logger.exception("Error running create_title_cat()")
def impute_age(
df: pd.core.frame.DataFrame,
source_column: str,
title_cat_column: str,
age_codes: dict
):
"""
If the age of a passenger is missing, infer this based upon the passenger
title.
Parameters
----------
df: pandas.DataFrame
The dataframe to be processed.
source_column: str
The column containing the age values.
title_cat_column: str
The column containing the title category values.
age_codes: dict
Dictionary containing the title category values as keys (e.g. "gen_male",
"gen_female") and the age to infer as values.
Returns
-------
df: pandas.DataFrame
The processed dataframe.
Raises
------
Exception: Exception
Generic exception for logging
Examples
--------
df = impute_age(
df=df,
source_column="Age",
title_cat_column="TitleCat",
age_codes=dict(
gen_male=30,
gen_female=35
. . .
)
)
"""
logger.info("Running impute_age()")
def infer_age(
row: pd.core.series.Series,
source_column: str,
title_cat_column: str,
age_codes: dict
):
"""Infers the age of a passenger based upon the passenger title,
Applied to a pandas dataframe"""
if pd.isnull(row[source_column]):
# Default if no age is configured for this title category
age = np.nan
# Iterate through the codes and assign an age based upon the title
for key, value in age_codes.items():
if row[title_cat_column] == key:
age = value
# Else return the age as an integer
else:
age = int(row[source_column])
return age
try:
# Apply the infer_age function to the pandas dataframe
df_out = df.copy()
df_out[source_column] = (
df_out.apply(
infer_age,
args=([source_column, title_cat_column, age_codes]),
axis=1
)
)
return df_out
except Exception:
logger.exception("Error running infer_age()")
def create_family_size(
df: pd.core.frame.DataFrame,
source_columns: list,
dest_column: str
):
"""
Description
-----------
Create a column for family_size via summing the source_columns.
Parameters
----------
df: pd.core.frame.DataFrame
The dataframe to be processed.
source_columns: list
The columns to be summed to calculate the family size.
dest_column: str
The destination column to contain the family size values.
Returns
-------
df_out: pd.core.frame.DataFrame.
The processed dataframe
Raises
------
Exception: Exception
Generic exception for logging
Examples
--------
df = create_family_size(
df=df,
source_columns=["col1", "col2"],
dest_column="col3
)
"""
logger.info("Running create_family_size()")
try:
df_out = df.copy()
df_out[dest_column] = df_out.apply(
lambda row: row[source_columns].sum() + 1,
axis=1
)
return df_out
except Exception:
logger.exception("Error running create_family_size()")
def impute_missing_values(
df: pd.core.frame.DataFrame,
strategy: str
):
"""
Description
-----------
Impute missing values for the dataframe via the specified strategy. Creates
separate imputers for:
* np.nan
* None
* "" (empty strings)
* int
Parameters
----------
df: pd.core.frame.DataFrame
The dataframe to be processed.
strategy: str
The strategy to use for imputation
Returns
-------
df_out: pd.core.frame.DataFrame.
The processed dataframe
Raises
------
Exception: Exception
Generic exception for logging
Examples
--------
df = impute_missing_values(
df=df,
strategy="most_frequent"
)
"""
logger.info("Running impute_missing_values()")
try:
df_out = df.copy()
# Create imputers for various dtypes
nan_imputer = SimpleImputer(
missing_values=np.nan,
strategy=strategy
)
none_imputer = SimpleImputer(
missing_values=None,
strategy=strategy
)
str_imputer = SimpleImputer(
missing_values="",
strategy=strategy
)
int_imputer = SimpleImputer(
missing_values=int,
strategy=strategy
)
df_out[:] = nan_imputer.fit_transform(df_out)
df_out[:] = none_imputer.fit_transform(df_out)
df_out[:] = str_imputer.fit_transform(df_out)
df_out[:] = int_imputer.fit_transform(df_out)
return df_out
except Exception:
logger.exception("Error running impute_missing_values()")
def scaler(
df: pd.core.frame.DataFrame,
scale_columns: str
):
"""
Description
-----------
Scale the supplied scale_columns for the dataframe.
Parameters
----------
df: pd.core.frame.DataFrame
The dataframe to be processed.
scale_columns: list
The columns to apply scaling to.
Returns
-------
df_out: pd.core.frame.DataFrame.
The processed dataframe
Raises
------
Exception: Exception
Generic exception for logging
Examples
--------
df = scaler(
df=df,
scale_columns=["col1", "col2"]
)
"""
logger.info("Running scaler()")
try:
df_out = df.copy()
for column in scale_columns:
scale = MinMaxScaler()
df_out[column] = scale.fit_transform(
df_out[column].values.reshape(-1, 1)
)
return df_out
except Exception:
logger.exception("Error running scaler()")
def one_hot_encoder(
df: pd.core.frame.DataFrame,
uid: str,
one_hot_columns: list
):
"""
Description
-----------
One hot encode the supplied scale_columns for the dataframe.
Parameters
----------
df: pd.core.frame.DataFrame
The dataframe to be processed.
one_hot_columns: list
The column dictionaries (with "col_name" and "categories" keys) to one hot encode.
Returns
-------
df_out: pd.core.frame.DataFrame.
The processed dataframe
Raises
------
Exception: Exception
Generic exception for logging
Examples
--------
df = one_hot_encoder(
df=df,
one_hot_columns=["col1", "col2"]
)
"""
logger.info("running one_hot_encoder()")
try:
df_out = df.copy()
# Reset to a generic index to allow merge by position
df_out.reset_index(inplace=True)
for column in one_hot_columns:
# Unpack column dictionary
col_name = column["col_name"]
categories = column["categories"]
# Set the column type as categorical
df_out[col_name] = df_out[col_name].astype("category")
# Create the encoder
oh_enc = OneHotEncoder(
categories=[categories],
sparse=False
)
# Fit the data to the encoder
enc_data = oh_enc.fit_transform(df_out[[col_name]].values)
# Set the column names
columns = [f"{col_name}_{col}" for col in categories]
# Create the dataframe of encoded data
df_oh =
|
pd.DataFrame(enc_data, columns=columns)
|
pandas.DataFrame
|
from collections import OrderedDict
from datetime import timedelta
import numpy as np
import pytest
from pandas.core.dtypes.dtypes import CategoricalDtype, DatetimeTZDtype
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Series,
Timedelta,
Timestamp,
_np_version_under1p14,
concat,
date_range,
option_context,
)
from pandas.core.arrays import integer_array
import pandas.util.testing as tm
def _check_cast(df, v):
"""
Check if all dtypes of df are equal to v
"""
assert all(s.dtype.name == v for _, s in df.items())
class TestDataFrameDataTypes:
def test_concat_empty_dataframe_dtypes(self):
df = DataFrame(columns=list("abc"))
df["a"] = df["a"].astype(np.bool_)
df["b"] = df["b"].astype(np.int32)
df["c"] = df["c"].astype(np.float64)
result = pd.concat([df, df])
assert result["a"].dtype == np.bool_
assert result["b"].dtype == np.int32
assert result["c"].dtype == np.float64
result = pd.concat([df, df.astype(np.float64)])
assert result["a"].dtype == np.object_
assert result["b"].dtype == np.float64
assert result["c"].dtype == np.float64
def test_empty_frame_dtypes_ftypes(self):
empty_df = pd.DataFrame()
tm.assert_series_equal(empty_df.dtypes, pd.Series(dtype=np.object))
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(empty_df.ftypes, pd.Series(dtype=np.object))
nocols_df = pd.DataFrame(index=[1, 2, 3])
tm.assert_series_equal(nocols_df.dtypes, pd.Series(dtype=np.object))
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(nocols_df.ftypes, pd.Series(dtype=np.object))
norows_df = pd.DataFrame(columns=list("abc"))
tm.assert_series_equal(
norows_df.dtypes, pd.Series(np.object, index=list("abc"))
)
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(
norows_df.ftypes, pd.Series("object:dense", index=list("abc"))
)
norows_int_df = pd.DataFrame(columns=list("abc")).astype(np.int32)
tm.assert_series_equal(
norows_int_df.dtypes, pd.Series(np.dtype("int32"), index=list("abc"))
)
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(
norows_int_df.ftypes, pd.Series("int32:dense", index=list("abc"))
)
odict = OrderedDict
df = pd.DataFrame(odict([("a", 1), ("b", True), ("c", 1.0)]), index=[1, 2, 3])
ex_dtypes = pd.Series(
odict([("a", np.int64), ("b", np.bool), ("c", np.float64)])
)
ex_ftypes = pd.Series(
odict([("a", "int64:dense"), ("b", "bool:dense"), ("c", "float64:dense")])
)
tm.assert_series_equal(df.dtypes, ex_dtypes)
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(df.ftypes, ex_ftypes)
# same but for empty slice of df
tm.assert_series_equal(df[:0].dtypes, ex_dtypes)
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(df[:0].ftypes, ex_ftypes)
def test_datetime_with_tz_dtypes(self):
tzframe = DataFrame(
{
"A": date_range("20130101", periods=3),
"B": date_range("20130101", periods=3, tz="US/Eastern"),
"C": date_range("20130101", periods=3, tz="CET"),
}
)
tzframe.iloc[1, 1] = pd.NaT
tzframe.iloc[1, 2] = pd.NaT
result = tzframe.dtypes.sort_index()
expected = Series(
[
np.dtype("datetime64[ns]"),
DatetimeTZDtype("ns", "US/Eastern"),
DatetimeTZDtype("ns", "CET"),
],
["A", "B", "C"],
)
tm.assert_series_equal(result, expected)
def test_dtypes_are_correct_after_column_slice(self):
# GH6525
df = pd.DataFrame(index=range(5), columns=list("abc"), dtype=np.float_)
odict = OrderedDict
tm.assert_series_equal(
df.dtypes,
pd.Series(odict([("a", np.float_), ("b", np.float_), ("c", np.float_)])),
)
tm.assert_series_equal(
df.iloc[:, 2:].dtypes, pd.Series(odict([("c", np.float_)]))
)
tm.assert_series_equal(
df.dtypes,
pd.Series(odict([("a", np.float_), ("b", np.float_), ("c", np.float_)])),
)
def test_select_dtypes_include_using_list_like(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.Categorical(list("abc")),
"g": pd.date_range("20130101", periods=3),
"h": pd.date_range("20130101", periods=3, tz="US/Eastern"),
"i": pd.date_range("20130101", periods=3, tz="CET"),
"j": pd.period_range("2013-01", periods=3, freq="M"),
"k": pd.timedelta_range("1 day", periods=3),
}
)
ri = df.select_dtypes(include=[np.number])
ei = df[["b", "c", "d", "k"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=[np.number], exclude=["timedelta"])
ei = df[["b", "c", "d"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=[np.number, "category"], exclude=["timedelta"])
ei = df[["b", "c", "d", "f"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=["datetime"])
ei = df[["g"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=["datetime64"])
ei = df[["g"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=["datetimetz"])
ei = df[["h", "i"]]
tm.assert_frame_equal(ri, ei)
with pytest.raises(NotImplementedError, match=r"^$"):
df.select_dtypes(include=["period"])
def test_select_dtypes_exclude_using_list_like(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
}
)
re = df.select_dtypes(exclude=[np.number])
ee = df[["a", "e"]]
tm.assert_frame_equal(re, ee)
def test_select_dtypes_exclude_include_using_list_like(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("now", periods=3).values,
}
)
exclude = (np.datetime64,)
include = np.bool_, "integer"
r = df.select_dtypes(include=include, exclude=exclude)
e = df[["b", "c", "e"]]
tm.assert_frame_equal(r, e)
exclude = ("datetime",)
include = "bool", "int64", "int32"
r = df.select_dtypes(include=include, exclude=exclude)
e = df[["b", "e"]]
tm.assert_frame_equal(r, e)
def test_select_dtypes_include_using_scalars(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.Categorical(list("abc")),
"g": pd.date_range("20130101", periods=3),
"h": pd.date_range("20130101", periods=3, tz="US/Eastern"),
"i": pd.date_range("20130101", periods=3, tz="CET"),
"j": pd.period_range("2013-01", periods=3, freq="M"),
"k": pd.timedelta_range("1 day", periods=3),
}
)
ri = df.select_dtypes(include=np.number)
ei = df[["b", "c", "d", "k"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include="datetime")
ei = df[["g"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include="datetime64")
ei = df[["g"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include="category")
ei = df[["f"]]
tm.assert_frame_equal(ri, ei)
with pytest.raises(NotImplementedError, match=r"^$"):
df.select_dtypes(include="period")
def test_select_dtypes_exclude_using_scalars(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.Categorical(list("abc")),
"g": pd.date_range("20130101", periods=3),
"h": pd.date_range("20130101", periods=3, tz="US/Eastern"),
"i": pd.date_range("20130101", periods=3, tz="CET"),
"j": pd.period_range("2013-01", periods=3, freq="M"),
"k": pd.timedelta_range("1 day", periods=3),
}
)
ri = df.select_dtypes(exclude=np.number)
ei = df[["a", "e", "f", "g", "h", "i", "j"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(exclude="category")
ei = df[["a", "b", "c", "d", "e", "g", "h", "i", "j", "k"]]
tm.assert_frame_equal(ri, ei)
with pytest.raises(NotImplementedError, match=r"^$"):
df.select_dtypes(exclude="period")
def test_select_dtypes_include_exclude_using_scalars(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.Categorical(list("abc")),
"g": pd.date_range("20130101", periods=3),
"h": pd.date_range("20130101", periods=3, tz="US/Eastern"),
"i": pd.date_range("20130101", periods=3, tz="CET"),
"j": pd.period_range("2013-01", periods=3, freq="M"),
"k": pd.timedelta_range("1 day", periods=3),
}
)
ri = df.select_dtypes(include=np.number, exclude="floating")
ei = df[["b", "c", "k"]]
tm.assert_frame_equal(ri, ei)
def test_select_dtypes_include_exclude_mixed_scalars_lists(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.Categorical(list("abc")),
"g": pd.date_range("20130101", periods=3),
"h": pd.date_range("20130101", periods=3, tz="US/Eastern"),
"i": pd.date_range("20130101", periods=3, tz="CET"),
"j": pd.period_range("2013-01", periods=3, freq="M"),
"k": pd.timedelta_range("1 day", periods=3),
}
)
ri = df.select_dtypes(include=np.number, exclude=["floating", "timedelta"])
ei = df[["b", "c"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=[np.number, "category"], exclude="floating")
ei = df[["b", "c", "f", "k"]]
tm.assert_frame_equal(ri, ei)
def test_select_dtypes_duplicate_columns(self):
# GH20839
odict = OrderedDict
df = DataFrame(
odict(
[
("a", list("abc")),
("b", list(range(1, 4))),
("c", np.arange(3, 6).astype("u1")),
("d", np.arange(4.0, 7.0, dtype="float64")),
("e", [True, False, True]),
("f", pd.date_range("now", periods=3).values),
]
)
)
df.columns = ["a", "a", "b", "b", "b", "c"]
expected = DataFrame(
{"a": list(range(1, 4)), "b": np.arange(3, 6).astype("u1")}
)
result = df.select_dtypes(include=[np.number], exclude=["floating"])
tm.assert_frame_equal(result, expected)
def test_select_dtypes_not_an_attr_but_still_valid_dtype(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("now", periods=3).values,
}
)
df["g"] = df.f.diff()
assert not hasattr(np, "u8")
r = df.select_dtypes(include=["i8", "O"], exclude=["timedelta"])
e = df[["a", "b"]]
tm.assert_frame_equal(r, e)
r = df.select_dtypes(include=["i8", "O", "timedelta64[ns]"])
e = df[["a", "b", "g"]]
tm.assert_frame_equal(r, e)
def test_select_dtypes_empty(self):
df = DataFrame({"a": list("abc"), "b": list(range(1, 4))})
msg = "at least one of include or exclude must be nonempty"
with pytest.raises(ValueError, match=msg):
df.select_dtypes()
def test_select_dtypes_bad_datetime64(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("now", periods=3).values,
}
)
with pytest.raises(ValueError, match=".+ is too specific"):
df.select_dtypes(include=["datetime64[D]"])
with pytest.raises(ValueError, match=".+ is too specific"):
df.select_dtypes(exclude=["datetime64[as]"])
def test_select_dtypes_datetime_with_tz(self):
df2 = DataFrame(
dict(
A=Timestamp("20130102", tz="US/Eastern"),
B=Timestamp("20130603", tz="CET"),
),
index=range(5),
)
df3 = pd.concat([df2.A.to_frame(), df2.B.to_frame()], axis=1)
result = df3.select_dtypes(include=["datetime64[ns]"])
expected = df3.reindex(columns=[])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"dtype", [str, "str", np.string_, "S1", "unicode", np.unicode_, "U1"]
)
@pytest.mark.parametrize("arg", ["include", "exclude"])
def test_select_dtypes_str_raises(self, dtype, arg):
df = DataFrame(
{
"a": list("abc"),
"g": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("now", periods=3).values,
}
)
msg = "string dtypes are not allowed"
kwargs = {arg: [dtype]}
with pytest.raises(TypeError, match=msg):
df.select_dtypes(**kwargs)
def test_select_dtypes_bad_arg_raises(self):
df = DataFrame(
{
"a": list("abc"),
"g": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("now", periods=3).values,
}
)
msg = "data type.*not understood"
with pytest.raises(TypeError, match=msg):
df.select_dtypes(["blargy, blarg, blarg"])
def test_select_dtypes_typecodes(self):
# GH 11990
df = tm.makeCustomDataframe(30, 3, data_gen_f=lambda x, y: np.random.random())
expected = df
FLOAT_TYPES = list(np.typecodes["AllFloat"])
tm.assert_frame_equal(df.select_dtypes(FLOAT_TYPES), expected)
def test_dtypes_gh8722(self, float_string_frame):
float_string_frame["bool"] = float_string_frame["A"] > 0
result = float_string_frame.dtypes
expected = Series(
{k: v.dtype for k, v in float_string_frame.items()}, index=result.index
)
tm.assert_series_equal(result, expected)
# compat, GH 8722
with option_context("use_inf_as_na", True):
df = DataFrame([[1]])
result = df.dtypes
tm.assert_series_equal(result, Series({0: np.dtype("int64")}))
def test_ftypes(self, mixed_float_frame):
frame = mixed_float_frame
expected = Series(
dict(
A="float32:dense",
B="float32:dense",
C="float16:dense",
D="float64:dense",
)
).sort_values()
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
result = frame.ftypes.sort_values()
tm.assert_series_equal(result, expected)
def test_astype_float(self, float_frame):
casted = float_frame.astype(int)
expected = DataFrame(
float_frame.values.astype(int),
index=float_frame.index,
columns=float_frame.columns,
)
tm.assert_frame_equal(casted, expected)
casted = float_frame.astype(np.int32)
expected = DataFrame(
float_frame.values.astype(np.int32),
index=float_frame.index,
columns=float_frame.columns,
)
tm.assert_frame_equal(casted, expected)
float_frame["foo"] = "5"
casted = float_frame.astype(int)
expected = DataFrame(
float_frame.values.astype(int),
index=float_frame.index,
columns=float_frame.columns,
)
tm.assert_frame_equal(casted, expected)
def test_astype_mixed_float(self, mixed_float_frame):
# mixed casting
casted = mixed_float_frame.reindex(columns=["A", "B"]).astype("float32")
_check_cast(casted, "float32")
casted = mixed_float_frame.reindex(columns=["A", "B"]).astype("float16")
_check_cast(casted, "float16")
def test_astype_mixed_type(self, mixed_type_frame):
# mixed casting
mn = mixed_type_frame._get_numeric_data().copy()
mn["little_float"] = np.array(12345.0, dtype="float16")
mn["big_float"] = np.array(123456789101112.0, dtype="float64")
casted = mn.astype("float64")
_check_cast(casted, "float64")
casted = mn.astype("int64")
_check_cast(casted, "int64")
casted = mn.reindex(columns=["little_float"]).astype("float16")
_check_cast(casted, "float16")
casted = mn.astype("float32")
_check_cast(casted, "float32")
casted = mn.astype("int32")
_check_cast(casted, "int32")
# to object
casted = mn.astype("O")
_check_cast(casted, "object")
def test_astype_with_exclude_string(self, float_frame):
df = float_frame.copy()
expected = float_frame.astype(int)
df["string"] = "foo"
casted = df.astype(int, errors="ignore")
expected["string"] = "foo"
tm.assert_frame_equal(casted, expected)
df = float_frame.copy()
expected = float_frame.astype(np.int32)
df["string"] = "foo"
casted = df.astype(np.int32, errors="ignore")
expected["string"] = "foo"
tm.assert_frame_equal(casted, expected)
def test_astype_with_view_float(self, float_frame):
# this is the only real reason to do it this way
tf = np.round(float_frame).astype(np.int32)
casted = tf.astype(np.float32, copy=False)
# TODO(wesm): verification?
tf = float_frame.astype(np.float64)
casted = tf.astype(np.int64, copy=False) # noqa
def test_astype_with_view_mixed_float(self, mixed_float_frame):
tf = mixed_float_frame.reindex(columns=["A", "B", "C"])
casted = tf.astype(np.int64)
casted = tf.astype(np.float32) # noqa
@pytest.mark.parametrize("dtype", [np.int32, np.int64])
@pytest.mark.parametrize("val", [np.nan, np.inf])
def test_astype_cast_nan_inf_int(self, val, dtype):
# see gh-14265
#
# Check NaN and inf --> raise error when converting to int.
msg = "Cannot convert non-finite values \\(NA or inf\\) to integer"
df = DataFrame([val])
with pytest.raises(ValueError, match=msg):
df.astype(dtype)
def test_astype_str(self):
# see gh-9757
a = Series(date_range("2010-01-04", periods=5))
b = Series(date_range("3/6/2012 00:00", periods=5, tz="US/Eastern"))
c = Series([Timedelta(x, unit="d") for x in range(5)])
d = Series(range(5))
e = Series([0.0, 0.2, 0.4, 0.6, 0.8])
df = DataFrame({"a": a, "b": b, "c": c, "d": d, "e": e})
# Datetime-like
result = df.astype(str)
expected = DataFrame(
{
"a": list(map(str, map(lambda x: Timestamp(x)._date_repr, a._values))),
"b": list(map(str, map(Timestamp, b._values))),
"c": list(
map(
str,
map(lambda x: Timedelta(x)._repr_base(format="all"), c._values),
)
),
"d": list(map(str, d._values)),
"e": list(map(str, e._values)),
}
)
tm.assert_frame_equal(result, expected)
def test_astype_str_float(self):
# see gh-11302
result = DataFrame([np.NaN]).astype(str)
expected = DataFrame(["nan"])
tm.assert_frame_equal(result, expected)
result = DataFrame([1.12345678901234567890]).astype(str)
# < 1.14 truncates
# >= 1.14 preserves the full repr
val = "1.12345678901" if _np_version_under1p14 else "1.1234567890123457"
expected = DataFrame([val])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("dtype_class", [dict, Series])
def test_astype_dict_like(self, dtype_class):
# GH7271 & GH16717
a = Series(date_range("2010-01-04", periods=5))
b = Series(range(5))
c = Series([0.0, 0.2, 0.4, 0.6, 0.8])
d = Series(["1.0", "2", "3.14", "4", "5.4"])
df = DataFrame({"a": a, "b": b, "c": c, "d": d})
original = df.copy(deep=True)
# change type of a subset of columns
dt1 = dtype_class({"b": "str", "d": "float32"})
result = df.astype(dt1)
expected = DataFrame(
{
"a": a,
"b": Series(["0", "1", "2", "3", "4"]),
"c": c,
"d": Series([1.0, 2.0, 3.14, 4.0, 5.4], dtype="float32"),
}
)
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(df, original)
dt2 = dtype_class({"b": np.float32, "c": "float32", "d": np.float64})
result = df.astype(dt2)
expected = DataFrame(
{
"a": a,
"b": Series([0.0, 1.0, 2.0, 3.0, 4.0], dtype="float32"),
"c": Series([0.0, 0.2, 0.4, 0.6, 0.8], dtype="float32"),
"d": Series([1.0, 2.0, 3.14, 4.0, 5.4], dtype="float64"),
}
)
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(df, original)
# change all columns
dt3 = dtype_class({"a": str, "b": str, "c": str, "d": str})
tm.assert_frame_equal(df.astype(dt3), df.astype(str))
tm.assert_frame_equal(df, original)
# error should be raised when using something other than column labels
# in the keys of the dtype dict
dt4 = dtype_class({"b": str, 2: str})
dt5 = dtype_class({"e": str})
msg = "Only a column name can be used for the key in a dtype mappings argument"
with pytest.raises(KeyError, match=msg):
df.astype(dt4)
with pytest.raises(KeyError, match=msg):
df.astype(dt5)
tm.assert_frame_equal(df, original)
# if the dtypes provided are the same as the original dtypes, the
# resulting DataFrame should be the same as the original DataFrame
dt6 = dtype_class({col: df[col].dtype for col in df.columns})
equiv = df.astype(dt6)
tm.assert_frame_equal(df, equiv)
tm.assert_frame_equal(df, original)
# GH 16717
# if dtypes provided is empty, the resulting DataFrame
# should be the same as the original DataFrame
dt7 = dtype_class({})
result = df.astype(dt7)
tm.assert_frame_equal(df, equiv)
tm.assert_frame_equal(df, original)
def test_astype_duplicate_col(self):
a1 = Series([1, 2, 3, 4, 5], name="a")
b = Series([0.1, 0.2, 0.4, 0.6, 0.8], name="b")
a2 = Series([0, 1, 2, 3, 4], name="a")
df = concat([a1, b, a2], axis=1)
result = df.astype(str)
a1_str = Series(["1", "2", "3", "4", "5"], dtype="str", name="a")
b_str = Series(["0.1", "0.2", "0.4", "0.6", "0.8"], dtype=str, name="b")
a2_str = Series(["0", "1", "2", "3", "4"], dtype="str", name="a")
expected = concat([a1_str, b_str, a2_str], axis=1)
tm.assert_frame_equal(result, expected)
result = df.astype({"a": "str"})
expected = concat([a1_str, b, a2_str], axis=1)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"dtype",
[
"category",
CategoricalDtype(),
CategoricalDtype(ordered=True),
CategoricalDtype(ordered=False),
CategoricalDtype(categories=list("abcdef")),
CategoricalDtype(categories=list("edba"), ordered=False),
CategoricalDtype(categories=list("edcb"), ordered=True),
],
ids=repr,
)
def test_astype_categorical(self, dtype):
# GH 18099
d = {"A": list("abbc"), "B": list("bccd"), "C": list("cdde")}
df = DataFrame(d)
result = df.astype(dtype)
expected = DataFrame({k: Categorical(d[k], dtype=dtype) for k in d})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"cls",
[
pd.api.types.CategoricalDtype,
pd.api.types.DatetimeTZDtype,
pd.api.types.IntervalDtype,
],
)
def test_astype_categoricaldtype_class_raises(self, cls):
df = DataFrame({"A": ["a", "a", "b", "c"]})
xpr = "Expected an instance of {}".format(cls.__name__)
with pytest.raises(TypeError, match=xpr):
df.astype({"A": cls})
with pytest.raises(TypeError, match=xpr):
df["A"].astype(cls)
@pytest.mark.parametrize("dtype", ["Int64", "Int32", "Int16"])
def test_astype_extension_dtypes(self, dtype):
# GH 22578
df = pd.DataFrame([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], columns=["a", "b"])
expected1 = pd.DataFrame(
{
"a": integer_array([1, 3, 5], dtype=dtype),
"b": integer_array([2, 4, 6], dtype=dtype),
}
)
tm.assert_frame_equal(df.astype(dtype), expected1)
tm.assert_frame_equal(df.astype("int64").astype(dtype), expected1)
tm.assert_frame_equal(df.astype(dtype).astype("float64"), df)
df = pd.DataFrame([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], columns=["a", "b"])
df["b"] = df["b"].astype(dtype)
expected2 = pd.DataFrame(
{"a": [1.0, 3.0, 5.0], "b": integer_array([2, 4, 6], dtype=dtype)}
)
tm.assert_frame_equal(df, expected2)
tm.assert_frame_equal(df.astype(dtype), expected1)
tm.assert_frame_equal(df.astype("int64").astype(dtype), expected1)
@pytest.mark.parametrize("dtype", ["Int64", "Int32", "Int16"])
def test_astype_extension_dtypes_1d(self, dtype):
# GH 22578
df = pd.DataFrame({"a": [1.0, 2.0, 3.0]})
expected1 = pd.DataFrame({"a": integer_array([1, 2, 3], dtype=dtype)})
tm.assert_frame_equal(df.astype(dtype), expected1)
tm.assert_frame_equal(df.astype("int64").astype(dtype), expected1)
df = pd.DataFrame({"a": [1.0, 2.0, 3.0]})
df["a"] = df["a"].astype(dtype)
expected2 = pd.DataFrame({"a": integer_array([1, 2, 3], dtype=dtype)})
tm.assert_frame_equal(df, expected2)
tm.assert_frame_equal(df.astype(dtype), expected1)
tm.assert_frame_equal(df.astype("int64").astype(dtype), expected1)
@pytest.mark.parametrize("dtype", ["category", "Int64"])
def test_astype_extension_dtypes_duplicate_col(self, dtype):
# GH 24704
a1 = Series([0, np.nan, 4], name="a")
a2 = Series([np.nan, 3, 5], name="a")
df = concat([a1, a2], axis=1)
result = df.astype(dtype)
expected = concat([a1.astype(dtype), a2.astype(dtype)], axis=1)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"dtype", [{100: "float64", 200: "uint64"}, "category", "float64"]
)
def test_astype_column_metadata(self, dtype):
# GH 19920
columns = pd.UInt64Index([100, 200, 300], name="foo")
df = DataFrame(np.arange(15).reshape(5, 3), columns=columns)
df = df.astype(dtype)
tm.assert_index_equal(df.columns, columns)
@pytest.mark.parametrize("dtype", ["M8", "m8"])
@pytest.mark.parametrize("unit", ["ns", "us", "ms", "s", "h", "m", "D"])
    def test_astype_from_datetimelike_to_object(self, dtype, unit):
# tests astype to object dtype
# gh-19223 / gh-12425
dtype = "{}[{}]".format(dtype, unit)
arr = np.array([[1, 2, 3]], dtype=dtype)
df = DataFrame(arr)
result = df.astype(object)
assert (result.dtypes == object).all()
if dtype.startswith("M8"):
assert result.iloc[0, 0] == pd.to_datetime(1, unit=unit)
else:
assert result.iloc[0, 0] == pd.to_timedelta(1, unit=unit)
@pytest.mark.parametrize("arr_dtype", [np.int64, np.float64])
@pytest.mark.parametrize("dtype", ["M8", "m8"])
@pytest.mark.parametrize("unit", ["ns", "us", "ms", "s", "h", "m", "D"])
def test_astype_to_datetimelike_unit(self, arr_dtype, dtype, unit):
# tests all units from numeric origination
# gh-19223 / gh-12425
dtype = "{}[{}]".format(dtype, unit)
arr = np.array([[1, 2, 3]], dtype=arr_dtype)
df = DataFrame(arr)
result = df.astype(dtype)
expected = DataFrame(arr.astype(dtype))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("unit", ["ns", "us", "ms", "s", "h", "m", "D"])
def test_astype_to_datetime_unit(self, unit):
# tests all units from datetime origination
# gh-19223
dtype = "M8[{}]".format(unit)
arr = np.array([[1, 2, 3]], dtype=dtype)
df = DataFrame(arr)
result = df.astype(dtype)
expected = DataFrame(arr.astype(dtype))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("unit", ["ns"])
def test_astype_to_timedelta_unit_ns(self, unit):
        # preserve the timedelta conversion
# gh-19223
dtype = "m8[{}]".format(unit)
arr = np.array([[1, 2, 3]], dtype=dtype)
df = DataFrame(arr)
result = df.astype(dtype)
expected = DataFrame(arr.astype(dtype))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("unit", ["us", "ms", "s", "h", "m", "D"])
def test_astype_to_timedelta_unit(self, unit):
# coerce to float
# gh-19223
dtype = "m8[{}]".format(unit)
arr = np.array([[1, 2, 3]], dtype=dtype)
df = DataFrame(arr)
result = df.astype(dtype)
expected = DataFrame(df.values.astype(dtype).astype(float))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("unit", ["ns", "us", "ms", "s", "h", "m", "D"])
def test_astype_to_incorrect_datetimelike(self, unit):
# trying to astype a m to a M, or vice-versa
# gh-19224
dtype = "M8[{}]".format(unit)
other = "m8[{}]".format(unit)
df = DataFrame(np.array([[1, 2, 3]], dtype=dtype))
msg = (
r"cannot astype a datetimelike from \[datetime64\[ns\]\] to"
r" \[timedelta64\[{}\]\]"
).format(unit)
with pytest.raises(TypeError, match=msg):
df.astype(other)
msg = (
r"cannot astype a timedelta from \[timedelta64\[ns\]\] to"
r" \[datetime64\[{}\]\]"
).format(unit)
df = DataFrame(np.array([[1, 2, 3]], dtype=other))
with pytest.raises(TypeError, match=msg):
df.astype(dtype)
def test_timedeltas(self):
df = DataFrame(
dict(
A=Series(date_range("2012-1-1", periods=3, freq="D")),
B=Series([timedelta(days=i) for i in range(3)]),
)
)
result = df.dtypes
expected = Series(
[np.dtype("datetime64[ns]"), np.dtype("timedelta64[ns]")], index=list("AB")
)
tm.assert_series_equal(result, expected)
df["C"] = df["A"] + df["B"]
result = df.dtypes
expected = Series(
[
np.dtype("datetime64[ns]"),
np.dtype("timedelta64[ns]"),
np.dtype("datetime64[ns]"),
],
index=list("ABC"),
)
tm.assert_series_equal(result, expected)
# mixed int types
df["D"] = 1
result = df.dtypes
expected = Series(
[
np.dtype("datetime64[ns]"),
np.dtype("timedelta64[ns]"),
np.dtype("datetime64[ns]"),
np.dtype("int64"),
],
index=list("ABCD"),
)
tm.assert_series_equal(result, expected)
def test_arg_for_errors_in_astype(self):
# issue #14878
df = DataFrame([1, 2, 3])
with pytest.raises(ValueError):
df.astype(np.float64, errors=True)
df.astype(np.int8, errors="ignore")
def test_arg_for_errors_in_astype_dictlist(self):
# GH-25905
df = pd.DataFrame(
[
{"a": "1", "b": "16.5%", "c": "test"},
{"a": "2.2", "b": "15.3", "c": "another_test"},
]
)
expected = pd.DataFrame(
[
{"a": 1.0, "b": "16.5%", "c": "test"},
{"a": 2.2, "b": "15.3", "c": "another_test"},
]
)
type_dict = {"a": "float64", "b": "float64", "c": "object"}
result = df.astype(dtype=type_dict, errors="ignore")
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"input_vals",
[
([1, 2]),
(["1", "2"]),
(list(pd.date_range("1/1/2011", periods=2, freq="H"))),
(list(pd.date_range("1/1/2011", periods=2, freq="H", tz="US/Eastern"))),
([pd.Interval(left=0, right=5)]),
],
)
def test_constructor_list_str(self, input_vals, string_dtype):
# GH 16605
# Ensure that data elements are converted to strings when
# dtype is str, 'str', or 'U'
result = DataFrame({"A": input_vals}, dtype=string_dtype)
expected = DataFrame({"A": input_vals}).astype({"A": string_dtype})
tm.assert_frame_equal(result, expected)
def test_constructor_list_str_na(self, string_dtype):
result = DataFrame({"A": [1.0, 2.0, None]}, dtype=string_dtype)
expected = DataFrame({"A": ["1.0", "2.0", None]}, dtype=object)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"data, expected",
[
# empty
(DataFrame(), True),
# multi-same
(DataFrame({"A": [1, 2], "B": [1, 2]}), True),
# multi-object
(
DataFrame(
{
"A": np.array([1, 2], dtype=object),
"B": np.array(["a", "b"], dtype=object),
}
),
True,
),
# multi-extension
(
DataFrame(
{"A": pd.Categorical(["a", "b"]), "B":
|
pd.Categorical(["a", "b"])
|
pandas.Categorical
|
import pandas as __pd
import datetime as __dt
from multiprocessing import Pool as __Pool
import multiprocessing as __mp
from functools import reduce as __red
from seffaflik.__ortak.__araclar import make_requests as __make_requests
from seffaflik.__ortak import __dogrulama as __dogrulama
from seffaflik.elektrik import santraller as __santraller
__first_part_url = "production/"
def organizasyonlar():
"""
Kesinleşmiş Gün Öncesi Üretim Planı (KGÜP) girebilecek olan organizasyon bilgilerini vermektedir.
Parametreler
------------
Geri Dönüş Değeri
-----------------
KGÜP Girebilen Organizasyon Bilgileri(Id, Adı, EIC Kodu, Kısa Adı, Durum)
"""
try:
particular_url = __first_part_url + "dpp-organization"
json = __make_requests(particular_url)
df = __pd.DataFrame(json["body"]["organizations"])
df.rename(index=str,
columns={"organizationId": "Id", "organizationName": "Adı",
"organizationETSOCode": "EIC Kodu", "organizationShortName": "Kısa Adı",
"organizationStatus": "Durum"},
inplace=True)
df = df[["Id", "Adı", "EIC Kodu", "Kısa Adı", "Durum"]]
except (KeyError, TypeError):
return __pd.DataFrame()
else:
return df
def organizasyon_veris_cekis_birimleri(eic):
"""
İlgili eic değeri için Kesinleşmiş Gün Öncesi Üretim Planı (KGÜP) girebilecek olan organizasyonun uzlaştırmaya
esas veriş-çekiş birim (UEVÇB) bilgilerini vermektedir.
Parametreler
------------
eic : metin formatında organizasyon eic kodu
Geri Dönüş Değeri
-----------------
KGÜP Girebilen Organizasyonun UEVÇB Bilgileri(Id, Adı, EIC Kodu)
"""
if __dogrulama.__kgup_girebilen_organizasyon_dogrulama(eic):
try:
particular_url = __first_part_url + "dpp-injection-unit-name?organizationEIC=" + eic
json = __make_requests(particular_url)
df_unit = __pd.DataFrame(json["body"]["injectionUnitNames"])
df_unit.rename(index=str, columns={"id": "Id", "name": "Adı", "eic": "EIC Kodu"}, inplace=True)
df_unit = df_unit[["Id", "Adı", "EIC Kodu"]]
except (KeyError, TypeError):
return __pd.DataFrame()
else:
return df_unit
def tum_organizasyonlar_veris_cekis_birimleri():
"""
Kesinleşmiş Gün Öncesi Üretim Planı (KGÜP) girebilecek olan tüm organizasyon ve bu organizasyonların
uzlaştırmaya esas veriş-çekiş birim (UEVÇB) bilgilerini vermektedir.
Parametreler
------------
Geri Dönüş Değeri
-----------------
KGÜP Girebilen Organizasyonlar ve UEVÇB Bilgileri(Org Id, Org Adı, Org EIC Kodu, Org Kısa Adı, Org Durum, UEVÇB Id,
UEVÇB Adı, UEVÇB EIC Kodu)
"""
list_org = organizasyonlar()[["Id", "Adı", "EIC Kodu", "Kısa Adı", "Durum"]].to_dict("records")
with __Pool(__mp.cpu_count()) as p:
list_df_unit = p.map(__organizasyon_cekis_birimleri, list_org, chunksize=1)
return __pd.concat(list_df_unit).reset_index(drop=True)
def kgup(baslangic_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"),
bitis_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"), organizasyon_eic="", uevcb_eic=""):
"""
İlgili tarih aralığı için kaynak bazlı kesinleşmiş günlük üretim planı (KGÜP) bilgisini vermektedir.
Not: "organizasyon_eic" değeri girildiği, "uevcb_eic" değeri girilmediği taktirde organizasyona ait tüm uevcb'lerin
toplamı için kgüp bilgisini vermektedir. Her iki değer de girildiği taktirde ilgili organizasyonun ilgili uevcb'si
için kgüp bilgisini vermektedir.
Parametreler
------------
baslangic_tarihi : %YYYY-%AA-%GG formatında başlangıç tarihi (Varsayılan: bugün)
bitis_tarihi : %YYYY-%AA-%GG formatında bitiş tarihi (Varsayılan: bugün)
organizasyon_eic : metin formatında organizasyon eic kodu (Varsayılan: "")
uevcb_eic : metin formatında metin formatında uevcb eic kodu (Varsayılan: "")
Geri Dönüş Değeri
-----------------
KGUP (Tarih, Saat, Doğalgaz, Barajlı, Linyit, Akarsu, İthal Kömür, Rüzgar, Fuel Oil, Jeo Termal, Taş Kömür, Biyokütle
,Nafta, Diğer, Toplam)
"""
if __dogrulama.__baslangic_bitis_tarih_dogrulama(baslangic_tarihi, bitis_tarihi):
try:
particular_url = __first_part_url + "dpp" + "?startDate=" + baslangic_tarihi + "&endDate=" + bitis_tarihi \
+ "&organizationEIC=" + organizasyon_eic + "&uevcbEIC=" + uevcb_eic
json = __make_requests(particular_url)
df = __pd.DataFrame(json["body"]["dppList"])
df["Saat"] = df["tarih"].apply(lambda h: int(h[11:13]))
df["Tarih"] = __pd.to_datetime(df["tarih"].apply(lambda d: d[:10]))
df.rename(index=str,
columns={"akarsu": "Akarsu", "barajli": "Barajlı", "biokutle": "Biyokütle", "diger": "Diğer",
"dogalgaz": "Doğalgaz", "fuelOil": "Fuel Oil", "ithalKomur": "İthal Kömür",
"jeotermal": "Jeo Termal", "linyit": "Linyit", "nafta": "Nafta",
"ruzgar": "Rüzgar", "tasKomur": "Taş Kömür", "toplam": "Toplam"}, inplace=True)
df = df[["Tarih", "Saat", "Doğalgaz", "Barajlı", "Linyit", "Akarsu", "İthal Kömür", "Rüzgar",
"Fuel Oil", "Jeo Termal", "Taş Kömür", "Biyokütle", "Nafta", "Diğer", "Toplam"]]
except (KeyError, TypeError):
return __pd.DataFrame()
else:
return df
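# Usage sketch (not part of the original module; the EIC code below is a made-up placeholder and a working
# connection to the EPİAŞ transparency API is assumed):
#
#     df = kgup(baslangic_tarihi="2021-01-01", bitis_tarihi="2021-01-07",
#               organizasyon_eic="40X000000000000X")
#     # one row per hour with the source-based KGÜP columns ("Tarih", "Saat", ..., "Toplam")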
def tum_organizasyonlar_kgup(baslangic_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"),
bitis_tarihi=__dt.datetime.today().strftime("%Y-%m-%d")):
"""
İlgili tarih aralığı için Kesinleşmiş Gün Öncesi Üretim Planı (KGÜP) girebilecek olan tüm organizasyonların saatlik
KGUP bilgilerini vermektedir.
Parametreler
------------
baslangic_tarihi : %YYYY-%AA-%GG formatında başlangıç tarihi (Varsayılan: bugün)
bitis_tarihi : %YYYY-%AA-%GG formatında bitiş tarihi (Varsayılan: bugün)
Geri Dönüş Değeri
-----------------
KGÜP Girebilen Organizasyonların KGUP Değerleri (Tarih, Saat, KGUP)
"""
if __dogrulama.__baslangic_bitis_tarih_dogrulama(baslangic_tarihi, bitis_tarihi):
org = organizasyonlar()
list_org = org[["EIC Kodu", "Kısa Adı"]].to_dict("records")
org_len = len(list_org)
list_date_org_eic = list(zip([baslangic_tarihi] * org_len, [bitis_tarihi] * org_len, list_org))
list_date_org_eic = list(map(list, list_date_org_eic))
with __Pool(__mp.cpu_count()) as p:
list_df_unit = p.starmap(__kgup, list_date_org_eic, chunksize=1)
list_df_unit = list(filter(lambda x: len(x) > 0, list_df_unit))
df_unit = __red(lambda left, right: __pd.merge(left, right, how="outer", on=["Tarih", "Saat"], sort=True),
list_df_unit)
return df_unit
def tum_uevcb_kgup(baslangic_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"),
bitis_tarihi=__dt.datetime.today().strftime("%Y-%m-%d")):
"""
İlgili tarih aralığı için Kesinleşmiş Gün Öncesi Üretim Planı (KGÜP) girebilecek olan tüm organizasyonların
uzlaştırmaya esas veriş-çekiş birimlerinin saatlik KGUP bilgilerini vermektedir.
Parametreler
------------
baslangic_tarihi : %YYYY-%AA-%GG formatında başlangıç tarihi (Varsayılan: bugün)
bitis_tarihi : %YYYY-%AA-%GG formatında bitiş tarihi (Varsayılan: bugün)
Geri Dönüş Değeri
-----------------
KGÜP Girebilen Organizasyonların UEVCB KGUP Değerleri (Tarih, Saat, KGUP)
"""
if __dogrulama.__baslangic_bitis_tarih_dogrulama(baslangic_tarihi, bitis_tarihi):
org_uevcb = tum_organizasyonlar_veris_cekis_birimleri()
list_org_uevcb = org_uevcb[["Org EIC Kodu", "UEVÇB EIC Kodu", "UEVÇB Adı"]].to_dict("records")
list_org_uevcb_len = len(list_org_uevcb)
list_date_org_uevcb_eic = list(
zip([baslangic_tarihi] * list_org_uevcb_len, [bitis_tarihi] * list_org_uevcb_len, list_org_uevcb))
list_date_org_uevcb_eic = list(map(list, list_date_org_uevcb_eic))
with __Pool(__mp.cpu_count()) as p:
list_df_unit = p.starmap(__kgup_uevcb, list_date_org_uevcb_eic, chunksize=1)
list_df_unit = list(filter(lambda x: len(x) > 0, list_df_unit))
df_unit = __red(lambda left, right: __pd.merge(left, right, how="outer", on=["Tarih", "Saat"], sort=True),
list_df_unit)
return df_unit
def eak(baslangic_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"),
bitis_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"), organizasyon_eic="", uevcb_eic=""):
"""
İlgili tarih aralığı için kaynak bazlı emre amade kapasite (EAK) bilgisini vermektedir.
Not: "organizasyon_eic" değeri girildiği, "uevcb_eic" değeri girilmediği taktirde organizasyona ait tüm uevcb'lerin
toplamı için eak bilgisini vermektedir. Her iki değer de girildiği taktirde ilgili organizasyonun ilgili uevcb'si
için kgüp bilgisini vermektedir.
Parametreler
------------
baslangic_tarihi : %YYYY-%AA-%GG formatında başlangıç tarihi (Varsayılan: bugün)
bitis_tarihi : %YYYY-%AA-%GG formatında bitiş tarihi (Varsayılan: bugün)
organizasyon_eic : metin formatında organizasyon eic kodu (Varsayılan: "")
uevcb_eic : metin formatında metin formatında uevcb eic kodu (Varsayılan: "")
Geri Dönüş Değeri
-----------------
EAK (Tarih, Saat, Doğalgaz, Barajlı, Linyit, Akarsu, İthal Kömür, Rüzgar, Fuel Oil, Jeo Termal, Taş Kömür, Biyokütle,
Nafta, Diğer, Toplam)
"""
if __dogrulama.__baslangic_bitis_tarih_dogrulama(baslangic_tarihi, bitis_tarihi):
try:
particular_url = __first_part_url + "aic" + "?startDate=" + baslangic_tarihi + "&endDate=" + bitis_tarihi \
+ "&organizationEIC=" + organizasyon_eic + "&uevcbEIC=" + uevcb_eic
json = __make_requests(particular_url)
df = __pd.DataFrame(json["body"]["aicList"])
df["Saat"] = df["tarih"].apply(lambda h: int(h[11:13]))
df["Tarih"] = __pd.to_datetime(df["tarih"].apply(lambda d: d[:10]))
df.rename(index=str,
columns={"akarsu": "Akarsu", "barajli": "Barajlı", "biokutle": "Biyokütle", "diger": "Diğer",
"dogalgaz": "Doğalgaz", "fuelOil": "Fuel Oil", "ithalKomur": "İthal Kömür",
"jeotermal": "Jeo Termal", "linyit": "Linyit", "nafta": "Nafta",
"ruzgar": "Rüzgar", "tasKomur": "Taş Kömür", "toplam": "Toplam"}, inplace=True)
df = df[["Tarih", "Saat", "Doğalgaz", "Barajlı", "Linyit", "Akarsu", "İthal Kömür", "Rüzgar",
"Fuel Oil", "Jeo Termal", "Taş Kömür", "Biyokütle", "Nafta", "Diğer", "Toplam"]]
except (KeyError, TypeError):
return __pd.DataFrame()
else:
return df
def tum_organizasyonlar_eak(baslangic_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"),
bitis_tarihi=__dt.datetime.today().strftime("%Y-%m-%d")):
"""
İlgili tarih aralığı için Emre Amade Kapasite (EAK) girebilecek olan tüm organizasyonların saatlik EAK bilgilerini
vermektedir.
Parametreler
------------
baslangic_tarihi : %YYYY-%AA-%GG formatında başlangıç tarihi (Varsayılan: bugün)
bitis_tarihi : %YYYY-%AA-%GG formatında bitiş tarihi (Varsayılan: bugün)
Geri Dönüş Değeri
-----------------
EAK Girebilen Organizasyonların EAK Değerleri (Tarih, Saat, EAK)
"""
if __dogrulama.__baslangic_bitis_tarih_dogrulama(baslangic_tarihi, bitis_tarihi):
org = organizasyonlar()
list_org = org[["EIC Kodu", "Kısa Adı"]].to_dict("records")
org_len = len(list_org)
list_date_org_eic = list(zip([baslangic_tarihi] * org_len, [bitis_tarihi] * org_len, list_org))
list_date_org_eic = list(map(list, list_date_org_eic))
with __Pool(__mp.cpu_count()) as p:
list_df_unit = p.starmap(__eak, list_date_org_eic, chunksize=1)
list_df_unit = list(filter(lambda x: len(x) > 0, list_df_unit))
df_unit = __red(lambda left, right: __pd.merge(left, right, how="outer", on=["Tarih", "Saat"], sort=True),
list_df_unit)
return df_unit
def tum_uevcb_eak(baslangic_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"),
bitis_tarihi=__dt.datetime.today().strftime("%Y-%m-%d")):
"""
İlgili tarih aralığı için Emre Amade Kapasite (EAK) girebilecek olan tüm organizasyonların uzlaştırmaya esas
veriş-çekiş birimlerinin saatlik KGUP bilgilerini vermektedir.
Parametreler
------------
baslangic_tarihi : %YYYY-%AA-%GG formatında başlangıç tarihi (Varsayılan: bugün)
bitis_tarihi : %YYYY-%AA-%GG formatında bitiş tarihi (Varsayılan: bugün)
Geri Dönüş Değeri
-----------------
KGÜP Girebilen Organizasyonların UEVCB KGUP Değerleri (Tarih, Saat, KGUP)
"""
if __dogrulama.__baslangic_bitis_tarih_dogrulama(baslangic_tarihi, bitis_tarihi):
org_uevcb = tum_organizasyonlar_veris_cekis_birimleri()
list_org_uevcb = org_uevcb[["Org EIC Kodu", "UEVÇB EIC Kodu", "UEVÇB Adı"]].to_dict("records")
list_org_uevcb_len = len(list_org_uevcb)
list_date_org_uevcb_eic = list(
zip([baslangic_tarihi] * list_org_uevcb_len, [bitis_tarihi] * list_org_uevcb_len, list_org_uevcb))
list_date_org_uevcb_eic = list(map(list, list_date_org_uevcb_eic))
with __Pool(__mp.cpu_count()) as p:
list_df_unit = p.starmap(__eak_uevcb, list_date_org_uevcb_eic, chunksize=1)
list_df_unit = list(filter(lambda x: len(x) > 0, list_df_unit))
df_unit = __red(lambda left, right: __pd.merge(left, right, how="outer", on=["Tarih", "Saat"], sort=True),
list_df_unit)
return df_unit
def kudup(baslangic_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"),
bitis_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"), organizasyon_id="", uevcb_id=""):
"""
İlgili tarih aralığı için gün içi piyasasının kapanışından sonra yapılan güncellemeyle kaynak bazlı Kesinleşmiş
Uzlaştırma Dönemi Üretim Planı (KUDÜP) bilgisini vermektedir.
Not: "organizasyon_eic" değeri girildiği, "uevcb_eic" değeri girilmediği taktirde organizasyona ait tüm uevcb'lerin
toplamı için kgüp bilgisini vermektedir. Her iki değer de girildiği taktirde ilgili organizasyonun ilgili uevcb'si
için kgüp bilgisini vermektedir.
Parametreler
------------
baslangic_tarihi : %YYYY-%AA-%GG formatında başlangıç tarihi (Varsayılan: bugün)
bitis_tarihi : %YYYY-%AA-%GG formatında bitiş tarihi (Varsayılan: bugün)
organizasyon_id : metin formatında organizasyon id (Varsayılan: "")
uevcb_id : metin formatında uevcb id (Varsayılan: "")
Geri Dönüş Değeri
-----------------
KUDÜP (Tarih, Saat, Doğalgaz, Barajlı, Linyit, Akarsu, İthal Kömür, Rüzgar, Fuel Oil, Jeo Termal, Taş Kömür,
Biyokütle, Nafta, Diğer, Toplam)
"""
if __dogrulama.__baslangic_bitis_tarih_dogrulama(baslangic_tarihi, bitis_tarihi):
try:
particular_url = __first_part_url + "sbfgp" + "?startDate=" + baslangic_tarihi + "&endDate=" + \
bitis_tarihi + "&organizationId=" + organizasyon_id + "&uevcbId=" + uevcb_id
json = __make_requests(particular_url)
df = __pd.DataFrame(json["body"]["dppList"])
df["Saat"] = df["tarih"].apply(lambda h: int(h[11:13]))
df["Tarih"] = __pd.to_datetime(df["tarih"].apply(lambda d: d[:10]))
df.rename(index=str,
columns={"akarsu": "Akarsu", "barajli": "Barajlı", "biokutle": "Biyokütle", "diger": "Diğer",
"dogalgaz": "Doğalgaz", "fuelOil": "Fuel Oil", "ithalKomur": "İthal Kömür",
"jeotermal": "Jeo Termal", "linyit": "Linyit", "nafta": "Nafta",
"ruzgar": "Rüzgar", "tasKomur": "Taş Kömür", "toplam": "Toplam"}, inplace=True)
df = df[["Tarih", "Saat", "Doğalgaz", "Barajlı", "Linyit", "Akarsu", "İthal Kömür", "Rüzgar",
"Fuel Oil", "Jeo Termal", "Taş Kömür", "Biyokütle", "Nafta", "Diğer", "Toplam"]]
except (KeyError, TypeError):
return __pd.DataFrame()
else:
return df
def uevm(baslangic_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"),
bitis_tarihi=__dt.datetime.today().strftime("%Y-%m-%d")):
"""
İlgili tarih aralığı için saatlik Uzlaştırmaya Esas Variş Miktarı (UEVM) bilgisini vermektedir.
Parametreler
------------
baslangic_tarihi : %YYYY-%AA-%GG formatında başlangıç tarihi (Varsayılan: bugün)
bitis_tarihi : %YYYY-%AA-%GG formatında bitiş tarihi (Varsayılan: bugün)
Geri Dönüş Değeri
-----------------
Uzlaştırmaya Esas Veriş Miktarı (Tarih, Saat, UEVM)
"""
if __dogrulama.__baslangic_bitis_tarih_dogrulama(baslangic_tarihi, bitis_tarihi):
try:
particular_url = __first_part_url + "ssv-categorized" + "?startDate=" + baslangic_tarihi + "&endDate=" + \
bitis_tarihi
json = __make_requests(particular_url)
df = __pd.DataFrame(json["body"]["ssvList"])
df["Saat"] = df["date"].apply(lambda h: int(h[11:13]))
df["Tarih"] = __pd.to_datetime(df["date"].apply(lambda d: d[:10]))
df.rename(index=str,
columns={"asphaltite": "Asfaltit Kömür", "river": "Akarsu", "dam": "Barajlı",
"biomass": "Biyokütle", "naturalGas": "Doğalgaz", "fueloil": "Fuel Oil",
"importedCoal": "İthal Kömür", "geothermal": "Jeo Termal", "lignite": "Linyit",
"naphtha": "Nafta", "lng": "LNG", "wind": "Rüzgar", "stonecoal": "Taş Kömür",
"international": "Uluslararası", "total": "Toplam", "other": "Diğer"},
inplace=True)
df = df[
["Tarih", "Saat", "Doğalgaz", "Barajlı", "Linyit", "Akarsu", "<NAME>", "Rüzgar",
"Fuel Oil", "Jeo Termal", "Asfaltit Kömür", "Taş Kömür", "Biyokütle", "Nafta", "LNG", "Uluslararası",
"Diğer", "Toplam"]]
except (KeyError, TypeError):
return __pd.DataFrame()
else:
return df
def gerceklesen(baslangic_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"),
bitis_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"), santral_id=""):
"""
İlgili tarih aralığı için lisanslı santrallerin toplam gerçek zamanlı üretim bilgisini vermektedir.
Not: "santral_id" değeri girildiği taktirde santrale ait gerçek zamanlı üretim bilgisini vermektedir.
Girilmediği taktirde toplam gerçek zamanlı üretim bilgisini vermektedir.
Parametreler
------------
baslangic_tarihi : %YYYY-%AA-%GG formatında başlangıç tarihi (Varsayılan: bugün)
bitis_tarihi : %YYYY-%AA-%GG formatında bitiş tarihi (Varsayılan: bugün)
santral_id : metin yada tam sayı formatında santral id (Varsayılan: "")
Geri Dönüş Değeri
-----------------
Gerçek Zamanlı Üretim("Tarih", "Saat", "Doğalgaz", "Barajlı", "Linyit", "Akarsu", "<NAME>", "Rüzgar", "Güneş",
"Fuel Oil", "Jeo Termal", "Asf<NAME>", "Ta<NAME>", "Biokütle", "Nafta", "LNG", "Uluslararası",
"Toplam")
"""
if __dogrulama.__baslangic_bitis_tarih_id_dogrulama(baslangic_tarihi, bitis_tarihi, santral_id):
if santral_id == "":
return __gerceklesen(baslangic_tarihi, bitis_tarihi)
else:
return __santral_bazli_gerceklesen(baslangic_tarihi, bitis_tarihi, santral_id)
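# Usage sketch (illustrative only; "123" is a made-up plant id):
#
#     toplam = gerceklesen("2021-01-01", "2021-01-02")                          # system-wide real-time generation
#     santral = gerceklesen("2021-01-01", "2021-01-02", santral_id="123")       # a single licensed plant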
def tum_santraller_gerceklesen(baslangic_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"),
bitis_tarihi=__dt.datetime.today().strftime("%Y-%m-%d")):
"""
İlgili tarih aralığı için tüm lisanslı santrallerin gerçek zamanlı üretim bilgisini vermektedir.
Parametreler
------------
baslangic_tarihi : %YYYY-%AA-%GG formatında başlangıç tarihi (Varsayılan: bugün)
bitis_tarihi : %YYYY-%AA-%GG formatında bitiş tarihi (Varsayılan: bugün)
Geri Dönüş Değeri
-----------------
Tüm lisanslı santrallerin gerçek zamanlı üretim Değerleri (Tarih, Saat, Santral Üretimleri)
"""
if __dogrulama.__baslangic_bitis_tarih_dogrulama(baslangic_tarihi, bitis_tarihi):
sant = __santraller.gercek_zamanli_uretim_yapan_santraller()
list_sant = sant[["Id", "Kısa Adı"]].to_dict("records")
list_sant_len = len(list_sant)
list_sant = list(
zip([baslangic_tarihi] * list_sant_len, [bitis_tarihi] * list_sant_len, list_sant))
list_sant = list(map(list, list_sant))
with __Pool(__mp.cpu_count()) as p:
list_df_unit = p.starmap(__gerceklesen_santral, list_sant, chunksize=1)
list_df_unit = list(filter(lambda x: len(x) > 0, list_df_unit))
df_unit = __red(lambda left, right:
|
__pd.merge(left, right, how="outer", on=["Tarih", "Saat"], sort=True)
|
pandas.merge
|
#!/usr/bin/env python2
# The MIT License (MIT)
#
# Copyright (c) 2015 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
'''
This module contains one main class.
The BenchmarkRun class creates a dataframe containing the raw data used for DDG analysis. This dataframe is then
used to perform analysis for a particular run or to compare one run against another.
This class should not be specific to a particular computational method so that it remains reusable for generic
monomeric stability DDG analysis.
'''
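# A minimal construction sketch (not from the original source; argument values are illustrative assumptions):
#
#     br = BenchmarkRun(
#         'example_run',
#         dataset_cases,                 # dict: case id -> dataset/PDB metadata
#         analysis_data,                 # dict: case id -> predicted DDG data
#         ddg_analysis_type='DDG_Run3',  # hypothetical analysis-type label
#         store_data_on_disk=False,      # avoids requiring analysis_input.* files on disk
#     )
#     br.read_dataframe('analysis_input.pandas')   # or attach a prepared dataframe via br.set_dataframe(df)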
import os
import sys
import shutil
import numpy
import pprint
import shlex
import tempfile
import copy
import io
import gzip
import time
import datetime
import getpass
import multiprocessing as mp
try: import json
except: import simplejson as json
import pandas
from klab import colortext
from klab.Reporter import Reporter
import klab.latex.latex_report as lr
from klab.fs.fsio import read_file, write_file, write_temp_file
from klab.loggers.simple import ReportingObject
from klab.gfx.color_definitions import rgb_colors as plot_colors
from klab.stats.misc import fraction_correct, fraction_correct_pandas, add_fraction_correct_values_to_dataframe, get_xy_dataset_statistics_pandas, format_stats, float_format_2sigfig, float_format_3sigfig, subtract_row_pairs_for_display
from klab.benchmarking.analysis.plot import plot_pandas
from klab.plot.rtools import RInterface
from klab.plot import general_matplotlib
class BenchmarkRun(ReportingObject):
    '''An object containing benchmark run data which can be used to analyze that run or to cross-analyze it against another run.'''
# Class variables
amino_acid_details = {}
CAA, PAA, HAA = set(), set(), set()
# Human-readable descriptions for the volume breakdown
by_volume_descriptions = dict(
SL = 'small-to-large mutations',
LS = 'large-to-small mutations',
XX = 'no change in volume',
)
csv_headers = [
'DatasetID', 'PDBFileID', 'Mutations', 'NumberOfMutations', 'Experimental', 'Predicted', 'AbsoluteError', 'StabilityClassification',
'ResidueCharges', 'VolumeChange',
'WildTypeDSSPType', 'WildTypeDSSPSimpleSSType', 'WildTypeDSSPExposure',
'WildTypeSCOPClass', 'WildTypeSCOPFold', 'WildTypeSCOPClassification',
'WildTypeExposure', 'WildTypeAA', 'MutantAA', 'HasGPMutation',
'PDBResolution', 'PDBResolutionBin', 'NumberOfResidues', 'NumberOfDerivativeErrors',
]
def __init__(self, benchmark_run_name, dataset_cases, analysis_data, contains_experimental_data = True, benchmark_run_directory = None, use_single_reported_value = False,
ddg_analysis_type = None,
calculate_scalar_adjustments = True,
description = None, dataset_description = None, credit = None, generate_plots = True, report_analysis = True, include_derived_mutations = False, recreate_graphs = False, silent = False, burial_cutoff = 0.25,
additional_join_parameters = {},
stability_classication_x_cutoff = 1.0, stability_classication_y_cutoff = 1.0, use_existing_benchmark_data = False, store_data_on_disk = True, misc_dataframe_attributes = {},
terminal_width = 200, restrict_to = set(), remove_cases = set()):
self.contains_experimental_data = contains_experimental_data
self.analysis_sets = [''] # some subclasses store values for multiple analysis sets
self.calculate_scalar_adjustments = calculate_scalar_adjustments
self.csv_headers = copy.deepcopy(self.__class__.csv_headers)
self.additional_join_parameters = additional_join_parameters
if 'ddg_analysis_type' in additional_join_parameters:
if ddg_analysis_type != None:
assert( ddg_analysis_type == additional_join_parameters['ddg_analysis_type']['long_name'] )
self.ddg_analysis_type = additional_join_parameters['ddg_analysis_type']['long_name']
else:
assert( ddg_analysis_type != None )
self.ddg_analysis_type = ddg_analysis_type
if not self.contains_experimental_data:
self.csv_headers.remove('Experimental')
self.csv_headers.remove('AbsoluteError')
self.csv_headers.remove('StabilityClassification')
self.terminal_width = terminal_width # Used for printing the dataframe to a terminal. Set this to be less than the width of your terminal in columns.
self.amino_acid_details, self.CAA, self.PAA, self.HAA = BenchmarkRun.get_amino_acid_details()
self.benchmark_run_name = benchmark_run_name
self.benchmark_run_directory = benchmark_run_directory
self.dataset_cases = copy.deepcopy(dataset_cases)
self.analysis_data = copy.deepcopy(analysis_data)
self.analysis_directory = None
self.subplot_directory = None
self.restrict_to = restrict_to
self.remove_cases = remove_cases
self.use_single_reported_value = use_single_reported_value
self.description = description
self.dataset_description = dataset_description
self.credit = credit
self.generate_plots = generate_plots
self.report_analysis = report_analysis
self.silent = silent
self.include_derived_mutations = include_derived_mutations
self.burial_cutoff = burial_cutoff
self.recreate_graphs = recreate_graphs
self.stability_classication_x_cutoff = stability_classication_x_cutoff
self.stability_classication_y_cutoff = stability_classication_y_cutoff
self.scalar_adjustments = {}
self.store_data_on_disk = store_data_on_disk
self.misc_dataframe_attributes = misc_dataframe_attributes
        assert('Credit' not in self.misc_dataframe_attributes)
self.misc_dataframe_attributes['Credit'] = credit
self.metric_latex_objects = []
self.stored_metrics_df = pandas.DataFrame()
if self.store_data_on_disk:
# This may be False in some cases e.g. when interfacing with a database
self.analysis_csv_input_filepath = os.path.join(self.benchmark_run_directory, 'analysis_input.csv')
self.analysis_json_input_filepath = os.path.join(self.benchmark_run_directory, 'analysis_input.json')
self.analysis_raw_data_input_filepath = os.path.join(self.benchmark_run_directory, 'benchmark_data.json')
self.analysis_pandas_input_filepath = os.path.join(self.benchmark_run_directory, 'analysis_input.pandas')
assert(os.path.exists(self.analysis_csv_input_filepath))
assert(os.path.exists(self.analysis_json_input_filepath))
assert(os.path.exists(self.analysis_raw_data_input_filepath))
else:
self.analysis_csv_input_filepath, self.analysis_json_input_filepath, self.analysis_raw_data_input_filepath, self.analysis_pandas_input_filepath = None, None, None, None
self.use_existing_benchmark_data = use_existing_benchmark_data
self.ddg_analysis_type_description = None
self.filter_data()
def add_stored_metric_to_df(self, case_description, case_length, case_stats):
# Reformat statistics to put a column for each stat type
stats = {}
for case_stat in case_stats:
stats[ case_stat[0] ] = [case_stat[1]]
stats[ case_stat[0] + '-p-val' ] = [case_stat[2]]
df = pandas.DataFrame.from_dict(stats)
num_rows = len(df.index)
df.loc[:,'case_description'] = pandas.Series([case_description for x in range(num_rows)], index=df.index)
df.loc[:,'benchmark_run_name'] = pandas.Series([self.benchmark_run_name for x in range(num_rows)], index=df.index)
df.loc[:,'n'] = pandas.Series([case_length for x in range(num_rows)], index=df.index)
self.stored_metrics_df = pandas.concat([self.stored_metrics_df, df])
def filter_data(self):
        '''A very rough filtering step to remove certain data.
        todo: It is probably best to do this to the actual dataframe rather than at this point.
        todo: We currently only handle one filtering criterion.
        '''
if not self.dataset_cases or not self.analysis_data:
# colortext.error('No dataset cases or analysis (DDG) data were passed. Cannot filter the data. If you are using an existing dataframe, this may explain why no data was passed.')
return
if self.restrict_to or self.remove_cases:
# Remove any cases with missing data
available_cases = set(self.analysis_data.keys())
missing_dataset_cases = [k for k in list(self.dataset_cases.keys()) if k not in available_cases]
for k in missing_dataset_cases:
del self.dataset_cases[k]
cases_to_remove = set()
if self.restrict_to:
# Remove cases which do not meet the restriction criteria
if 'Exposed' in self.restrict_to:
for k, v in self.dataset_cases.items():
for m in v['PDBMutations']:
if (m.get('ComplexExposure') or m.get('MonomericExposure')) <= self.burial_cutoff:
cases_to_remove.add(k)
break
if self.remove_cases:
# Remove cases which meet the removal criteria
if 'Exposed' in self.remove_cases:
for k, v in self.dataset_cases.items():
for m in v['PDBMutations']:
if (m.get('ComplexExposure') or m.get('MonomericExposure')) > self.burial_cutoff:
cases_to_remove.add(k)
break
if cases_to_remove:
colortext.warning('Filtering out {0} records.'.format(len(cases_to_remove)))
for k in cases_to_remove:
del self.dataset_cases[k]
del self.analysis_data[k]
def __repr__(self):
'''Simple printer - we print the dataframe.'''
with pandas.option_context('display.max_rows', None, 'display.max_columns', None, 'display.width', self.terminal_width):
return '{0}'.format(self.dataframe)
@staticmethod
def get_analysis_set_fieldname(prefix, analysis_set):
if analysis_set:
return '{0}_{1}'.format(prefix, analysis_set)
else:
return prefix
@staticmethod
def get_amino_acid_details():
if not BenchmarkRun.amino_acid_details:
# Amino acid properties
polarity_map = {'polar' : 'P', 'charged' : 'C', 'hydrophobic' : 'H'}
aromaticity_map = {'aliphatic' : 'L', 'aromatic' : 'R', 'neither' : '-'}
amino_acid_detail_headers = 'Code,Long code,Name,Polarity,Aromaticity,Hydrophobicity pH7,Sidechain acidity,pKa,Average mass,van der Waals volume,Size,Is tiny?'
amino_acid_details_ = [
'A,ALA,Alanine,non-polar,aliphatic,hydrophobic,neutral,NULL,71.0788,67,small,1',
'C,CYS,Cysteine,polar,neither,hydrophilic,neutral,8.7,103.1388,86,small,1',
'D,ASP,Aspartic acid,charged,neither,hydrophilic,acidic,3.9,115.0886,91,small,0',
'E,GLU,Glutamic acid,charged,neither,hydrophilic,acidic,4.5,129.1155,109,large,0',
'F,PHE,Phenylalanine,non-polar,aromatic,hydrophobic,neutral,NULL,147.1766,135,large,0',
'G,GLY,Glycine,polar,neither,hydrophilic,neutral,NULL,57.0519,48,small,1',
'H,HIS,Histidine,charged,neither,hydrophilic,basic,6.04,137.1411,118,large,0',
'I,ILE,Isoleucine,non-polar,aliphatic,hydrophobic,neutral,NULL,113.1594,124,large,0',
'K,LYS,Lysine,charged,neither,hydrophilic,basic,10.54,128.1741,135,large,0',
'L,LEU,Leucine,non-polar,aliphatic,hydrophobic,neutral,NULL,113.1594,124,large,0',
'M,MET,Methionine,non-polar,aliphatic,hydrophobic,neutral,NULL,131.1986,124,large,0',
'N,ASN,Asparagine,polar,neither,hydrophilic,neutral,NULL,114.1039,96,small,0',
'P,PRO,Proline,non-polar,neither,hydrophobic,neutral,NULL,97.1167,90,small,0',
'Q,GLN,Glutamine,polar,neither,hydrophilic,neutral,NULL,128.1307,114,large,0',
'R,ARG,Arginine,charged,neither,hydrophilic,basic,12.48,156.1875,148,large,0',
'S,SER,Serine,polar,neither,hydrophilic,neutral,NULL,87.0782,73,small,1',
'T,THR,Threonine,polar,neither,hydrophilic,neutral,NULL,101.1051,93,small,0',
'V,VAL,Valine,non-polar,aliphatic,hydrophobic,neutral,NULL,99.1326,105,small,0',
'W,TRP,Tryptophan,non-polar,aromatic,hydrophobic,neutral,NULL,186.2132,163,large,0',
'Y,TYR,Tyrosine,polar,aromatic,hydrophobic,neutral,10.46,163.176,141,large,0' # Note: we treat tyrosine as hydrophobic in the polar/charged vs hydrophobic/Non-polar plot
]
amino_acid_detail_headers = [t.strip() for t in amino_acid_detail_headers.split(',') if t.strip()]
for aad in amino_acid_details_:
tokens = aad.split(',')
assert(len(tokens) == len(amino_acid_detail_headers))
d = {}
for x in range(len(amino_acid_detail_headers)):
d[amino_acid_detail_headers[x]] = tokens[x]
aa_code = d['Code']
BenchmarkRun.amino_acid_details[aa_code] = d
del d['Code']
d['Polarity'] = polarity_map.get(d['Polarity'], 'H')
d['Aromaticity'] = aromaticity_map[d['Aromaticity']]
d['Average mass'] = float(d['Average mass'])
                d['Is tiny?'] = d['Is tiny?'] == '1'  # tokens are strings, so compare against the string '1'
d['van der Waals volume'] = float(d['van der Waals volume'])
try: d['pKa'] = float(d['pKa'])
except: d['pKa'] = None
if aa_code == 'Y':
BenchmarkRun.HAA.add(aa_code) # Note: Treating tyrosine as hydrophobic
elif d['Polarity'] == 'C':
BenchmarkRun.CAA.add(aa_code)
elif d['Polarity'] == 'P':
BenchmarkRun.PAA.add(aa_code)
elif d['Polarity'] == 'H':
BenchmarkRun.HAA.add(aa_code)
assert(len(BenchmarkRun.CAA.intersection(BenchmarkRun.PAA)) == 0 and len(BenchmarkRun.PAA.intersection(BenchmarkRun.HAA)) == 0 and len(BenchmarkRun.HAA.intersection(BenchmarkRun.CAA)) == 0)
return BenchmarkRun.amino_acid_details, BenchmarkRun.CAA, BenchmarkRun.PAA, BenchmarkRun.HAA
def report(self, str, fn = None):
if (not self.silent) and (self.report_analysis):
if fn:
fn(str)
else:
print(str)
def create_analysis_directory(self, analysis_directory = None):
if self.analysis_directory:
return
if analysis_directory:
if not(os.path.isdir(analysis_directory)):
try:
os.makedirs(analysis_directory)
assert(os.path.isdir(analysis_directory))
self.analysis_directory = analysis_directory
except Exception as e:
                    raise colortext.Exception('An exception occurred creating the analysis directory %s.' % analysis_directory)
else:
self.analysis_directory = tempfile.mkdtemp( prefix = '%s-%s-%s_' % (time.strftime("%y%m%d"), getpass.getuser(), self.benchmark_run_name) )
def create_subplot_directory(self, analysis_directory = None):
if self.subplot_directory:
return
self.create_analysis_directory(analysis_directory = analysis_directory)
self.subplot_directory = os.path.join(self.analysis_directory, self.benchmark_run_name + '_subplots')
if not os.path.isdir(self.subplot_directory):
os.makedirs(self.subplot_directory)
def read_dataframe_from_content(self, hdfstore_blob):
fname = write_temp_file('/tmp', hdfstore_blob, ftype = 'wb')
try:
self.read_dataframe(fname)
os.remove(fname)
except:
os.remove(fname)
raise
def read_dataframe(self, analysis_pandas_input_filepath, read_scalar_adjustments = True, fail_on_missing_scalar_adjustments = False):
remove_file = False
if len(os.path.splitext(analysis_pandas_input_filepath)) > 1 and os.path.splitext(analysis_pandas_input_filepath)[1] == '.gz':
content = read_file(analysis_pandas_input_filepath)
analysis_pandas_input_filepath = write_temp_file('/tmp', content, ftype = 'wb')
remove_file = True
# We do not use "self.dataframe = store['dataframe']" as we used append in write_dataframe
self.dataframe = pandas.read_hdf(analysis_pandas_input_filepath, 'dataframe')
store = pandas.HDFStore(analysis_pandas_input_filepath)
self.scalar_adjustments = store['scalar_adjustments'].to_dict()
self.ddg_analysis_type = store['ddg_analysis_type'].to_dict()['ddg_analysis_type']
if read_scalar_adjustments:
try:
self.calculate_scalar_adjustments = store['calculate_scalar_adjustments'].to_dict()['calculate_scalar_adjustments']
except:
if not fail_on_missing_scalar_adjustments:
colortext.warning('The calculate_scalar_adjustments scalar was expected to be found in the pandas dataframe but is missing.')
self.calculate_scalar_adjustments = None
else:
raise
else:
self.calculate_scalar_adjustments = None
self.ddg_analysis_type_description = store['ddg_analysis_type_description'].to_dict()['ddg_analysis_type_description']
# Handle our old dataframe format
try:
self.misc_dataframe_attributes = store['misc_dataframe_attributes'].to_dict()
except: pass
# Handle our new dataframe format
try:
misc_dataframe_attribute_names = list(store['misc_dataframe_attribute_names'].to_dict().keys())
for k in misc_dataframe_attribute_names:
assert(k not in self.misc_dataframe_attributes)
self.misc_dataframe_attributes[k] = store[k].to_dict()[k]
except: pass
if 'Credit' in self.misc_dataframe_attributes:
self.credit = self.misc_dataframe_attributes['Credit']
store.close()
if remove_file:
os.remove(analysis_pandas_input_filepath)
def set_dataframe(self, dataframe, verbose = True):
self.dataframe = dataframe
# Report the SCOPe classification counts
SCOP_classifications = set(dataframe['WildTypeSCOPClassification'].values.tolist())
SCOP_folds = set(dataframe['WildTypeSCOPFold'].values.tolist())
SCOP_classes = set(dataframe['WildTypeSCOPClass'].values.tolist())
self.log('The mutated residues span {0} unique SCOP(e) classifications in {1} unique SCOP(e) folds and {2} unique SCOP(e) classes.'.format(len(SCOP_classifications), len(SCOP_folds), len(SCOP_classes)), colortext.message)
# Plot the optimum y-cutoff over a range of x-cutoffs for the fraction correct metric (when experimental data is available).
# Include the user's cutoff in the range.
if self.contains_experimental_data and self.calculate_scalar_adjustments:
if len(self.analysis_sets) == 0 and len(self.scalar_adjustments):
self.analysis_sets = list(self.scalar_adjustments.keys())
self.log('Determining scalar adjustments with which to scale the predicted values to improve the fraction correct measurement.', colortext.warning)
for analysis_set in self.analysis_sets:#scalar_adjustments.keys():
self.scalar_adjustments[analysis_set], plot_filename = self.plot_optimum_prediction_fraction_correct_cutoffs_over_range(analysis_set, min(self.stability_classication_x_cutoff, 0.5), max(self.stability_classication_x_cutoff, 3.0), suppress_plot = True, verbose = verbose)
# Add new columns derived from the adjusted values
for analysis_set in self.analysis_sets:
dataframe[BenchmarkRun.get_analysis_set_fieldname('Predicted_adj', analysis_set)] = dataframe['Predicted'] / self.scalar_adjustments[analysis_set]
dataframe[BenchmarkRun.get_analysis_set_fieldname('AbsoluteError_adj', analysis_set)] = (dataframe[BenchmarkRun.get_analysis_set_fieldname('Experimental', analysis_set)] - dataframe[BenchmarkRun.get_analysis_set_fieldname('Predicted_adj', analysis_set)]).abs()
add_fraction_correct_values_to_dataframe(dataframe, BenchmarkRun.get_analysis_set_fieldname('Experimental', analysis_set), BenchmarkRun.get_analysis_set_fieldname('Predicted_adj', analysis_set), BenchmarkRun.get_analysis_set_fieldname('StabilityClassification_adj', analysis_set), x_cutoff = self.stability_classication_x_cutoff, y_cutoff = self.stability_classication_y_cutoff, ignore_null_values = True)
# Write the dataframe out to CSV
if self.store_data_on_disk:
self.write_dataframe_to_csv(self.analysis_csv_input_filepath)
# Write the dataframe out to JSON
# Note: I rolled my own as dataframe.to_dict(orient = 'records') gives us the correct format but discards the DatasetID (index) field
json_records = {}
indices = dataframe.index.values.tolist()
for i in indices:
json_records[i] = {}
for k, v in dataframe.to_dict().items():
for i, v in v.items():
assert(k not in json_records[i])
json_records[i][k] = v
if self.analysis_json_input_filepath and self.store_data_on_disk:
write_file(self.analysis_json_input_filepath, json.dumps(json_records, indent = 4, sort_keys=True))
# Write the values computed in this function out to disk
analysis_pandas_input_filepath = self.analysis_pandas_input_filepath
if self.store_data_on_disk:
if os.path.exists(analysis_pandas_input_filepath):
os.remove(analysis_pandas_input_filepath)
else:
analysis_pandas_input_filepath = write_temp_file('/tmp', '', ftype = 'wb')
try:
analysis_pandas_input_filepath = self.write_dataframe(analysis_pandas_input_filepath)
dataframe_blob = read_file(analysis_pandas_input_filepath, binary = True)
if not self.store_data_on_disk:
os.remove(analysis_pandas_input_filepath)
except Exception as e:
if not self.store_data_on_disk:
os.remove(analysis_pandas_input_filepath)
raise
return dataframe_blob
def write_dataframe(self, analysis_pandas_input_filepath):
store = pandas.HDFStore(analysis_pandas_input_filepath)
# Using "store['dataframe'] = self.dataframe" throws a warning since some String columns contain null values i.e. mixed content
# To get around this, we use the append function (see https://github.com/pydata/pandas/issues/4415)
store.append('dataframe', self.dataframe)
store['scalar_adjustments'] =
|
pandas.Series(self.scalar_adjustments)
|
pandas.Series
|
import os
import numpy
import pandas as pd
import scipy.stats as st
os.chdir('/Users/jarvis/Dropbox/Apps/HypertensionOutputs')
def summary_cost(int_details,ctrl_m,ctrl_f,trt_m,trt_f, text):
int_dwc = 1 / (1 + discount_rate) ** numpy.array(range(time_horizon))
int_c = numpy.array([[prog_cost] * time_horizon for i in range(1)])
int_cost = numpy.sum(numpy.dot(int_c, int_dwc))
female_pop = 188340000
male_pop = 196604000
pop = female_pop + male_pop
f_prop = female_pop / pop
m_prop = male_pop / pop
samples = ctrl_m.shape[0]
cs = 0
nq = 0
ic = [0.00 for i in range(samples)]
q_gained = [0.00 for i in range(samples)]
q_inc_percent = [0.00 for i in range(samples)]
htn_cost = [0.00 for i in range(samples)]
cvd_cost = [0.00 for i in range(samples)]
net_cost = [0.00 for i in range(samples)]
exp_inc_per = [0.00 for i in range(samples)]
for i in range(samples):
q_gained[i] = (((ctrl_m.loc[i, "Average DALYs"] - trt_m.loc[i, "Average DALYs"])* m_prop) + ((ctrl_f.loc[i, "Average DALYs"] - trt_f.loc[i, "Average DALYs"])* f_prop))
q_inc_percent[i] = q_gained[i] * 100/((ctrl_m.loc[i, "Average DALYs"] * m_prop) + (ctrl_f.loc[i, "Average DALYs"] *f_prop))
htn_cost[i] = int_cost + ((trt_m.loc[i, "Average HTN Cost"] - ctrl_m.loc[i, "Average HTN Cost"]) * m_prop) + ((trt_f.loc[i, "Average HTN Cost"] - ctrl_f.loc[i, "Average HTN Cost"]) * f_prop)
cvd_cost[i] = ((trt_m.loc[i, "Average CVD Cost"] - ctrl_m.loc[i, "Average CVD Cost"] + trt_m.loc[i, "Average Chronic Cost"] - ctrl_m.loc[i, "Average Chronic Cost"]) * m_prop) + ((trt_f.loc[i, "Average CVD Cost"] - ctrl_f.loc[i, "Average CVD Cost"] + trt_f.loc[i, "Average Chronic Cost"] - ctrl_f.loc[i, "Average Chronic Cost"]) * f_prop)
exp_inc_per[i] = (((trt_m.loc[i, "Average Cost"] - ctrl_m.loc[i, "Average Cost"] + int_cost) * m_prop) + ((trt_f.loc[i, "Average Cost"] - ctrl_f.loc[i, "Average Cost"] + int_cost) * f_prop)) * 100 / ((ctrl_m.loc[i, "Average Cost"] * m_prop ) + (ctrl_f.loc[i, "Average Cost"] * f_prop))
net_cost[i] = htn_cost[i] + cvd_cost[i]
ic[i] = net_cost[i] / q_gained[i]
if net_cost[i] < 0:
cs = cs + 1
if q_gained[i] < 0:
nq = nq + 1
budget_impact = numpy.mean(net_cost) * pop / time_horizon
htn_percap = numpy.mean(htn_cost) / time_horizon
cvd_percap = numpy.mean(cvd_cost) / time_horizon
htn_annual = numpy.mean(htn_cost) * pop / time_horizon
cvd_annual = numpy.mean(cvd_cost) * pop / time_horizon
cost_inc = numpy.mean(exp_inc_per)
ICER = numpy.mean(ic)
QALY = numpy.mean(q_inc_percent)
HTN = numpy.mean(htn_cost)
CVD = numpy.mean(cvd_cost)
icer_95 = st.t.interval(0.95, samples - 1, loc=numpy.mean(ic), scale=st.sem(ic))
qaly_95 = st.t.interval(0.95, samples - 1, loc=numpy.mean(q_inc_percent), scale=st.sem(q_inc_percent))
htn = st.t.interval(0.95, samples - 1, loc=numpy.mean(htn_cost), scale=st.sem(htn_cost))
cvd = st.t.interval(0.95, samples - 1, loc=numpy.mean(cvd_cost), scale=st.sem(cvd_cost))
cost_inc_95 = st.t.interval(0.95, samples - 1, loc=numpy.mean(exp_inc_per), scale=st.sem(exp_inc_per))
if budget_impact < 0:
m_icer = 'Cost Saving'
s_icer = 'CS'
else:
m_icer = numpy.mean(net_cost) / numpy.mean(q_gained)
s_icer = str(numpy.round(m_icer,1))
m_daly = str(numpy.round(QALY,3)) + "\n(" + str(numpy.round(qaly_95[0],3)) + " to " + str(numpy.round(qaly_95[1],3)) + ")"
m_htn = str(numpy.round(HTN,2)) + "\n(" + str(numpy.round(htn[0],2)) + " to " + str(numpy.round(htn[1],2)) + ")"
m_cvd = str(numpy.round(CVD,2)) + "\n(" + str(numpy.round(cvd[0],2)) + " to " + str(numpy.round(cvd[1],2)) + ")"
m_costinc = str(numpy.round(cost_inc, 2)) + "\n(" + str(numpy.round(cost_inc_95[0], 2)) + " to " + str(numpy.round(cost_inc_95[1], 2)) + ")"
m_budget = str(numpy.round(budget_impact,0)/1000)
err_cost = 1.96 * st.sem(exp_inc_per)
err_daly = 1.96 * st.sem(q_inc_percent)
str_icer = text + " (" + s_icer + ")"
detailed = [int_details[2], int_details[0], int_details[1], int_details[3], int_details[4], ICER, icer_95[0],icer_95[1], QALY, qaly_95[0], qaly_95[1], htn[0], htn[1], cvd[0], cvd[1], budget_impact, htn_annual, cvd_annual, htn_percap, cvd_percap, cs, nq]
manuscript = [int_details[2], int_details[0], int_details[1], int_details[3], int_details[4], m_icer, m_daly, m_costinc, m_htn, m_cvd, m_budget, cs]
plot = [text, str_icer, cost_inc, QALY, err_cost, err_daly]
return detailed, manuscript, plot
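# The confidence intervals above are t-intervals around the sample mean. A self-contained illustration of the same
# scipy call (dummy numbers, not study data):
#
#     import numpy, scipy.stats as st
#     dummy = numpy.array([1.2, 0.9, 1.5, 1.1, 1.3])
#     ci = st.t.interval(0.95, len(dummy) - 1, loc=numpy.mean(dummy), scale=st.sem(dummy))
#     # ci is a (lower, upper) tuple, roughly (0.92, 1.48) for these numbers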
summary_output = []
appendix_output = []
plot_output = []
'''Analysis 0: Baseline'''
time_horizon = 20
prog_cost = 0.13
discount_rate = 0.03
os.chdir('/Users/jarvis/Dropbox/Apps/HypertensionOutputs/15Aug_AWS3')
fname = [0.4, 0.3, 0, 0.8, 0.6]
file_name_m = ("Aspire_Male_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) + ".csv")
file_name_f = ("Aspire_Female_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) + ".csv")
control_m = pd.read_csv(file_name_m)
control_f = pd.read_csv(file_name_f)
fname = [0.7, 0.7, 1, 0.8, 0.8]
file_name_m = ("Aspire_Male_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) + ".csv")
file_name_f = ("Aspire_Female_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) + ".csv")
treatment_f = pd.read_csv(file_name_f)
treatment_m = pd.read_csv(file_name_m)
res = summary_cost(fname, control_m, control_f, treatment_m, treatment_f,'Base Case')
summary_output.append(res[0])
appendix_output.append(res[1])
plot_output.append(res[2])
'''Analysis 1: Doubled Medication Cost'''
time_horizon = 20
prog_cost = 0.13
discount_rate = 0.03
os.chdir('/Users/jarvis/Dropbox/Apps/HypertensionOutputs/PSAFinal')
fname = [0.4, 0.3, 0, 0.8, 0.6, 2, 0, 20]
file_name_m = ("Aspire_Male_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) +"_CF_"+ str(fname[5]) + "_RR_"+ str(fname[6]) + "_TH_"+ str(fname[7]) + ".csv")
file_name_f = ("Aspire_Female_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) +"_CF_"+ str(fname[5]) + "_RR_"+ str(fname[6]) + "_TH_"+ str(fname[7]) + ".csv")
control_m = pd.read_csv(file_name_m)
control_f = pd.read_csv(file_name_f)
fname = [0.7, 0.7, 1, 0.8, 0.8, 2, 0, 20]
file_name_f = ("Aspire_Female_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) +"_CF_"+ str(fname[5]) + "_RR_"+ str(fname[6]) + "_TH_"+ str(fname[7]) + ".csv")
file_name_m = ("Aspire_Male_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) +"_CF_"+ str(fname[5]) + "_RR_"+ str(fname[6]) + "_TH_"+ str(fname[7]) + ".csv")
treatment_f = pd.read_csv(file_name_f)
treatment_m = pd.read_csv(file_name_m)
res = summary_cost(fname, control_m, control_f, treatment_m, treatment_f,'2X Medication Cost')
summary_output.append(res[0])
appendix_output.append(res[1])
plot_output.append(res[2])
'''Analysis 2: Increased Programmatic Cost'''
time_horizon = 20
prog_cost = 0.13*4
discount_rate = 0.03
os.chdir('/Users/jarvis/Dropbox/Apps/HypertensionOutputs/15Aug_AWS3')
fname = [0.4, 0.3, 0, 0.8, 0.6]
file_name_m = ("Aspire_Male_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) + ".csv")
file_name_f = ("Aspire_Female_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) + ".csv")
control_m = pd.read_csv(file_name_m)
control_f =
|
pd.read_csv(file_name_f)
|
pandas.read_csv
|
"""Univariate anomaly detection module."""
__version__ = '1.0.0'
from typing import Dict
from fastapi import FastAPI
from pydantic import BaseModel
from adtk.detector import PersistAD, ThresholdAD, LevelShiftAD, VolatilityShiftAD
import numpy
import pandas
from .core.tools import aggregate_anomalies
app = FastAPI(
title='Univariate anomaly detection module.',
docs_url='/documentation',
redoc_url='/redoc',
description='Univariate anomaly detection based on historic data for time series.',
version=__version__
)
class Parameters(BaseModel):
"""Parameters for ADTK PersistAD"""
c: float = 3.0
window: str = '28D'
aggregate_anomalies: str = None
class TimeSeriesData(BaseModel):
"""Data provided for point anomaly detection."""
train_data: Dict[str, float]
score_data: Dict[str, float]
parameters: Parameters
class Anomalies(BaseModel):
"""Anomalies"""
anomaly_list: Dict[str, bool]
class ParametersThresholdAD(BaseModel):
"""Parameters for ADTK ThresholdAD"""
high: float = None
low: float = None
aggregate_anomalies: str = None
class TimeSeriesDataThresholdAD(BaseModel):
"""Data provided for point anomaly detection."""
score_data: Dict[str, float]
parameters: ParametersThresholdAD
class ParametersLevelShiftAD(BaseModel):
"""Parameters for ADTK LevelShiftAD"""
c: float = 20.0
window: str = '60S'
aggregate_anomalies: str = None
class TimeSeriesDataLevelShiftAD(BaseModel):
"""Data provided for point anomaly detection."""
score_data: Dict[str, float]
parameters: ParametersLevelShiftAD
class ParametersVolatilityShiftAD(BaseModel):
"""Parameters for ADTK LevelShiftAD"""
c: float = 20.0
window: str = '60S'
aggregate_anomalies: str = None
class TimeSeriesDataVolatilityShiftAD(BaseModel):
"""Data provided for point anomaly detection."""
score_data: Dict[str, float]
parameters: ParametersVolatilityShiftAD
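# Illustrative request body for the /detect-point-anomalies endpoint below (keys and values
# are invented; the timestamp keys are epoch milliseconds, matching the unit='ms' conversion
# applied to the Series index):
# {
#     "train_data": {"1617235200000": 1.0, "1617321600000": 1.2},
#     "score_data": {"1617408000000": 5.3},
#     "parameters": {"c": 3.0, "window": "28D"}
# }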
@app.post('/detect-point-anomalies', response_model=Anomalies)
async def detect_point_anomalies(time_series_data: TimeSeriesData):
"""Apply point anomaly detection and return list of anomalies."""
# create pandas Series from dictionary containing the time series
train_data = pandas.Series(time_series_data.train_data)
train_data.index =
|
pandas.to_datetime(train_data.index, unit='ms')
|
pandas.to_datetime
|
#%% Import
import hospice_project.definitions as defs
import hospice_project.data.transformers as t
import pandas as pd
import numpy as np
from haversine import haversine_vector, Unit
merged_df =
|
pd.read_pickle('data/interim/merged_df.pickle')
|
pandas.read_pickle
|
from qutip import *
from ..mf import *
import pandas as pd
from scipy.interpolate import interp1d
from copy import deepcopy
import matplotlib.pyplot as plt
from tqdm import tqdm  # used by Spectrum.iterative_calculate below
def ham_gen_jc(params, alpha=0):
sz = tensor(sigmaz(), qeye(params.c_levels))
sm = tensor(sigmam(), qeye(params.c_levels))
a = tensor(qeye(2), destroy(params.c_levels)) + alpha
ham = (params.fc-params.fd)*a.dag()*a
ham += params.eps*(a+a.dag())
ham += 0.5*(params.f01-params.fd)*sz
ham += params.g*(a*sm.dag() + a.dag()*sm)
ham *= 2*np.pi
return ham
def c_ops_gen_jc(params, alpha=0):
c_ops = []
sm = tensor(sigmam(), qeye(params.c_levels))
a = tensor(qeye(2), destroy(params.c_levels)) + alpha
if params.gamma > 0.0:
c_ops.append(np.sqrt(2*np.pi*params.gamma*(1+params.n_t))*sm)
if params.n_t > 0:
c_ops.append(np.sqrt(2*np.pi*params.gamma*params.n_t)*sm.dag())
if params.gamma_phi > 0.0:
c_ops.append(np.sqrt(2*np.pi*params.gamma_phi)*sm.dag()*sm)
if params.kappa > 0.0:
c_ops.append(np.sqrt(2*np.pi*params.kappa*(1+params.n_c))*a)
if params.n_c > 0:
c_ops.append(np.sqrt(2*np.pi*params.kappa*params.n_c)*a.dag())
return c_ops
def iterative_alpha_calc(params, n_cycles=10, initial_alpha=0):
alpha = initial_alpha
try:
for idx in range(n_cycles):
ham = ham_gen_jc(params, alpha=alpha)
c_ops = c_ops_gen_jc(params, alpha=alpha)
rho = steadystate(ham, c_ops)
a = tensor(qeye(2), destroy(params.c_levels)) + alpha
a_exp = expect(a, rho)
alpha = a_exp
except:
alpha = None
return alpha
class Spectrum:
def __init__(self, parameters):
print('hello')
self.parameters = deepcopy(parameters)
self.mf_amplitude = None
self.me_amplitude = None
self.transmission_exp = None
self.hilbert_params = None
def iterative_calculate(self, fd_array, initial_alpha=0, n_cycles=10, prune=True):
if self.parameters.fc < self.parameters.f01:
change = 'hard'
else:
change = 'soft'
params = deepcopy(self.parameters)
fd_array = np.sort(fd_array)
a_array = np.zeros(fd_array.shape[0], dtype=complex)
alpha = initial_alpha
for fd_idx, fd in tqdm(enumerate(fd_array)):
params.fd = fd
alpha = iterative_alpha_calc(params, initial_alpha=alpha, n_cycles=n_cycles)
a_array[fd_idx] = alpha
if change == 'hard':
alpha_bright_iterative = pd.Series(a_array, index=fd_array, name='alpha_bright')
else:
alpha_dim_iterative = pd.Series(a_array, index=fd_array, name='alpha_dim')
fd_array = np.flip(fd_array)
a_array = np.zeros(fd_array.shape[0], dtype=complex)
alpha = initial_alpha
for fd_idx, fd in tqdm(enumerate(fd_array)):
params.fd = fd
alpha = iterative_alpha_calc(params, initial_alpha=alpha, n_cycles=n_cycles)
a_array[fd_idx] = alpha
if change == 'hard':
alpha_dim_iterative = pd.Series(a_array, index=fd_array, name='alpha_dim')
else:
alpha_bright_iterative = pd.Series(a_array, index=fd_array, name='alpha_bright')
if prune:
alpha_dim_iterative = alpha_dim_iterative.dropna()
alpha_bright_iterative = alpha_bright_iterative.dropna()
alpha_dim_iterative.sort_index(inplace=True)
alpha_bright_iterative.sort_index(inplace=True)
if change == 'hard':
# alpha_dim_diff = np.diff(alpha_dim_iterative)/np.diff(alpha_dim_iterative.index)
# first_dim_idx = np.argmax(np.abs(alpha_dim_diff)) + 1
first_dim_idx = np.argmax(alpha_dim_iterative.real)
alpha_dim_iterative = alpha_dim_iterative.iloc[first_dim_idx:]
# alpha_bright_diff = np.diff(alpha_bright_iterative) / np.diff(alpha_bright_iterative.index)
# last_bright_idx = np.argmax(np.abs(alpha_bright_diff))
last_bright_idx = np.argmin(alpha_bright_iterative.imag)
alpha_bright_iterative = alpha_bright_iterative.iloc[:last_bright_idx + 1]
else:
first_bright_idx = np.argmin(alpha_bright_iterative.imag)
alpha_bright_iterative = alpha_bright_iterative.iloc[first_bright_idx:]
last_dim_idx = np.argmin(alpha_dim_iterative.real)
alpha_dim_iterative = alpha_dim_iterative.iloc[:last_dim_idx+1]
self.iterative_amplitude = pd.concat([alpha_dim_iterative, alpha_bright_iterative], axis=1)
def gen_raw_hilbert_params(self, fd_array, c_levels):
self.hilbert_params = pd.DataFrame(np.zeros([fd_array.shape[0], 1]), index=fd_array, columns=['alpha_0'])
self.hilbert_params['c_levels'] = c_levels
def gen_iterative_hilbert_params(self, fd_limits, kind='linear', fill_value='extrapolate', fraction=0.5,
level_scaling=1.0, max_shift=False, max_levels=True, relative='dim', relative_crossover=None, c_levels_bistable=None):
if self.parameters.fc < self.parameters.f01:
change = 'hard'
else:
change = 'soft'
alpha_dim = self.iterative_amplitude['alpha_dim'].dropna()
# alpha_dim.sort_index(inplace=True)
# alpha_dim_diff = np.diff(alpha_dim)/np.diff(alpha_dim.index)
# first_dim_idx = np.argmax(np.abs(alpha_dim_diff)) + 1
# alpha_dim = alpha_dim.iloc[first_dim_idx:]
alpha_bright = self.iterative_amplitude['alpha_bright'].dropna()
# alpha_bright.sort_index(inplace=True)
# alpha_bright_diff = np.diff(alpha_bright) / np.diff(alpha_bright.index)
# last_bright_idx = np.argmax(np.abs(alpha_bright_diff))
# alpha_bright = alpha_bright.iloc[:last_bright_idx]
new_iterative_alphas = pd.concat([alpha_dim, alpha_bright], axis=1)
self.iterative_amplitude = new_iterative_alphas
alpha_dim_real_func = interp1d(alpha_dim.index, alpha_dim.real, kind=kind, fill_value=fill_value)
alpha_dim_imag_func = interp1d(alpha_dim.index, alpha_dim.imag, kind=kind, fill_value=fill_value)
def alpha_dim_func_single(fd):
alpha_dim = alpha_dim_real_func(fd) + 1j * alpha_dim_imag_func(fd)
return alpha_dim
alpha_dim_func_vec = np.vectorize(alpha_dim_func_single)
def alpha_dim_func(fd_array):
alpha_dim_array = alpha_dim_func_vec(fd_array)
alpha_dim_series = pd.Series(alpha_dim_array, index=fd_array, name='alpha_dim_func')
return alpha_dim_series
alpha_bright_real_func = interp1d(alpha_bright.index, alpha_bright.real, kind=kind,
fill_value=fill_value)
alpha_bright_imag_func = interp1d(alpha_bright.index, alpha_bright.imag, kind=kind,
fill_value=fill_value)
def alpha_bright_func_single(fd):
alpha_bright = alpha_bright_real_func(fd) + 1j * alpha_bright_imag_func(fd)
return alpha_bright
alpha_bright_func_vec = np.vectorize(alpha_bright_func_single)
def alpha_bright_func(fd_array):
alpha_bright_array = alpha_bright_func_vec(fd_array)
alpha_bright_series = pd.Series(alpha_bright_array, index=fd_array, name='alpha_bright')
return alpha_bright_series
alpha_dim_interp = alpha_dim_func(self.iterative_amplitude.index)
alpha_bright_interp = alpha_bright_func(self.iterative_amplitude.index)
alpha_diff_interp = (alpha_bright_interp - alpha_dim_interp).dropna()
if max_shift:
min_diff = np.min(np.abs(alpha_diff_interp))
alpha_diff_unit_interp = alpha_diff_interp / np.abs(alpha_diff_interp)
if relative == 'dim':
alpha_0_interp = alpha_dim_interp + fraction * min_diff * alpha_diff_unit_interp
elif relative == 'bright':
alpha_0_interp = alpha_bright_interp - fraction * min_diff * alpha_diff_unit_interp
elif relative == 'both':
if change == 'soft':
alpha_0_interp = alpha_dim_interp + fraction * min_diff * alpha_diff_unit_interp
alpha_0_interp[relative_crossover:] = alpha_bright_interp[relative_crossover:] - fraction * min_diff * alpha_diff_unit_interp[relative_crossover:]
else:
alpha_0_interp = alpha_dim_interp + fraction * min_diff * alpha_diff_unit_interp
alpha_0_interp[:relative_crossover] = alpha_bright_interp[:relative_crossover] - fraction * min_diff * alpha_diff_unit_interp[:relative_crossover]
else:
raise Exception('Relative is neither bright, dim nor both.')
else:
if relative == 'dim':
alpha_0_interp = alpha_dim_interp + fraction * alpha_diff_interp
elif relative == 'bright':
alpha_0_interp = alpha_bright_interp - fraction * alpha_diff_interp
else:
raise Exception('Relative is neither bright nor dim.')
alpha_diff_interp.name = 'alpha_diff'
alpha_0_interp.name = 'alpha_0'
hilbert_params = pd.concat([alpha_diff_interp, alpha_0_interp], axis=1)
if max_levels:
if c_levels_bistable is not None:
hilbert_params['c_levels'] = c_levels_bistable
else:
min_diff = np.min(np.abs(alpha_diff_interp))
hilbert_params['c_levels'] = int(np.ceil(level_scaling * min_diff ** 2))
else:
hilbert_params['c_levels'] = np.ceil(level_scaling * np.abs(alpha_diff_interp.values) ** 2).astype(int)
hilbert_params['c_levels'].loc[:fd_limits[0]] = self.parameters.c_levels
hilbert_params['c_levels'].loc[fd_limits[1]:] = self.parameters.c_levels
if change == 'hard':
hilbert_params['alpha_0'].loc[:fd_limits[0]] = self.iterative_amplitude['alpha_bright'].loc[:fd_limits[0]]
hilbert_params['alpha_0'].loc[fd_limits[1]:] = self.iterative_amplitude['alpha_dim'].loc[fd_limits[1]:]
else:
hilbert_params['alpha_0'].loc[:fd_limits[0]] = self.iterative_amplitude['alpha_dim'].loc[:fd_limits[0]]
hilbert_params['alpha_0'].loc[fd_limits[1]:] = self.iterative_amplitude['alpha_bright'].loc[fd_limits[1]:]
# hilbert_params = pd.concat([hilbert_params, alpha_dim_interp, alpha_bright_interp], axis=1)
self.alpha_dim_interp = alpha_dim_interp
self.alpha_bright_interp = alpha_bright_interp
self.alpha_diff_interp = alpha_diff_interp
self.hilbert_params = hilbert_params
self.completed = np.zeros(hilbert_params.index.shape[0])
self.attempted = np.zeros(hilbert_params.index.shape[0])
a_array = np.zeros(hilbert_params.index.shape[0], dtype=complex)
self.me_amplitude = pd.DataFrame(a_array, index=hilbert_params.index)
def mf_calculate(self, fd_array, characterise_only=False):
if self.mf_amplitude is None:
self.mf_amplitude = map_mf_jc(self.parameters, fd_array=fd_array, characterise_only=characterise_only)
else:
fd0 = fd_array[0]
fd1 = fd_array[-1]
idx0 = self.mf_amplitude.index.get_loc(fd0, method='nearest')
idx1 = self.mf_amplitude.index.get_loc(fd1, method='nearest')
alpha0_dim = self.mf_amplitude['a_dim'].iloc[idx0]
sm0_dim = self.mf_amplitude['sm_dim'].iloc[idx0]
sz0_dim = self.mf_amplitude['sz_dim'].iloc[idx0]
alpha0_bright = self.mf_amplitude['a_bright'].iloc[idx1]
sm0_bright = self.mf_amplitude['sm_bright'].iloc[idx1]
sz0_bright = self.mf_amplitude['sz_bright'].iloc[idx1]
mf_amplitude_new = mf_characterise_jc(self.parameters, fd_array, alpha0_bright=alpha0_bright,
sm0_bright=sm0_bright, sz0_bright=sz0_bright, alpha0_dim=alpha0_dim,
sm0_dim=sm0_dim, sz0_dim=sz0_dim, check_bistability=False)
self.mf_amplitude = pd.concat([self.mf_amplitude, mf_amplitude_new])
self.mf_amplitude = self.mf_amplitude.sort_index()
self.mf_amplitude = self.mf_amplitude[~self.mf_amplitude.index.duplicated(keep='first')]
def generate_hilbert_params(self, c_levels_bi_scale=1.0, scale=0.5, fd_limits=None, max_shift=True,
c_levels_mono=10, c_levels_bi=10, alpha_0_mono=0, alpha_0_bi=0, kind='linear',
method='extrapolate_alpha_0'):
print(c_levels_bi)
self.hilbert_params = generate_hilbert_params(self.mf_amplitude, c_levels_bi_scale=c_levels_bi_scale,
scale=scale, fd_limits=fd_limits, kind=kind,
max_shift=max_shift, c_levels_mono=c_levels_mono,
c_levels_bi=c_levels_bi, alpha_0_mono=alpha_0_mono,
alpha_0_bi=alpha_0_bi, method=method)
def me_calculate(self, solver_kwargs={}, c_levels_bi_scale=1.0, scale=0.5, fd_limits=None, fill_value='extrapolate',
max_shift=False, c_levels_mono=10, c_levels_bi=10, alpha_0_mono=0, alpha_0_bi=0, kind='linear',
method='extrapolate_alpha_0', level_scaling=1.0, max_levels=True, save_name=None, resume_uncompleted=True):
if self.hilbert_params is None:
if method == 'iterative':
frequencies = self.iterative_amplitude.index
self.gen_iterative_hilbert_params(fd_limits, kind=kind, fill_value=fill_value, fraction=scale,
level_scaling=level_scaling, max_shift=max_shift, max_levels=max_levels)
else:
frequencies = self.mf_amplitude.index
self.generate_hilbert_params(c_levels_bi_scale=c_levels_bi_scale, scale=scale, max_shift=max_shift,
c_levels_mono=c_levels_mono, c_levels_bi=c_levels_bi,
alpha_0_mono=alpha_0_mono,
alpha_0_bi=alpha_0_bi, fd_limits=fd_limits, kind=kind, method=method)
if self.me_amplitude is None:
self.completed = np.zeros(self.hilbert_params.index.shape[0])
self.attempted = np.zeros(self.hilbert_params.index.shape[0])
a_array = np.zeros(self.hilbert_params.index.shape[0], dtype=complex)
self.me_amplitude =
|
pd.DataFrame(a_array, index=self.hilbert_params.index)
|
pandas.DataFrame
|
#coding: utf-8
from pandas import Series, DataFrame
import pandas as pd
import numpy as np
from log_lib import log
def frame():
data = {
'state': [
'hubei', 'sichuan', 'guangzhou'], 'year': [
2015, 2016, 2017], 'pop': [
6, 7, 8]}
frame = DataFrame(data)
log.notice(frame)
log.notice(DataFrame(data, columns=['year', 'state', 'pop', 'debt']))
log.notice(frame.columns)
log.notice(frame['state'])
log.notice(frame['pop'])
log.notice(frame['year'])
frame['debt'] = 16.5
log.notice(frame)
frame['debt'] = np.arange(3.)
log.notice(frame)
frame2 = DataFrame(
data, columns=[
'year', 'state', 'pop', 'debt'], index=[
'one', 'two', 'three'])
log.notice(frame2)
val = Series([-1.2, -1.3, -1.4], index=['two', 'three', 'one'])
frame2['debt'] = val
log.notice(frame2)
frame2['eastern'] = frame2.state == 'hubei'
log.notice(frame2)
log.notice(frame2.index)
def frame3():
pop = {
'hubei': {
2001: 2.4, 2002: 2.5}, "guangdong": {
2000: 2.6, 2001: 2.7}}
frame3 =
|
DataFrame(pop)
|
pandas.DataFrame
|
'''
Run using python from terminal.
Doesn't read from scripts directory (L13) when run from poetry shell.
'''
import pandas as pd
import pandas.testing as pd_testing
import typing as tp
import os
import unittest
from unittest import mock
import datetime
from scripts import influx_metrics_univ3 as imetrics
class TestInfluxMetrics(unittest.TestCase):
def get_price_cumulatives_df(self, path) -> pd.DataFrame:
'''
Helper to return dataframe used to mock out `query_data_frame` in the
`get_price_cumulatives` function in `scripts/influx_metrics_univ3.py`
'''
base = os.path.dirname(os.path.abspath(__file__))
base = os.path.abspath(os.path.join(base, os.pardir))
base = os.path.join(base, 'helpers')
base = os.path.join(base, path)
base = os.path.join(base, 'get-price-cumulatives.csv')
df = pd.read_csv(base, sep=',')
df._start = pd.to_datetime(df._start)
df._stop = pd.to_datetime(df._stop)
df._time = pd.to_datetime(df._time)
return df
def get_find_start_df(self, path) -> pd.DataFrame:
'''
Helper to return dataframe used to mock out `query_data_frame` in the
`find_start` function in `scripts/influx_metrics_univ3.py`
'''
base = os.path.dirname(os.path.abspath(__file__))
base = os.path.abspath(os.path.join(base, os.pardir))
base = os.path.join(base, 'helpers')
base = os.path.join(base, path)
base = os.path.join(base, 'find_start.csv')
df =
|
pd.read_csv(base, sep=',', index_col=0)
|
pandas.read_csv
|
# -*- coding: utf-8 -*-
"""
Created on Fri May 7 09:22:24 2021
@author: andre
"""
import numpy as np
import networkx as nx
import pandas as pd
import Clustering as cl
import bikeshare as bs
#%% Initialising
city = "chic"
Data = bs.Data(city, year = 2019, month = 9)
locations = Data.stat.locations
n_tot = Data.stat.n_tot
id_index = Data.stat.id_index
week_days = [2, 3, 4, 5, 6, 9, 10, 11, 12, 13, 16, 17, 18, 19, 20, 23, 24, 25, 26, 27, 30]
weekend_days =[1, 7, 8, 14, 15, 21, 22, 28, 29]
month = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
21, 22, 23, 24, 25, 26, 27, 28, 29, 30]
df = Data.df
df['hour'] = pd.to_datetime(df['start_t']).dt.hour
df['day'] =
|
pd.to_datetime(df['start_t'])
|
pandas.to_datetime
|
from io import BytesIO
import os, sys, datetime, csv, tempfile, argparse
import pandas as pd
def disk_in_memory(wav_bytes: bytes) -> BytesIO:
"""
this spooled wav was chosen because it's much more efficient than writing to disk:
it effectively writes to memory only and can still be read (by some Python modules) as a file
"""
with tempfile.SpooledTemporaryFile() as spooled_wav:
spooled_wav.write(wav_bytes)
spooled_wav.seek(0)
return BytesIO(spooled_wav.read())
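# Hypothetical usage sketch of disk_in_memory ("example.wav" is a placeholder path):
#   with open("example.wav", "rb") as f:
#       wav_buffer = disk_in_memory(f.read())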
def import_csvs(filepaths: str, disable_wer: bool = False) -> pd.DataFrame:
if disable_wer:
cols = ["filename"]
else:
cols = ["filename", "transcript"]
df = pd.DataFrame(columns=cols)
for csv_path in filepaths.split(","):
df_new = pd.read_csv(csv_path, index_col=None)
df_new = df_new[cols]
df =
|
pd.concat([df, df_new], sort=False)
|
pandas.concat
|
### compiler.py : Compiler for all desired data (nirc2, weather, seeing, telemetry, temperature)
### Author : <NAME>
### Date : 6/3/21
from . import times
from . import strehl
from . import mkwc
from . import telemetry as telem
from . import temperature as temp
from . import templates
import numpy as np
import pandas as pd
import os
# Check whether PyYAML is installed
try:
import yaml
except ImportError:
raise ValueError("PyYAML not installed. Please install from https://anaconda.org/anaconda/pyyaml")
### Use importlib for package resources
try:
import importlib.resources as pkg_resources
except ImportError:
# Try backported to PY<37 `importlib_resources`.
import importlib_resources as pkg_resources
# All data types that can be compiled by this package
accept_labels = ['cfht', 'mass', 'dimm', 'masspro', 'k2AO', 'k2L4', 'k2ENV', 'telem']
# Default parameter file for data keywords
default_parfile = 'keyword_defaults.yaml'
# Shorthand / nicknames for data types
expand = {
'temp': ['k2AO', 'k2L4', 'k2ENV'],
'seeing': ['mass', 'dimm', 'masspro'],
'weather': ['cfht'],
'telemetry': ['telem'],
'all': accept_labels,
}
# Utility functions
def check_dtypes(data_types, file_paths):
"""
Checks user-input data types and file paths and re-formats if necessary.
data_types: list of data types or combined types
file_paths: dictionary with data_type: file_path pairs
returns: list of properly formatted data types
"""
new_dtypes = []
# Edge case: only temperature dir specified
if 'temp' in file_paths and os.path.isdir(file_paths['temp']):
file_paths['k2AO'] = file_paths['temp']+'k2AOtemps/'
file_paths['k2L4'] = file_paths['temp']+'k2L4temps/'
file_paths['k2ENV'] = file_paths['temp']+'k2envMet/'
# Check requested data types
for dtype in data_types:
# Check for nicknames
if dtype in expand:
new_dtypes.extend(expand[dtype])
elif dtype in accept_labels: # can get data
new_dtypes.append(dtype)
# Return cleaned list
return new_dtypes
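# Illustrative call (not from the original source): nicknames are expanded and unknown
# labels are silently dropped, e.g.
#   check_dtypes(['seeing', 'telem', 'bogus'], {})
# returns ['mass', 'dimm', 'masspro', 'telem'].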
def load_default_parfile():
"""
Loads default parameter file from package.
returns: dictionary of dtypes, with sub-dictionaries of data labels:bool/string
"""
try: # Load from templates module
file = pkg_resources.open_text(templates, default_parfile)
default_params = yaml.load(file, Loader=yaml.FullLoader)
except: # Raise error and request param file
raise ValueError("Unable to load default parameters. Please specify a parameter file.")
return default_params
def read_params(param_file):
"""
Reads parameters from the specified file or returns default parameters if no file is specified.
param_file: path to parameter file, as a string, or None to return default
returns: dictionary of dtypes, with sub-dictionaries of data labels:bool/string
"""
if param_file is None: # load default
params = load_default_parfile()
elif isinstance(param_file, str) and os.path.exists(param_file): # Load user-specified
try: # Open stream and read as YAML
with open(param_file, 'r') as stream:
params = yaml.load(stream, Loader=yaml.FullLoader)
except: # Unable to load
raise ValueError(f"Failed to load {param_file}. Please check that PyYAML is installed \
and that the file is formatted correctly.")
else: # Invalid input
raise ValueError(f"{param_file} is not a valid parameter file.")
return params
#############################################
######## Interface with other modules #######
#############################################
# Have files with acceptable columns for each data type so you can check inputs
# Accept column inputs to the data function and take optional arguments in combine_strehl
def data_func(dtype, data_dir=None):
"""
Returns the function to get data for the specified dtype
and whether data needs to be matched to nirc2 mjds
"""
# MKWC seeing (mass, dimm, masspro) or weather (cfht) files
if dtype in ['cfht']+expand['seeing']:
return lambda files,mjds: mkwc.from_nirc2(mjds, dtype, data_dir), True
# Temperature files (k2AO, k2L4, or k2ENV)
if dtype in expand['temp']:
return lambda files,mjds: temp.from_mjds(mjds, dtype, data_dir), True
# Telemetry files (matched to NIRC2 filenames)
if dtype=='telem':
return lambda files,mjds: telem.from_nirc2(mjds, files, data_dir), False
def change_cols(data, params):
"""
Changes / filters columns according to the parameters passed.
data: a dataframe with columns for a certain dtype
params: a mapping of column names to values
A False value means the column will be omitted
A True value means the column will be included as-is
A string value means the column will be re-named to that value
"""
# Drop bad columns first
good_cols = [col for col,val in params.items() if (val and col in data.columns)]
new_data = data[good_cols].copy()
# Re-map column names
col_mapper = {col: new_col for col,new_col in params.items() if isinstance(new_col, str)}
new_data.rename(columns=col_mapper, inplace=True)
return new_data
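# Hedged sketch of the parameter semantics (column names invented, assumed present in data):
#   change_cols(data, {'wind_speed': True, 'pressure': False, 'temperature': 'cfht_temp'})
# keeps 'wind_speed' as-is, drops 'pressure', and renames 'temperature' to 'cfht_temp'.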
################################
###### Compiler Functions ######
################################
def match_data(mjd1, mjd2):
"""
Matches secondary MJD values to those of their closest primary observations.
mjd1: Nx1 array/series of primary MJDs
mjd2: Mx1 array/series of secondary MJDs
returns: Nx1 array of indices in mjd2 which best match entries of mjd1
"""
# Edge case
if mjd1.empty or mjd2.empty:
return None
# Get mjds from dataframes
mjd1 = np.array(mjd1).reshape(-1,1) # column vector
mjd2 = np.array(mjd2).reshape(1,-1) # row vector
# Take difference of mjd1 with mjd2
diffs = np.abs(mjd1 - mjd2)
# Find smallest difference for each original mjd
idxs = np.argmin(diffs, axis=1)
# Return indices of matches in mjd2
return idxs
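# Minimal worked example with made-up MJDs:
#   match_data(pd.Series([100.0, 102.0]), pd.Series([99.8, 100.4, 101.9]))
# returns array([0, 2]) -- each primary MJD is paired with the index of its closest secondary MJD.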
# data_types can contain: 'chft', 'mass', 'dimm', 'masspro',
# 'telem', 'k2AO', 'k2L4', or 'k2ENV'
# 'temp' or 'seeing' will be expanded to ['k2AO', 'k2L4', 'k2ENV'] and
# ['mass', 'dimm', 'masspro'], respectively
def combine_strehl(strehl_file, data_types, file_paths={}, test=False,
param_file=None):
"""
Combines and matches data from a certain Strehl file with other specified data types.
NIRC2 files must be in the same directory as the Strehl file.
strehl_file: a string containing the file path for a Strehl file
data_types: a list of data types to match with the Strehl file
file_paths: a dictionary mapping data types to file paths for the relevant data
test: bool, if True the compiler will match only the first 3 lines of the Strehl file
param_file: string, path to a parameter file (see templates/keyword_defaults.yaml)
if None, default parameters will be used
returns: a fully matched dataset with Strehl, NIRC2, and other secondary data
"""
### Check file paths parameter dict, load if yaml
if not isinstance(file_paths, dict) and os.path.isfile(file_paths):
with open(file_paths) as file:
file_paths = yaml.load(file, Loader=yaml.FullLoader)
# Check file paths are valid
for dtype, data_dir in file_paths.items():
if not os.path.isdir(data_dir):
raise ValueError((f"The path specified for {dtype} data ({data_dir}) "
"is not a valid directory."))
### Sanitize data types
data_types = check_dtypes(data_types, file_paths)
### Read in parameter file
params = read_params(param_file)
### Read in Strehl file
nirc2_data = strehl.from_strehl(strehl_file)
if test: # Take only first few files
nirc2_data = nirc2_data.loc[:3]
# Full data container
all_data = [nirc2_data.reset_index(drop=True)]
# Loop through and get data
for dtype in data_types:
# Get data directory
data_dir = file_paths[dtype] if dtype in file_paths else None
# Fetch data function from dictionary
get_data, match = data_func(dtype, data_dir) # Data retrieval function
# Get other data from strehl info
other_data = get_data(nirc2_data.nirc2_file, nirc2_data.nirc2_mjd)
# Change or omit selected columns
other_data = change_cols(other_data, params[dtype])
if match: # Needs to be matched
if other_data.empty: # No data found
other_data = pd.DataFrame(columns=other_data.columns, index=range(len(nirc2_data)))
else: # Get indices of matched data
idxs = match_data(nirc2_data.nirc2_mjd, other_data[dtype+'_mjd'])
other_data = other_data.iloc[idxs]
# Add to all data
all_data.append(other_data.reset_index(drop=True))
# Concatenate new data with nirc2
return
|
pd.concat(all_data, axis=1)
|
pandas.concat
|
#!/usr/bin/env python
# coding: utf-8
# In[24]:
import numpy
import pandas as pd
import tensorflow as tf
from PyEMD import CEEMDAN
import warnings
warnings.filterwarnings("ignore")
### import the libraries
from tensorflow import keras
from tensorflow.keras import layers
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from sklearn import metrics
from sklearn.preprocessing import StandardScaler
from math import sqrt
# convert an array of values into a dataset matrix
def create_dataset(dataset, look_back=1):
dataX, dataY = [], []
for i in range(len(dataset)-look_back-1):
a = dataset[i:(i+look_back), 0]
dataX.append(a)
dataY.append(dataset[i + look_back, 0])
return numpy.array(dataX), numpy.array(dataY)
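# Worked toy example (assumed input) of the sliding window above: with
# dataset = [[1], [2], [3], [4], [5]] and look_back=2,
# dataX becomes [[1, 2], [2, 3]] and dataY becomes [3, 4]
# (the trailing sample is held back by the -1 in the loop bound).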
def percentage_error(actual, predicted):
res = numpy.empty(actual.shape)
for j in range(actual.shape[0]):
if actual[j] != 0:
res[j] = (actual[j] - predicted[j]) / actual[j]
else:
res[j] = predicted[j] / numpy.mean(actual)
return res
def mean_absolute_percentage_error(y_true, y_pred):
return numpy.mean(numpy.abs(percentage_error(numpy.asarray(y_true), numpy.asarray(y_pred)))) * 100
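# Quick sanity check with assumed values: y_true=[100, 200], y_pred=[110, 180] gives
# per-point errors of 10% each, so the MAPE is 10% (returned as ~10.0).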
# In[25]:
def lr_model(datass,look_back,data_partition):
datasets=datass.values
train_size = int(len(datasets) * data_partition)
test_size = len(datasets) - train_size
train, test = datasets[0:train_size], datasets[train_size:len(datasets)]
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
X_train=pd.DataFrame(trainX)
Y_train=pd.DataFrame(trainY)
X_test=pd.DataFrame(testX)
Y_test=pd.DataFrame(testY)
sc_X = StandardScaler()
sc_y = StandardScaler()
X= sc_X.fit_transform(X_train)
y= sc_y.fit_transform(Y_train)
X1= sc_X.fit_transform(X_test)
y1= sc_y.fit_transform(Y_test)
y=y.ravel()
y1=y1.ravel()
import tensorflow as tf
numpy.random.seed(1234)
tf.random.set_seed(1234)
from sklearn.linear_model import LinearRegression
grid = LinearRegression()
grid.fit(X,y)
y_pred_train_lr= grid.predict(X)
y_pred_test_lr= grid.predict(X1)
y_pred_train_lr=pd.DataFrame(y_pred_train_lr)
y_pred_test_lr=pd.DataFrame(y_pred_test_lr)
y1=pd.DataFrame(y1)
y=pd.DataFrame(y)
y_pred_test1_lr= sc_y.inverse_transform (y_pred_test_lr)
y_pred_train1_lr=sc_y.inverse_transform (y_pred_train_lr)
y_test= sc_y.inverse_transform (y1)
y_train= sc_y.inverse_transform (y)
y_pred_test1_rf=pd.DataFrame(y_pred_test1_lr)
y_pred_train1_rf=pd.DataFrame(y_pred_train1_lr)
y_test= pd.DataFrame(y_test)
#summarize the fit of the model
mape=mean_absolute_percentage_error(y_test,y_pred_test1_lr)
rmse= sqrt(mean_squared_error(y_test,y_pred_test1_lr))
mae=metrics.mean_absolute_error(y_test,y_pred_test1_lr)
return mape,rmse,mae
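# Hedged usage sketch (synthetic data, arbitrary parameters), left as a comment so the
# notebook cells below are unchanged:
#   demo = pd.DataFrame(numpy.sin(numpy.arange(300) / 10.0))
#   mape, rmse, mae = lr_model(demo, look_back=4, data_partition=0.8)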
# In[26]:
def svr_model(datass,look_back,data_partition):
datasets=datass.values
train_size = int(len(datasets) * data_partition)
test_size = len(datasets) - train_size
train, test = datasets[0:train_size], datasets[train_size:len(datasets)]
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
X_train=pd.DataFrame(trainX)
Y_train=pd.DataFrame(trainY)
X_test=pd.DataFrame(testX)
Y_test=pd.DataFrame(testY)
sc_X = StandardScaler()
sc_y = StandardScaler()
X= sc_X.fit_transform(X_train)
y= sc_y.fit_transform(Y_train)
X1= sc_X.fit_transform(X_test)
y1= sc_y.fit_transform(Y_test)
y=y.ravel()
y1=y1.ravel()
numpy.random.seed(1234)
import tensorflow as tf
tf.random.set_seed(1234)
from sklearn.svm import SVR
grid = SVR()
grid.fit(X,y)
y_pred_train_svr= grid.predict(X)
y_pred_test_svr= grid.predict(X1)
y_pred_train_svr=pd.DataFrame(y_pred_train_svr)
y_pred_test_svr=pd.DataFrame(y_pred_test_svr)
y1=pd.DataFrame(y1)
y=pd.DataFrame(y)
y_pred_test1_svr= sc_y.inverse_transform (y_pred_test_svr)
y_pred_train1_svr=sc_y.inverse_transform (y_pred_train_svr)
y_test= sc_y.inverse_transform (y1)
y_train= sc_y.inverse_transform (y)
y_pred_test1_svr=pd.DataFrame(y_pred_test1_svr)
y_pred_train1_svr=pd.DataFrame(y_pred_train1_svr)
y_test= pd.DataFrame(y_test)
#summarize the fit of the model
mape=mean_absolute_percentage_error(y_test,y_pred_test1_svr)
rmse= sqrt(mean_squared_error(y_test,y_pred_test1_svr))
mae=metrics.mean_absolute_error(y_test,y_pred_test1_svr)
return mape,rmse,mae
# In[27]:
def ann_model(datass,look_back,data_partition):
datasets=datass.values
train_size = int(len(datasets) * data_partition)
test_size = len(datasets) - train_size
train, test = datasets[0:train_size], datasets[train_size:len(datasets)]
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
X_train=pd.DataFrame(trainX)
Y_train=pd.DataFrame(trainY)
X_test=pd.DataFrame(testX)
Y_test=pd.DataFrame(testY)
sc_X = StandardScaler()
sc_y = StandardScaler()
X= sc_X.fit_transform(X_train)
y= sc_y.fit_transform(Y_train)
X1= sc_X.fit_transform(X_test)
y1= sc_y.fit_transform(Y_test)
y=y.ravel()
y1=y1.ravel()
import numpy
trainX = numpy.reshape(X, (X.shape[0], 1, X.shape[1]))
testX = numpy.reshape(X1, (X1.shape[0], 1, X1.shape[1]))
numpy.random.seed(1234)
import tensorflow as tf
tf.random.set_seed(1234)
from sklearn.neural_network import MLPRegressor
model= MLPRegressor(random_state=1,activation='tanh').fit(X,y)
numpy.random.seed(1234)
# make predictions
y_pred_train = model.predict(X)
y_pred_test = model.predict(X1)
y_pred_test= numpy.array(y_pred_test).ravel()
y_pred_test=pd.DataFrame(y_pred_test)
y1=pd.DataFrame(y1)
y_pred_test1= sc_y.inverse_transform (y_pred_test)
y_test= sc_y.inverse_transform (y1)
#summarize the fit of the model
mape=mean_absolute_percentage_error(y_test,y_pred_test1)
rmse= sqrt(mean_squared_error(y_test,y_pred_test1))
mae=metrics.mean_absolute_error(y_test,y_pred_test1)
return mape,rmse,mae
# In[28]:
def rf_model(datass,look_back,data_partition,max_features):
datasets=datass.values
train_size = int(len(datasets) * data_partition)
test_size = len(datasets) - train_size
train, test = datasets[0:train_size], datasets[train_size:len(datasets)]
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
X_train=pd.DataFrame(trainX)
Y_train=pd.DataFrame(trainY)
X_test=pd.DataFrame(testX)
Y_test=pd.DataFrame(testY)
sc_X = StandardScaler()
sc_y = StandardScaler()
X= sc_X.fit_transform(X_train)
y= sc_y.fit_transform(Y_train)
X1= sc_X.fit_transform(X_test)
y1= sc_y.fit_transform(Y_test)
y=y.ravel()
y1=y1.ravel()
numpy.random.seed(1234)
import tensorflow as tf
tf.random.set_seed(1234)
from sklearn.ensemble import RandomForestRegressor
grid = RandomForestRegressor(max_features=max_features)
grid.fit(X,y)
y_pred_train_rf= grid.predict(X)
y_pred_test_rf= grid.predict(X1)
y_pred_train_rf=pd.DataFrame(y_pred_train_rf)
y_pred_test_rf=pd.DataFrame(y_pred_test_rf)
y1=pd.DataFrame(y1)
y=pd.DataFrame(y)
y_pred_test1_rf= sc_y.inverse_transform (y_pred_test_rf)
y_pred_train1_rf=sc_y.inverse_transform (y_pred_train_rf)
y_test= sc_y.inverse_transform (y1)
y_train= sc_y.inverse_transform (y)
y_pred_test1_rf=
|
pd.DataFrame(y_pred_test1_rf)
|
pandas.DataFrame
|
import pandas as pd
import numpy as np
from tqdm import tqdm
import os
# import emoji
import gc
from utils.definitions import ROOT_DIR
from collections import OrderedDict
from utils.datareader import Datareader
def check_conditions( df, mean, std, error=(1.5,1.5)):
"""
Checks whether the given dataframe is similar to the one we want to create:
similar == the mean and std of its 'num_tracks' column are within +/- error of the target mean and std.
:param df: dataframe to check
:param mean: target mean
:param std: target std
:param error: tuple of allowed deviations from the target (mean, std)
:return: True if the dataframe satisfies the conditions, False otherwise
"""
target_mean = np.mean(df['num_tracks'])
target_std = np.std(df['num_tracks'])
if mean > (target_mean + error[0]) or mean < (target_mean - error[0]):
print("error m ",mean,target_mean)
return False
if std > (target_std + error[1]) or std < (target_std - error[1]):
print("error s ",std,target_std)
return False
return True
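# Hedged numeric sketch (values invented): with target mean=28.6, std=11.2 and the default
# error=(1.5, 1.5), a sample whose num_tracks has mean 29.5 and std 10.5 passes, whereas a
# sample mean of 31.0 fails and get_random_df_constrained below re-samples with a new seed.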
def get_random_df_constrained( source_df, num_of_pl, min_v, max_v, mean, std, errors=(1.5, 1.5)):
"""
iterates until it creates a dataframe that satisfies the conditions.
"""
seed = 0
while True:
df = source_df[((source_df['num_tracks']) >= min_v) & ((source_df['num_tracks']) <= max_v)].sample(
n=num_of_pl, random_state=seed)
if check_conditions(df, mean=mean, std=std, error=errors):
break
seed+=1
return df,seed
def generate_train(playlists):
## mean
cates = {'cat1': (10, 50, 1000, 28.6, 11.2), 'cat2_1': (10, 40, 998, 23.8, 8.7),
'cat2_2': (70, 80, 2, 75, 4), 'cat3_1': (10, 50, 314, 29.4, 11.4),
'cat3_2': (51, 75, 425, 62, 7.2), 'cat3_3': (75, 100, 261, 87, 7.1),
'cat4': (40, 100, 1000, 63, 16.5), 'cat5': (40, 100, 1000, 63.5, 17.2),
'cat6': (40, 100, 1000, 63.6, 16.7), 'cat7': (101, 250, 1000, 150, 38.6),
'cat8': (101, 250, 1000, 151.7, 38.6), 'cat9': (150, 250, 1000, 189, 28),
'cat_10': (150, 250, 1000, 187.5, 27)}
cates = OrderedDict(sorted(cates.items(), key=lambda t: t[0]))
cat_pids = {}
seeds = [0] * len(cates)
count = 0
for cat, info in cates.items():
print(cat)
df, seeds[count] = get_random_df_constrained(playlists, min_v=info[0], max_v=info[1],
num_of_pl=info[2],
mean=info[3], std=info[4], errors=(1.5, 1.5))
cat_pids[cat] = list(df.pid)
playlists = playlists.drop(df.index)
count += 1
playlists = playlists.reset_index(drop=True)
return playlists, cat_pids
def generate_test(cat_pids, playlists, interactions):
def build_df_none(cat_pids, playlists, cat, num_samples):
df = playlists[playlists['pid'].isin(cat_pids[cat])]
df = df[['pid', 'num_tracks']]
df['num_samples'] = num_samples
df['num_holdouts'] = df['num_tracks'] - df['num_samples']
return df
def build_df_name(cat_pids, playlists, cat, num_samples):
df = playlists[playlists['pid'].isin(cat_pids[cat])]
df = df[['name', 'pid', 'num_tracks']]
df['num_samples'] = num_samples
df['num_holdouts'] = df['num_tracks'] - df['num_samples']
return df
df_test_pl = pd.DataFrame()
df_test_itr = pd.DataFrame()
df_eval_itr = pd.DataFrame()
for cat in list(cat_pids.keys()):
if cat == 'cat1':
num_samples = 0
df = build_df_name(cat_pids, playlists, cat, num_samples)
df_test_pl = pd.concat([df_test_pl, df])
# all interactions used for evaluation
df_itr = interactions[interactions['pid'].isin(cat_pids[cat])]
df_eval_itr = pd.concat([df_eval_itr, df_itr])
# clean interactions for training
interactions = interactions.drop(df_itr.index)
print("cat1 done")
elif cat == 'cat2_1' or cat == 'cat2_2':
num_samples = 1
df = build_df_name(cat_pids, playlists, cat, num_samples)
df_test_pl = pd.concat([df_test_pl, df])
df_itr = interactions[interactions['pid'].isin(cat_pids[cat])]
# clean interactions for training
interactions = interactions.drop(df_itr.index)
df_sample = df_itr[df_itr['pos'] == 0]
df_test_itr =
|
pd.concat([df_test_itr, df_sample])
|
pandas.concat
|
#!/usr/bin/env python3.7
import re
import argparse
import sys
import operator
import pandas as pd
from Bio import SeqIO
parser = argparse.ArgumentParser(description = "Generate viral annotation tables for testing Federico's R script")
parser.add_argument("-f", "--fasta", dest = "proteins", help = "Fasta file containing proteins predicted with Prodigal", required = True)
parser.add_argument("-t", "--table", dest = "vphmm", help = "Table containing informative annotations", required = True)
if len(sys.argv) == 1:
parser.print_help()
else:
args = parser.parse_args()
protein_file = args.proteins
annotation_table = args.vphmm
vphmm_hit_df = pd.read_table(annotation_table)
dataset_id = protein_file.split("_")[0]
annotation_list = []
for protein in SeqIO.parse(protein_file, "fasta"):
contig_id = re.search(r"[ER0-9]+_\d+[-Pro]*", protein.id).group(0)
protein_prop = protein.description.split(" # ")
del(protein_prop[4])
if protein_prop[0] in vphmm_hit_df["query"].values:
filtered_df = vphmm_hit_df[vphmm_hit_df["query"] == protein_prop[0]]
if len(filtered_df) > 1:
best_value_index = max(filtered_df["Abs_Evalue_exp"].items(), key = operator.itemgetter(1))[0]
protein_prop.extend(list(filtered_df.loc[best_value_index, ["ViPhOG", "Abs_Evalue_exp", "Taxon"]]))
else:
protein_prop.extend(list(filtered_df.loc[filtered_df.index[0], ["ViPhOG", "Abs_Evalue_exp", "Taxon"]]))
else:
protein_prop.extend(["No hit", "NA", ""])
annotation_list.append([contig_id] + protein_prop)
final_map_df =
|
pd.DataFrame(annotation_list, columns = ["Contig", "CDS_ID", "Start", "End", "Direction", "Best_hit", "Abs_Evalue_exp", "Label"])
|
pandas.DataFrame
|
import nose
import os
import string
from distutils.version import LooseVersion
from datetime import datetime, date, timedelta
from pandas import Series, DataFrame, MultiIndex, PeriodIndex, date_range
from pandas.compat import range, lrange, StringIO, lmap, lzip, u, zip
import pandas.util.testing as tm
from pandas.util.testing import ensure_clean
from pandas.core.config import set_option
import numpy as np
from numpy import random
from numpy.random import randn
from numpy.testing import assert_array_equal
from numpy.testing.decorators import slow
import pandas.tools.plotting as plotting
def _skip_if_no_scipy():
try:
import scipy
except ImportError:
raise nose.SkipTest("no scipy")
@tm.mplskip
class TestSeriesPlots(tm.TestCase):
def setUp(self):
import matplotlib as mpl
self.mpl_le_1_2_1 = str(mpl.__version__) <= LooseVersion('1.2.1')
self.ts = tm.makeTimeSeries()
self.ts.name = 'ts'
self.series = tm.makeStringSeries()
self.series.name = 'series'
self.iseries = tm.makePeriodSeries()
self.iseries.name = 'iseries'
def tearDown(self):
tm.close()
@slow
def test_plot(self):
_check_plot_works(self.ts.plot, label='foo')
_check_plot_works(self.ts.plot, use_index=False)
_check_plot_works(self.ts.plot, rot=0)
_check_plot_works(self.ts.plot, style='.', logy=True)
_check_plot_works(self.ts.plot, style='.', logx=True)
_check_plot_works(self.ts.plot, style='.', loglog=True)
_check_plot_works(self.ts[:10].plot, kind='bar')
_check_plot_works(self.iseries.plot)
_check_plot_works(self.series[:5].plot, kind='bar')
_check_plot_works(self.series[:5].plot, kind='line')
_check_plot_works(self.series[:5].plot, kind='barh')
_check_plot_works(self.series[:10].plot, kind='barh')
_check_plot_works(Series(randn(10)).plot, kind='bar', color='black')
@slow
def test_plot_figsize_and_title(self):
# figsize and title
import matplotlib.pyplot as plt
ax = self.series.plot(title='Test', figsize=(16, 8))
self.assertEqual(ax.title.get_text(), 'Test')
assert_array_equal(np.round(ax.figure.get_size_inches()),
np.array((16., 8.)))
@slow
def test_bar_colors(self):
import matplotlib.pyplot as plt
import matplotlib.colors as colors
default_colors = plt.rcParams.get('axes.color_cycle')
custom_colors = 'rgcby'
df = DataFrame(randn(5, 5))
ax = df.plot(kind='bar')
rects = ax.patches
conv = colors.colorConverter
for i, rect in enumerate(rects[::5]):
xp = conv.to_rgba(default_colors[i % len(default_colors)])
rs = rect.get_facecolor()
self.assertEqual(xp, rs)
tm.close()
ax = df.plot(kind='bar', color=custom_colors)
rects = ax.patches
conv = colors.colorConverter
for i, rect in enumerate(rects[::5]):
xp = conv.to_rgba(custom_colors[i])
rs = rect.get_facecolor()
self.assertEqual(xp, rs)
tm.close()
from matplotlib import cm
# Test str -> colormap functionality
ax = df.plot(kind='bar', colormap='jet')
rects = ax.patches
rgba_colors = lmap(cm.jet, np.linspace(0, 1, 5))
for i, rect in enumerate(rects[::5]):
xp = rgba_colors[i]
rs = rect.get_facecolor()
self.assertEqual(xp, rs)
tm.close()
# Test colormap functionality
ax = df.plot(kind='bar', colormap=cm.jet)
rects = ax.patches
rgba_colors = lmap(cm.jet, np.linspace(0, 1, 5))
for i, rect in enumerate(rects[::5]):
xp = rgba_colors[i]
rs = rect.get_facecolor()
self.assertEqual(xp, rs)
tm.close()
df.ix[:, [0]].plot(kind='bar', color='DodgerBlue')
@slow
def test_bar_linewidth(self):
df = DataFrame(randn(5, 5))
# regular
ax = df.plot(kind='bar', linewidth=2)
for r in ax.patches:
self.assertEqual(r.get_linewidth(), 2)
# stacked
ax = df.plot(kind='bar', stacked=True, linewidth=2)
for r in ax.patches:
self.assertEqual(r.get_linewidth(), 2)
# subplots
axes = df.plot(kind='bar', linewidth=2, subplots=True)
for ax in axes:
for r in ax.patches:
self.assertEqual(r.get_linewidth(), 2)
@slow
def test_bar_log(self):
expected = np.array([1., 10., 100., 1000.])
if not self.mpl_le_1_2_1:
expected = np.hstack((.1, expected, 1e4))
ax = Series([200, 500]).plot(log=True, kind='bar')
assert_array_equal(ax.yaxis.get_ticklocs(), expected)
def test_rotation(self):
df = DataFrame(randn(5, 5))
ax = df.plot(rot=30)
for l in ax.get_xticklabels():
self.assertEqual(l.get_rotation(), 30)
def test_irregular_datetime(self):
rng = date_range('1/1/2000', '3/1/2000')
rng = rng[[0, 1, 2, 3, 5, 9, 10, 11, 12]]
ser = Series(randn(len(rng)), rng)
ax = ser.plot()
xp = datetime(1999, 1, 1).toordinal()
ax.set_xlim('1/1/1999', '1/1/2001')
self.assertEqual(xp, ax.get_xlim()[0])
@slow
def test_hist(self):
_check_plot_works(self.ts.hist)
_check_plot_works(self.ts.hist, grid=False)
_check_plot_works(self.ts.hist, figsize=(8, 10))
_check_plot_works(self.ts.hist, by=self.ts.index.month)
import matplotlib.pyplot as plt
fig, ax = plt.subplots(1, 1)
_check_plot_works(self.ts.hist, ax=ax)
_check_plot_works(self.ts.hist, ax=ax, figure=fig)
_check_plot_works(self.ts.hist, figure=fig)
tm.close()
fig, (ax1, ax2) = plt.subplots(1, 2)
_check_plot_works(self.ts.hist, figure=fig, ax=ax1)
_check_plot_works(self.ts.hist, figure=fig, ax=ax2)
with tm.assertRaises(ValueError):
self.ts.hist(by=self.ts.index, figure=fig)
@slow
def test_hist_layout(self):
n = 10
gender = tm.choice(['Male', 'Female'], size=n)
df = DataFrame({'gender': gender,
'height': random.normal(66, 4, size=n), 'weight':
random.normal(161, 32, size=n)})
with tm.assertRaises(ValueError):
df.height.hist(layout=(1, 1))
with tm.assertRaises(ValueError):
df.height.hist(layout=[1, 1])
@slow
def test_hist_layout_with_by(self):
import matplotlib.pyplot as plt
n = 10
gender = tm.choice(['Male', 'Female'], size=n)
df = DataFrame({'gender': gender,
'height': random.normal(66, 4, size=n), 'weight':
random.normal(161, 32, size=n),
'category': random.randint(4, size=n)})
_check_plot_works(df.height.hist, by=df.gender, layout=(2, 1))
tm.close()
_check_plot_works(df.height.hist, by=df.gender, layout=(1, 2))
tm.close()
_check_plot_works(df.weight.hist, by=df.category, layout=(1, 4))
tm.close()
_check_plot_works(df.weight.hist, by=df.category, layout=(4, 1))
tm.close()
@slow
def test_hist_no_overlap(self):
from matplotlib.pyplot import subplot, gcf, close
x = Series(randn(2))
y = Series(randn(2))
subplot(121)
x.hist()
subplot(122)
y.hist()
fig = gcf()
axes = fig.get_axes()
self.assertEqual(len(axes), 2)
@slow
def test_plot_fails_with_dupe_color_and_style(self):
x = Series(randn(2))
with tm.assertRaises(ValueError):
x.plot(style='k--', color='k')
@slow
def test_hist_by_no_extra_plots(self):
import matplotlib.pyplot as plt
n = 10
df = DataFrame({'gender': tm.choice(['Male', 'Female'], size=n),
'height': random.normal(66, 4, size=n)})
axes = df.height.hist(by=df.gender)
self.assertEqual(len(plt.get_fignums()), 1)
def test_plot_fails_when_ax_differs_from_figure(self):
from pylab import figure, close
fig1 = figure()
fig2 = figure()
ax1 = fig1.add_subplot(111)
with tm.assertRaises(AssertionError):
self.ts.hist(ax=ax1, figure=fig2)
@slow
def test_kde(self):
_skip_if_no_scipy()
_check_plot_works(self.ts.plot, kind='kde')
_check_plot_works(self.ts.plot, kind='density')
ax = self.ts.plot(kind='kde', logy=True)
self.assertEqual(ax.get_yscale(), 'log')
@slow
def test_kde_kwargs(self):
_skip_if_no_scipy()
from numpy import linspace
_check_plot_works(self.ts.plot, kind='kde', bw_method=.5, ind=linspace(-100,100,20))
_check_plot_works(self.ts.plot, kind='density', bw_method=.5, ind=linspace(-100,100,20))
ax = self.ts.plot(kind='kde', logy=True, bw_method=.5, ind=linspace(-100,100,20))
self.assertEqual(ax.get_yscale(), 'log')
@slow
def test_kde_color(self):
_skip_if_no_scipy()
ax = self.ts.plot(kind='kde', logy=True, color='r')
lines = ax.get_lines()
self.assertEqual(len(lines), 1)
self.assertEqual(lines[0].get_color(), 'r')
@slow
def test_autocorrelation_plot(self):
from pandas.tools.plotting import autocorrelation_plot
_check_plot_works(autocorrelation_plot, self.ts)
_check_plot_works(autocorrelation_plot, self.ts.values)
@slow
def test_lag_plot(self):
from pandas.tools.plotting import lag_plot
_check_plot_works(lag_plot, self.ts)
_check_plot_works(lag_plot, self.ts, lag=5)
@slow
def test_bootstrap_plot(self):
from pandas.tools.plotting import bootstrap_plot
_check_plot_works(bootstrap_plot, self.ts, size=10)
def test_invalid_plot_data(self):
s = Series(list('abcd'))
kinds = 'line', 'bar', 'barh', 'kde', 'density'
for kind in kinds:
with tm.assertRaises(TypeError):
s.plot(kind=kind)
@slow
def test_valid_object_plot(self):
s = Series(lrange(10), dtype=object)
kinds = 'line', 'bar', 'barh', 'kde', 'density'
for kind in kinds:
_check_plot_works(s.plot, kind=kind)
def test_partially_invalid_plot_data(self):
s = Series(['a', 'b', 1.0, 2])
kinds = 'line', 'bar', 'barh', 'kde', 'density'
for kind in kinds:
with tm.assertRaises(TypeError):
s.plot(kind=kind)
def test_invalid_kind(self):
s = Series([1, 2])
with tm.assertRaises(ValueError):
s.plot(kind='aasdf')
@slow
def test_dup_datetime_index_plot(self):
dr1 = date_range('1/1/2009', periods=4)
dr2 = date_range('1/2/2009', periods=4)
index = dr1.append(dr2)
values = randn(index.size)
s = Series(values, index=index)
_check_plot_works(s.plot)
@tm.mplskip
class TestDataFramePlots(tm.TestCase):
def setUp(self):
import matplotlib as mpl
self.mpl_le_1_2_1 = str(mpl.__version__) <= LooseVersion('1.2.1')
def tearDown(self):
tm.close()
@slow
def test_plot(self):
df = tm.makeTimeDataFrame()
_check_plot_works(df.plot, grid=False)
_check_plot_works(df.plot, subplots=True)
_check_plot_works(df.plot, subplots=True, use_index=False)
df = DataFrame({'x': [1, 2], 'y': [3, 4]})
self._check_plot_fails(df.plot, kind='line', blarg=True)
df = DataFrame(np.random.rand(10, 3),
index=list(string.ascii_letters[:10]))
_check_plot_works(df.plot, use_index=True)
_check_plot_works(df.plot, sort_columns=False)
_check_plot_works(df.plot, yticks=[1, 5, 10])
_check_plot_works(df.plot, xticks=[1, 5, 10])
_check_plot_works(df.plot, ylim=(-100, 100), xlim=(-100, 100))
_check_plot_works(df.plot, subplots=True, title='blah')
_check_plot_works(df.plot, title='blah')
tuples = lzip(string.ascii_letters[:10], range(10))
df = DataFrame(np.random.rand(10, 3),
index=MultiIndex.from_tuples(tuples))
_check_plot_works(df.plot, use_index=True)
# unicode
index = MultiIndex.from_tuples([(u('\u03b1'), 0),
(u('\u03b1'), 1),
(u('\u03b2'), 2),
(u('\u03b2'), 3),
(u('\u03b3'), 4),
(u('\u03b3'), 5),
(u('\u03b4'), 6),
(u('\u03b4'), 7)], names=['i0', 'i1'])
columns = MultiIndex.from_tuples([('bar', u('\u0394')),
('bar', u('\u0395'))], names=['c0',
'c1'])
df = DataFrame(np.random.randint(0, 10, (8, 2)),
columns=columns,
index=index)
_check_plot_works(df.plot, title=u('\u03A3'))
def test_nonnumeric_exclude(self):
import matplotlib.pyplot as plt
df = DataFrame({'A': ["x", "y", "z"], 'B': [1, 2, 3]})
ax = df.plot()
self.assertEqual(len(ax.get_lines()), 1) # B was plotted
@slow
def test_implicit_label(self):
df = DataFrame(randn(10, 3), columns=['a', 'b', 'c'])
ax = df.plot(x='a', y='b')
self.assertEqual(ax.xaxis.get_label().get_text(), 'a')
@slow
def test_explicit_label(self):
df = DataFrame(randn(10, 3), columns=['a', 'b', 'c'])
ax = df.plot(x='a', y='b', label='LABEL')
self.assertEqual(ax.xaxis.get_label().get_text(), 'LABEL')
@slow
def test_plot_xy(self):
import matplotlib.pyplot as plt
# columns.inferred_type == 'string'
df =
|
tm.makeTimeDataFrame()
|
pandas.util.testing.makeTimeDataFrame
|
import collections
import logging
import os
import pandas as pd
import core.config as cconfig
import dataflow.core.result_bundle as dtfcorebun
import dataflow.core.utils as dtfcorutil
import helpers.hprint as hprint
import helpers.hunit_test as hunitest
_LOG = logging.getLogger(__name__)
class TestResultBundle(hunitest.TestCase):
def test_to_config1(self) -> None:
"""
Convert a `ResultBundle` to a config.
"""
rb = self._get_result_bundle()
# Check.
actual_config = rb.to_config(commit_hash=False)
txt = f"config without 'commit_hash' field:\n{actual_config}"
self.check_string(txt)
def test_from_config1(self) -> None:
"""
Initialize a `ResultBundle` from a config.
"""
# Initialize a `ResultBundle` from a config.
init_config = self._get_init_config()
rb = dtfcorebun.ResultBundle.from_config(init_config)
# Check.
actual_config = rb.to_config(commit_hash=False)
txt = f"config without 'commit_hash' field:\n{actual_config}"
self.check_string(txt)
def test_to_dict_and_back(self) -> None:
"""
Round-trip conversion using `from_config()` and `to_config()`.
"""
# Initialize a `ResultBundle` from a config.
init_config = self._get_init_config()
result_bundle = dtfcorebun.ResultBundle.from_config(init_config)
# This pattern is used in `master_experiment.py` before pickling.
rb_as_dict = result_bundle.to_config().to_dict()
# After unpickling, we convert to a `Config`, then to a `ResultBundle`.
result_bundle_2 = dtfcorebun.ResultBundle.from_config(
cconfig.get_config_from_nested_dict(rb_as_dict)
)
# Check.
self.assert_equal(str(result_bundle), str(result_bundle_2))
def test_pickle1(self) -> None:
rb = self._get_result_bundle()
# Serialize.
dir_name = self.get_scratch_space()
file_name = os.path.join(dir_name, "result_bundle.pkl")
rb.to_pickle(file_name, use_pq=False)
# Compute the signature of the dir.
actual = hunitest.get_dir_signature(dir_name, include_file_content=False)
# Check.
expected = """
# Dir structure
$GIT_ROOT/dataflow/core/test/TestResultBundle.test_pickle1/tmp.scratch
$GIT_ROOT/dataflow/core/test/TestResultBundle.test_pickle1/tmp.scratch/result_bundle.v1_0.pkl
"""
expected = hprint.dedent(expected)
self.assert_equal(str(actual), str(expected), purify_text=True)
def test_get_tags_for_column1(self) -> None:
rb = self._get_result_bundle()
#
actual = rb.get_tags_for_column("col2")
expected = ["target_col", "step_1"]
self.assert_equal(str(actual), str(expected))
def test_get_columns_for_tag1(self) -> None:
rb = self._get_result_bundle()
#
actual = rb.get_columns_for_tag("step_1")
expected = ["col2", "col4"]
self.assert_equal(str(actual), str(expected))
@staticmethod
def _get_init_config() -> cconfig.Config:
# TODO(gp): Factor out common part.
df = pd.DataFrame([range(5)], columns=[f"col{i}" for i in range(5)])
init_config = cconfig.get_config_from_nested_dict(
{
"config": {
"key": "val",
},
"result_nid": "leaf_node",
"method": "fit",
"result_df": df,
"column_to_tags": {
"col0": ["feature_col"],
"col1": ["target_col", "step_0"],
"col2": ["target_col", "step_1"],
"col3": ["prediction_col", "step_0"],
"col4": ["prediction_col", "step_1"],
},
"info": {"df_info": dtfcorutil.get_df_info_as_string(df)},
"payload": None,
}
)
return init_config
def _get_result_bundle(self) -> dtfcorebun.ResultBundle:
"""
Initialize a `ResultBundle` from a config.
"""
init_config = self._get_init_config()
rb = dtfcorebun.ResultBundle.from_config(init_config)
return rb
# #############################################################################
class TestPredictionResultBundle(hunitest.TestCase):
def test_to_config1(self) -> None:
init_config = self._get_init_config()
prb = dtfcorebun.PredictionResultBundle(**init_config.to_dict())
actual_config = prb.to_config(commit_hash=False)
self.check_string(f"config without 'commit_hash' field:\n{actual_config}")
def test_feature_col_names1(self) -> None:
init_config = self._get_init_config()
prb = dtfcorebun.PredictionResultBundle(**init_config.to_dict())
actual = prb.feature_col_names
expected = ["col0"]
self.assertListEqual(actual, expected)
def test_target_col_names1(self) -> None:
init_config = self._get_init_config()
prb = dtfcorebun.PredictionResultBundle(**init_config.to_dict())
actual = prb.target_col_names
expected = ["col1", "col2"]
self.assertListEqual(actual, expected)
def test_prediction_col_names1(self) -> None:
init_config = self._get_init_config()
prb = dtfcorebun.PredictionResultBundle(**init_config.to_dict())
actual = prb.prediction_col_names
expected = ["col3", "col4"]
self.assertListEqual(actual, expected)
def test_get_target_and_prediction_col_names_for_tags1(self) -> None:
init_config = self._get_init_config()
prb = dtfcorebun.PredictionResultBundle(**init_config.to_dict())
actual = prb.get_target_and_prediction_col_names_for_tags(
tags=["step_0", "step_1"]
)
expected = {"step_0": ("col1", "col3"), "step_1": ("col2", "col4")}
self.assertDictEqual(actual, expected)
def test_get_target_and_prediction_col_names_for_tags2(self) -> None:
"""
Try to extract columns with no target column for given tag.
"""
init_config = self._get_init_config()
init_config["column_to_tags"].pop("col1")
prb = dtfcorebun.PredictionResultBundle(**init_config.to_dict())
with self.assertRaises(AssertionError):
prb.get_target_and_prediction_col_names_for_tags(tags=["step_0"])
def test_get_target_and_prediction_col_names_for_tags3(self) -> None:
"""
Extract columns with no target column for another tag.
"""
init_config = self._get_init_config()
init_config["column_to_tags"].pop("col1")
prb = dtfcorebun.PredictionResultBundle(**init_config.to_dict())
actual = prb.get_target_and_prediction_col_names_for_tags(tags=["step_1"])
expected = {"step_1": ("col2", "col4")}
self.assertDictEqual(actual, expected)
def test_get_targets_and_predictions_for_tags1(self) -> None:
init_config = self._get_init_config()
prb = dtfcorebun.PredictionResultBundle(**init_config.to_dict())
actual = prb.get_targets_and_predictions_for_tags(
tags=["step_0", "step_1"]
)
expected = {
"step_0": (pd.Series([1], name="col1"), pd.Series([3], name="col3")),
"step_1": (pd.Series([2], name="col2"),
|
pd.Series([4], name="col4")
|
pandas.Series
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import os
from copy import deepcopy
import numpy as np
import pandas as pd
from pandas import DataFrame, Series
import unittest
import nose
from numpy.testing import assert_almost_equal, assert_allclose
from numpy.testing.decorators import slow
from pandas.util.testing import (assert_series_equal, assert_frame_equal,
assert_almost_equal)
import trackpy as tp
from trackpy.try_numba import NUMBA_AVAILABLE
from trackpy.linking import PointND, link, Hash_table
# Catch attempts to set values on an inadvertent copy of a Pandas object.
tp.utils.make_pandas_strict()
path, _ = os.path.split(os.path.abspath(__file__))
path = os.path.join(path, 'data')
# Call lambda function for a fresh copy each time.
unit_steps = lambda: [[PointND(t, (x, 0))] for t, x in enumerate(range(5))]
np.random.seed(0)
random_x = np.random.randn(5).cumsum()
random_x -= random_x.min() # All x > 0
max_disp = np.diff(random_x).max()
random_walk_legacy = lambda: [[PointND(t, (x, 5))]
for t, x in enumerate(random_x)]
def hash_generator(dims, box_size):
return lambda: Hash_table(dims, box_size)
def _skip_if_no_numba():
if not NUMBA_AVAILABLE:
raise nose.SkipTest('numba not installed. Skipping.')
def random_walk(N):
return np.cumsum(np.random.randn(N))
def contracting_grid():
"""Two frames with a grid of 441 points.
In the second frame, the points contract, so that the outermost set
coincides with the second-outermost set in the previous frame.
This is a way to challenge (and/or stump) a subnet solver.
"""
pts0x, pts0y = np.mgrid[-10:11,-10:11]
pts0 = pd.DataFrame(dict(x=pts0x.flatten(), y=pts0y.flatten(),
frame=0))
pts1 = pts0.copy()
pts1.frame = 1
pts1.x = pts1.x * 0.9
pts1.y = pts1.y * 0.9
allpts = pd.concat([pts0, pts1], ignore_index=True)
allpts.x += 100 # Because BTree doesn't allow negative coordinates
allpts.y += 100
return allpts
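# Quick sanity check of contracting_grid() (an illustrative sketch, not in the
# original file): in frame 1 the outermost ring (|x - 100| == 10 in frame 0)
# contracts onto the second-outermost ring (|x - 100| == 9).
#
# pts = contracting_grid()
# f0, f1 = pts[pts.frame == 0], pts[pts.frame == 1]
# assert f0.x.max() - 100 == 10 and round(f1.x.max() - 100, 6) == 9.0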
class CommonTrackingTests(object):
do_diagnostics = False # Don't ask for diagnostic info from linker
def test_one_trivial_stepper(self):
# One 1D stepper
N = 5
f = DataFrame({'x': np.arange(N), 'y': np.ones(N), 'frame': np.arange(N)})
expected = f.copy()
expected['particle'] = np.zeros(N)
actual = self.link_df(f, 5)
assert_frame_equal(actual, expected)
actual_iter = self.link_df_iter(f, 5, hash_size=(10, 2))
assert_frame_equal(actual_iter, expected)
if self.do_diagnostics:
assert 'diag_search_range' in self.diag.columns
# Except for first frame, all particles should have been labeled
# with a search_range
assert not any(self.diag['diag_search_range'][
actual_iter.frame > 0].isnull())
def test_two_isolated_steppers(self):
N = 5
Y = 25
# Begin second feature one frame later than the first, so the particle labeling (0, 1) is
# established and not arbitrary.
a = DataFrame({'x': np.arange(N), 'y': np.ones(N), 'frame': np.arange(N)})
b = DataFrame({'x': np.arange(1, N), 'y': Y + np.ones(N - 1), 'frame': np.arange(1, N)})
f = pd.concat([a, b])
expected = f.copy().reset_index(drop=True)
expected['particle'] = np.concatenate([np.zeros(N), np.ones(N - 1)])
expected.sort(['particle', 'frame'], inplace=True)
actual = self.link_df(f, 5)
|
assert_frame_equal(actual, expected)
|
pandas.util.testing.assert_frame_equal
|
import pandas as pd
assignment = dict()
assignment = {'Tool':['Python','JavaScript','Twitter','GitHub', 'Gephi','GeoNames','Transkribus','Excel','MySQL'],'2015':[9,8,10,2,11,2,0,5,0],'2016':[22,18,18,6,16,4,1,9,6],'2017':[27,12,26,17,14,3,2,3,9],'2018':[32,6,16,5,10,1,1,6,5],'2019':[35,15,12,10,9,8,8,7,7]}
table =
|
pd.DataFrame(assignment)
|
pandas.DataFrame
|
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.11.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="view-in-github"
# <a href="https://colab.research.google.com/github/tylerlum/ufc_automated_scoring_system/blob/main/UFC_automated_scoring.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="q2Ink0cSOSRR"
# # UFC Automated Scoring
#
# The goal of this notebook is to:
# * Read in stored, scraped UFC data and convert it into a dataset ready for ML models
# * Train, test, and analyze ML models
#
# Functional as of April 2021
# + [markdown] id="XIv8RUYoOSRW"
# ## Read in stored data
# + id="Ws0PWbZMOSRX"
import numpy as np
import pandas as pd
# + id="3MgHHwyvOSRX"
STORED_FIGHT_TABLE = pd.read_csv('FIGHT_TABLE_NUM_EVENTS_All_DATA_MODE_Summary_22-04-2021_11:08:22.csv')
# + colab={"base_uri": "https://localhost:8080/", "height": 657} id="PlBKZMo0OSRX" outputId="c3d7efa1-5a1e-48a3-8ef8-e860e43c023e"
STORED_FIGHT_TABLE
# + colab={"base_uri": "https://localhost:8080/", "height": 640} id="uF1ya3BLOSRY" outputId="437ecbfd-a5d6-42ad-e64b-c60d930d8c23"
# Clean dataset: Only decisions with clear winners
STORED_FIGHT_TABLE = STORED_FIGHT_TABLE[STORED_FIGHT_TABLE["Method"].str.contains("DEC")]
STORED_FIGHT_TABLE = STORED_FIGHT_TABLE[(STORED_FIGHT_TABLE["Winner"] == 1) | (STORED_FIGHT_TABLE["Winner"] == 0)]
STORED_FIGHT_TABLE
# + id="SivCNBMTOSRZ"
X = STORED_FIGHT_TABLE.drop(['Winner', 'Fighter 0 Name', 'Fighter 1 Name', 'Method'], axis=1).fillna(0)
y = STORED_FIGHT_TABLE[['Winner']]
# + colab={"base_uri": "https://localhost:8080/", "height": 455} id="ePWHHGDiOSRZ" outputId="c211382d-fdd4-498c-eaef-449702015273"
X
# + colab={"base_uri": "https://localhost:8080/", "height": 402} id="-13NNIUoOSRZ" outputId="83aeddc0-e9ef-447e-aed2-703493cf7b6c"
y
# + [markdown] id="Jd2Ifk5uOSRa"
# ## Setup train/validate/test split with data augmentation
#
# TODO: Add in smarter data augmentation that creates new datapoints nearby.
# + id="I6c8hDK7OSRa"
def create_flipped_table(table):
'''Swap the Fighter 0 and Fighter 1 columns of table so that, together with the original,
each fight can be represented by two rows: (Fighter 0 = A, Fighter 1 = B) and (Fighter 0 = B, Fighter 1 = A).
Keeps the same column order, since column names are not inspected when passed to the ML model.'''
# Get columns in flipped order, which moves the columns around, but changes column name order too
flipped_columns = []
for column in table.columns:
if "Fighter 0" in column:
flipped_columns.append(column.replace("Fighter 0", "Fighter 1"))
elif "Fighter 1" in column:
flipped_columns.append(column.replace("Fighter 1", "Fighter 0"))
else:
flipped_columns.append(column)
flipped_table = table[flipped_columns]
# Flips winners around
if 'Winner' in flipped_table.columns:
flipped_table['Winner'] = flipped_table['Winner'].replace([0, 1], [1, 0])
# Change column names back to normal
flipped_table.columns = table.columns
return flipped_table
def add_rows_of_flipped_columns(table):
flipped_table = create_flipped_table(table)
new_table = pd.concat([table, flipped_table])
return new_table
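# +
# Tiny sanity check of the flip augmentation (an illustrative cell, not in the
# original notebook; the 'Strikes' column name is made up): flipping swaps the
# Fighter 0/1 columns and the Winner label, and concatenating doubles the rows.
_demo = pd.DataFrame({'Fighter 0 Strikes': [10], 'Fighter 1 Strikes': [20], 'Winner': [0]})
_flipped = create_flipped_table(_demo)
assert _flipped['Fighter 0 Strikes'].iloc[0] == 20
assert _flipped['Winner'].iloc[0] == 1
assert len(add_rows_of_flipped_columns(_demo)) == 2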
# + id="0iGtsWJVOSRa"
# Train/validate/test split
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=0)
X_train, X_valid, y_train, y_valid = train_test_split(X_train, y_train, test_size=0.33, random_state=0)
# Add flipped rows so fighter 0 and 1 are treated same
X_train, y_train = add_rows_of_flipped_columns(X_train), add_rows_of_flipped_columns(y_train)
X_valid, y_valid = add_rows_of_flipped_columns(X_valid), add_rows_of_flipped_columns(y_valid)
X_test, y_test = add_rows_of_flipped_columns(X_test), add_rows_of_flipped_columns(y_test)
# + id="oRU788hJOSRb"
# Expect equal number of examples in Fighter 0 as Fighter 1 from data augmentation
assert(len(y_train[y_train['Winner'] == 0]) == len(y_train[y_train['Winner'] == 1]))
assert(len(y_valid[y_valid['Winner'] == 0]) == len(y_valid[y_valid['Winner'] == 1]))
assert(len(y_test[y_test['Winner'] == 0]) == len(y_test[y_test['Winner'] == 1]))
# + colab={"base_uri": "https://localhost:8080/", "height": 455} id="g208MGkGOSRb" outputId="8d7f37ae-722e-4285-f255-de544ae009fd"
X_train
# + colab={"base_uri": "https://localhost:8080/", "height": 402} id="wojL_I-7OSRb" outputId="ab800758-1a0d-40fc-dde0-5ed1820d6b39"
y_train
# + colab={"base_uri": "https://localhost:8080/"} id="DndW0X9aOSRc" outputId="2ee4f6d8-e3a0-4dfa-8b49-c81b511307ab"
print(f"X_train.shape = {X_train.shape}")
print(f"X_valid.shape = {X_valid.shape}")
print(f"X_test.shape = {X_test.shape}")
print(f"y_train.shape = {y_train.shape}")
print(f"y_valid.shape = {y_valid.shape}")
print(f"y_test.shape = {y_test.shape}")
# + [markdown] id="xNEsuXzFOSRf"
# ### Standardize features and break into fighter 0 and 1
# + id="sKWmgHtTdXLr"
fighter0_columns = [col for col in X_train.columns if "Fighter 0" in col]
fighter1_columns = [col for col in X_train.columns if "Fighter 1" in col]
X0_train = X_train[fighter0_columns]
X1_train = X_train[fighter1_columns]
X0_valid = X_valid[fighter0_columns]
X1_valid = X_valid[fighter1_columns]
X0_test = X_test[fighter0_columns]
X1_test = X_test[fighter1_columns]
X_train_new = pd.concat([X0_train, X1_train], axis=1)
X_valid_new = pd.concat([X0_valid, X1_valid], axis=1)
X_test_new = pd.concat([X0_test, X1_test], axis=1)
means, stds = X_train_new.mean(), X_train_new.std()
X_train_new_normal = (X_train_new - means) / stds
X_valid_new_normal = (X_valid_new - means) / stds
X_test_new_normal = (X_test_new - means) / stds
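# +
# Quick check on the standardization above (illustrative, not in the original
# notebook): with the training-set means/stds, the training features should be
# roughly zero-mean and unit-std (assuming no constant feature columns), while
# the validation/test sets are only approximately standardized.
print(f"max |train mean| = {X_train_new_normal.mean().abs().max():.2e}")
print(f"max |train std - 1| = {(X_train_new_normal.std() - 1).abs().max():.2e}")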
# + colab={"base_uri": "https://localhost:8080/"} id="x3836SoTgnx8" outputId="3f2daefc-9add-4897-98c3-79640a9f101d"
# Add data augmentation only on training data (can try SMOTE, gaussian noise, etc)
extra_train_copies = 10
mu, sigma = 0, 0.1
noisy_copies = [X_train_new_normal + np.random.normal(mu, sigma, X_train_new_normal.shape) for _ in range(extra_train_copies)]
print(f"X_train_new_normal.shape = {X_train_new_normal.shape}")
print(f"y_train.shape = {y_train.shape}")
X_train_new_normal_aug = pd.concat([X_train_new_normal] + noisy_copies, axis=0)
y_train_aug =
|
pd.concat([y_train] + [y_train] * extra_train_copies, axis=0)
|
pandas.concat
|
"""
MIT License
Copyright (c) 2021, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
------------------------------------------------------------------------------------------------------------------------
Experiment using SVM classifier
=====================
This class implements our experiment using a Support Vector Machine (SVM) classifier. If a different
classifier is required, refer to the comments in the classifier section for instructions; only a few
changes are needed to update the experiment.
Note: This experiment needs data from the hypervolume calculation, which can be produced with the R script (hyper_vol_usage.R).
This experiment uses the optional sklearnex package to provide optimizations for scikit-learn when running on Intel processors.
"""
import collections
import sys
from operator import itemgetter
from sklearn.cluster import KMeans
from source_code.adversaries.kpp_attack import KppAttack
from source_code.adversaries.mk_attack import MkAttack
from source_code.adversaries.stat_attack import StatAttack
from source_code.adversaries.hyp_attack import HypVolAttack
from source_code.dataset.biometric_dataset import BioDataSet
import numpy as np
import pandas as pd
import os
import seaborn as sns
from joblib import dump, load
import matplotlib.pyplot as plt
from source_code.metrics.confusion_matrix import ConfusionMatrix
from source_code.metrics.fcs import FCS
from source_code.metrics.roc_curve import RocCurve
from source_code.synth_data_gen.gauss_blob_generator import GaussBlob
from source_code.analytics.dataoverlap_interval import OverLapInt
import traceback
class HypExp:
def __init__(self, pop_df, attack_df, pop_classifier_path, pos_user_per_dim_ol_path, active_gr,
results_save_path=None,
attack_samples=1000, boot_strap_st_at=False, bs_data_path=None, bs_mul=1,
hv_cut_off=0.04, gr2_per_dim_ol_path=None, std_dev_at_gr=5, clf_type=None,
hyp_at_u_data=None, rand_state=None, train_data_size=0.6,
train_classifiers=False, cluster_data_path=None, hyp_vol_data_path=None,
num_cls=None, cv=10, random_grid_search_iter=25):
self.pop_df = pop_df.copy()
self.attack_df = attack_df.copy()
self.active_gr = active_gr
self.classifier_training = train_classifiers
self.results_save_path = results_save_path
self.clf_type = clf_type
self.rand_state = rand_state
self.attack_samples = attack_samples
self.boot_strap_st_at = boot_strap_st_at
self.train_data_size = train_data_size
self.num_cls = num_cls
self.cv = cv
self.n_iter = random_grid_search_iter
self.bs_mul = bs_mul
self.hv_cut_off = hv_cut_off
self.feat_list = None
self.clf_path = pop_classifier_path
self.pos_user_per_dim_ol_path = pos_user_per_dim_ol_path
self.bs_data_path = bs_data_path
self.gr2_per_dim_ol_path = gr2_per_dim_ol_path
self.std_dev_at_gr = std_dev_at_gr
self.cluster_data_path = cluster_data_path
self.hyp_vol_data_path = hyp_vol_data_path
# creating dictionaries for data gathering
self.test_prd_dict = dict()
self.test_prd_prob_dict = dict()
self.test_cm_dict = dict()
self.test_precision = dict()
self.roc_dict = dict()
self.fcs_dict = dict()
self.fcs_plt = dict()
self.att_prd_mk = dict()
self.att_prd_prob_mk = dict()
self.att_prd_kpp = dict()
self.att_prd_prob_kpp = dict()
self.att_prd_stat = dict()
self.att_prd_prob_stat = dict()
self.att_prd_hyp = dict()
self.att_prd_prob_hyp = dict()
# Attack Data
self.attack_df_kpp = None
self.attack_df_mk = None
self.attack_df_stat = None
if hyp_at_u_data is not None:
self.attack_df_hyp = hyp_at_u_data
else:
self.attack_df_hyp = None
# Result Data
self.acc_res_full_df = None
self.acc_res_df = None
self.acc_per_df = None
self.acc_eer_df = None
self.stack_res_df = None
return
def run_exp(self):
data_group_1 = dict()
clf_dict = dict()
gr2_means = self.attack_df.mean()
gr2_means_fv = gr2_means.drop('user', axis=0).to_numpy().reshape(1, -1)
gr2_std = self.attack_df.std()
gr2_std_fv = gr2_std.drop('user', axis=0).to_numpy().reshape(1, -1)
tb_data_group_1 = BioDataSet(feature_data_frame=self.pop_df, random_state=self.rand_state)
tb_data_group_2 = BioDataSet(feature_data_frame=self.attack_df, random_state=self.rand_state)
# Extracting users in both groups
users_group_1 = tb_data_group_1.user_list
users_group_2 = tb_data_group_2.user_list
self.feat_list = self.pop_df.columns.drop('user').to_list()
"""
Generating user data
"""
user_g1_df_dict = dict()
for user in users_group_1:
data_group_1[user] = tb_data_group_1.get_data_set(user, neg_sample_sources=None, neg_test_limit=True)
user_g1_df_dict[user] = self.pop_df[self.pop_df['user'] == user]
if self.classifier_training is True:
scoring_metric = 'precision'
self.cv = 10 # specify a number for cv fold cross validation
self.n_iter = 25  # number of iterations for random grid search
precision_tup = list()
eer_tup = list()
print(f"training classifiers")
if self.clf_type == 'svm':
# Comment out the two lines below if not using an Intel processor or if sklearnex is not installed
from sklearnex import patch_sklearn
patch_sklearn()
from classifiers.svm_classifier import SvmClassifier
# Classifier training grid params; update with classifier-specific hyperparameters
c_range = np.unique(np.logspace(start=0.1, stop=4, num=100 + 20, dtype=int))
grid_svm = {'estimator__C': c_range,
'estimator__gamma': ['auto', 'scale']}
# Update the classifier on the line below to use a different classifier
clf_dict = {usr: SvmClassifier(pos_user=data_group_1[usr], random_state=self.rand_state)
for usr in users_group_1}
for usr in users_group_1:
print(f'training for user {usr}')
clf_name_string = f"clf_{usr}_{self.clf_type}_{self.rand_state}.joblib"
clf_dict[usr].split_data(data_frame=data_group_1[usr], training_data_size=0.6)
clf_dict[usr].random_train_tune_parameters(pram_dist=grid_svm, cv=self.cv, scoring_metric=scoring_metric,
n_itr=self.n_iter)
dump(clf_dict[usr], os.path.join(self.clf_path, clf_name_string))
elif self.clf_type == 'knn':
from classifiers.knn_classifier import KnnClassifier
leaf_size = list(range(1, 70))
n_neighbors = list(range(1, 50))
p = [1, 2]
grid_knn = dict(leaf_size=leaf_size, n_neighbors=n_neighbors, p=p)
clf_dict = {usr: KnnClassifier(pos_user=data_group_1[usr], random_state=self.rand_state,
n_jobs=-1) for usr in users_group_1}
for usr in users_group_1:
print(f'training for user {usr}')
clf_name_string = f"clf_{usr}_{self.clf_type}_{self.rand_state}.joblib"
clf_dict[usr].split_data(data_frame=data_group_1[usr], training_data_size=0.6)
clf_dict[usr].random_train_tune_parameters(pram_dist=grid_knn, cv=self.cv,
scoring_metric=scoring_metric,
n_itr=self.n_iter)
dump(clf_dict[usr], os.path.join(self.clf_path, clf_name_string))
elif self.clf_type == 'rf':
# Comment out the two sklearnex lines below if not using an Intel processor or if sklearnex is not installed
from classifiers.random_forest_classifier import RandomForestClassifier
from sklearnex import patch_sklearn
patch_sklearn()
# Number of trees in random forest
n_estimators = [int(x) for x in np.linspace(start=200, stop=2000, num=10)]
# Number of features to consider at every split
max_features = ['auto', 'sqrt']
# Maximum number of levels in tree
max_depth = [int(x) for x in np.linspace(10, 110, num=11)]
max_depth.append(None)
# Minimum number of samples required to split a node
min_samples_split = [2, 5, 10]
# Minimum number of samples required at each leaf node
min_samples_leaf = [1, 2, 4]
# Method of selecting samples for training each tree
bootstrap = [True, False]
grid_rf = {'n_estimators': n_estimators,
'max_features': max_features,
'max_depth': max_depth,
'min_samples_split': min_samples_split,
'min_samples_leaf': min_samples_leaf,
'bootstrap': bootstrap}
clf_dict = {usr: RandomForestClassifier(pos_user=data_group_1[usr], random_state=self.rand_state,
n_jobs=-1) for usr in users_group_1}
for usr in users_group_1:
print(f'training for user {usr}')
clf_name_string = f"clf_{usr}_{self.clf_type}_{self.rand_state}.joblib"
clf_dict[usr].split_data(data_frame=data_group_1[usr], training_data_size=0.6)
clf_dict[usr].random_train_tune_parameters(pram_dist=grid_rf, cv=self.cv,
scoring_metric=scoring_metric,
n_itr=self.n_iter)
dump(clf_dict[usr], os.path.join(self.clf_path, clf_name_string))
else:
print('classifier not implemented')
sys.exit(1)
print(f"training classifiers complete")
else:
"""
Loading classifiers from disk
"""
print(f"Loading classifiers")
try:
clf_dict = {usr: load(os.path.join(self.clf_path, f"clf_{usr}_{self.clf_type}_{self.rand_state}.joblib"))
for usr in users_group_1}
except Exception:
traceback.print_exc()
print(f"Loading classifiers complete")
"""
Calculating mean overlaps on feature level
"""
print(f"Calculating mean overlaps on feature level started")
overlap_other_per_user_means_df = pd.DataFrame()
overlap_by_other_per_user_means_df = pd.DataFrame()
for pos_user in users_group_1:
pos_user_per_dim_ol_path = self.pos_user_per_dim_ol_path
pos_user_per_dim_ol = pd.read_csv(pos_user_per_dim_ol_path)
pos_user_per_dim_ol = pos_user_per_dim_ol.drop('Unnamed: 0', axis=1)
pos_user_pd_ol_others = pos_user_per_dim_ol[(pos_user_per_dim_ol['V2'] == pos_user)]
pos_user_pd_ol_others_mean = pos_user_pd_ol_others.drop(['V1', 'V2'], axis=1).mean()
overlap_other_per_user_means_df[pos_user] = pos_user_pd_ol_others_mean
pos_user_pd_ol_by_others = pos_user_per_dim_ol[(pos_user_per_dim_ol['V1'] == pos_user)]
pos_user_pd_ol_by_others_mean = \
pos_user_pd_ol_by_others.drop(['V1', 'V2'], axis=1).mean().sort_values()
overlap_by_other_per_user_means_df[pos_user] = pos_user_pd_ol_by_others_mean
print(f"Calculating mean overlaps on feature level complete")
"""
Calculating mean statistics for overlaps over entire population
"""
overlap_other_means = overlap_other_per_user_means_df.mean(axis=1)
overlap_other_means = overlap_other_means.sort_values(ascending=True)
overlap_other_range = overlap_other_per_user_means_df.max(axis=1) - overlap_other_per_user_means_df.min(axis=1)
overlap_other_range = overlap_other_range.sort_values(ascending=True)
overlap_by_other_means = overlap_by_other_per_user_means_df.mean(axis=1)
overlap_by_other_means = overlap_by_other_means.sort_values(ascending=True)
overlap_by_other_range = overlap_by_other_per_user_means_df.max(
axis=1) - overlap_by_other_per_user_means_df.min(axis=1)
overlap_by_other_range = overlap_by_other_range.sort_values(ascending=True)
'''
Model Classification
'''
print(f"Starting model classification")
self.test_prd_dict = {usr: clf_dict[usr].classify() for usr in users_group_1}
self.test_prd_prob_dict = {usr: clf_dict[usr].predictions_prob for usr in users_group_1}
print(f"Model classification complete")
"""
Test set and labels extraction
"""
test_set = {usr: clf_dict[usr].test_data_frame.drop('labels', axis=1) for usr in users_group_1}
test_labels = {usr: clf_dict[usr].test_data_frame.labels.values for usr in users_group_1}
"""
Confusion Matrix
"""
self.test_cm_dict = {usr: ConfusionMatrix() for usr in users_group_1}
matrix_svm = {usr: self.test_cm_dict[usr].get_metric(true_labels=test_labels[usr],
predicted_labels=self.test_prd_dict[usr])
for usr in users_group_1}
self.test_precision = {usr: self.test_cm_dict[usr].tp / (self.test_cm_dict[usr].tp + self.test_cm_dict[usr].fp)
for usr in users_group_1}
"""
ROC Curves
"""
self.roc_dict = {usr: RocCurve() for usr in users_group_1}
roc_svm = {usr: self.roc_dict[usr].get_metric(test_set_features=test_set[usr].values,
test_set_labels=test_labels[usr],
classifier=clf_dict[usr].classifier, ax=None)
for usr in users_group_1}
"""
FCS
"""
self.fcs_dict = {usr: FCS(classifier_name='SVM') for usr in users_group_1}
self.fcs_plt = {usr: self.fcs_dict[usr].get_metric(
true_labels=test_labels[usr],
predicted_probs=clf_dict[usr].predictions_prob,
pred_labels=clf_dict[usr].predictions)
for usr in users_group_1}
plt.close('all')
"""
Master Key Attack
"""
# Generating attack set
mk_adv = MkAttack(data=self.attack_df, required_attack_samples=self.attack_samples)
self.attack_df_mk = mk_adv.generate_attack()
# Performing attack
self.att_prd_mk = {usr: clf_dict[usr].classifier.predict(self.attack_df_mk.values)
for usr in users_group_1}
att_prd_prob_mk = {usr: clf_dict[usr].classifier.predict_proba(self.attack_df_mk.values)
for usr in users_group_1}
self.att_prd_prob_mk = {usr: att_prd_prob_mk[usr][:, 1]
for usr in users_group_1}
"""
Targeted K-means++ Attack
"""
# Generating attack set, first point is the mean of the attack data
kpp_adv = KppAttack(data=self.attack_df, required_attack_samples=self.attack_samples)
self.attack_df_kpp = kpp_adv.generate_attack()
# Performing attack
self.att_prd_kpp = {usr: clf_dict[usr].classifier.predict(self.attack_df_kpp.values)
for usr in users_group_1}
att_prd_prob_kpp = {usr: clf_dict[usr].classifier.predict_proba(self.attack_df_kpp.values)
for usr in users_group_1}
self.att_prd_prob_kpp = {usr: att_prd_prob_kpp[usr][:, 1]
for usr in users_group_1}
"""
Stats Attack
"""
stat_adv = StatAttack(data=self.attack_df, required_attack_samples=self.attack_samples,
bootstrap_data_path=self.bs_data_path,
run_bootstrap=self.boot_strap_st_at, bootstrap_iter=self.bs_mul * 1000)
self.attack_df_stat = stat_adv.generate_attack()
# Performing attack
self.att_prd_stat = {usr: clf_dict[usr].classifier.predict(self.attack_df_stat.values)
for usr in users_group_1}
att_prd_prob_stat = {usr: clf_dict[usr].classifier.predict_proba(self.attack_df_stat.values)
for usr in users_group_1}
self.att_prd_prob_stat = {usr: att_prd_prob_stat[usr][:, 1]
for usr in users_group_1}
"""
Hypervolume Attack
"""
if self.attack_df_hyp is None:
hyp_adv = HypVolAttack(data=self.attack_df, equal_user_data=False, random_state=self.rand_state,
calc_clusters=False,
clusters_path=self.cluster_data_path, gr_num=1, cluster_count=self.num_cls,
ol_path=self.hyp_vol_data_path, attack_samples=self.attack_samples,
ol_cut_off=self.hv_cut_off, std_dev_at_gr=None)
self.attack_df_hyp = hyp_adv.generate_attack()
else:
self.attack_df_hyp = self.attack_df_hyp
# Performing attack
self.att_prd_hyp = {usr: clf_dict[usr].classifier.predict(
self.attack_df_hyp.drop(["cluster_number"], axis=1).values)
for usr in users_group_1}
att_prd_prob_hyp = {usr: clf_dict[usr].classifier.predict_proba(
self.attack_df_hyp.drop(["cluster_number"], axis=1).values)
for usr in users_group_1}
self.att_prd_prob_hyp = {usr: att_prd_prob_hyp[usr][:, 1]
for usr in users_group_1}
df_hyp = pd.DataFrame.from_dict(self.att_prd_hyp)
df_stat = pd.DataFrame.from_dict(self.att_prd_stat)
df_kpp = pd.DataFrame.from_dict(self.att_prd_kpp)
df_mk = pd.DataFrame.from_dict(self.att_prd_mk)
df_prob_hyp = pd.DataFrame.from_dict(self.att_prd_prob_hyp)
df_prob_stat = pd.DataFrame.from_dict(self.att_prd_prob_stat)
df_prob_kpp = pd.DataFrame.from_dict(self.att_prd_prob_kpp)
df_prob_mk = pd.DataFrame.from_dict(self.att_prd_prob_mk)
df_hyp = pd.concat([df_hyp, self.attack_df_hyp['cluster_number']], axis=1)
df_hyp.to_csv(os.path.join(self.results_save_path, f"{self.active_gr}_hyp_at_prd_{self.clf_type}.csv"), index=False,
mode='w+')
df_stat.to_csv(os.path.join(self.results_save_path, f"{self.active_gr}_stat_at_prd_{self.clf_type}.csv"), index=False,
mode='w+')
df_kpp.to_csv(os.path.join(self.results_save_path, f"{self.active_gr}_kpp_at_prd_{self.clf_type}.csv"), index=False,
mode='w+')
df_mk.to_csv(os.path.join(self.results_save_path, f"{self.active_gr}_mk_at_prd_{self.clf_type}.csv"),
index=False, mode='w+')
df_prob_hyp = pd.concat([df_prob_hyp, self.attack_df_hyp['cluster_number']], axis=1)
df_prob_hyp.to_csv(os.path.join(self.results_save_path, f"{self.active_gr}_hyp_at_prd_prob_{self.clf_type}.csv"),
index=False,
mode='w+')
df_prob_stat.to_csv(os.path.join(self.results_save_path, f"{self.active_gr}_stat_at_prd_prob_{self.clf_type}.csv"),
index=False,
mode='w+')
df_prob_kpp.to_csv(os.path.join(self.results_save_path, f"{self.active_gr}_kpp_at_prd_prob_{self.clf_type}.csv"),
index=False,
mode='w+')
df_prob_mk.to_csv(os.path.join(self.results_save_path, f"{self.active_gr}_mk_at_prd_prob_{self.clf_type}.csv"), index=False,
mode='w+')
df_hyp = df_hyp.drop("cluster_number", axis=1)
df_prob_hyp = df_prob_hyp.drop("cluster_number", axis=1)
user_crk_hyp = pd.DataFrame(columns=["try_num", "attack_type", "users_cracked_per"])
user_crk_stat =
|
pd.DataFrame(columns=["try_num", "attack_type", "users_cracked_per"])
|
pandas.DataFrame
|
import numpy
import pandas as pd
import torch
from disentanglement_lib.data.ground_truth import dsprites
data = []
bce = torch.nn.BCELoss(reduction='sum')
for i in range(1, 6):
action = dsprites.DSprites([i])
num = action.factors_num_values[0]
images = torch.FloatTensor([action[idx][0] for idx in range(num)])
images = torch.FloatTensor(images.reshape(num, -1))
p = torch.mean(images, 0, keepdim=True).repeat([num, 1])
H = bce(images, p).item() / num
data.append([i, H])
df =
|
pd.DataFrame(data, columns=['action', 'entropy'])
|
pandas.DataFrame
|
from typing import Dict
import arche.rules.coverage as cov
from arche.rules.result import Level
from conftest import *
import pandas as pd
import pytest
@pytest.mark.parametrize(
"df, expected_messages, expected_stats",
[
(
pd.DataFrame({"id": [n for n in range(1000)]}),
{},
[pd.Series([1000], index=["id"], name="Fields coverage for 1_000 items")],
),
(
pd.DataFrame([("Jordan", None)], columns=["Name", "Field"]),
{Level.ERROR: [("1 empty field(s)",)]},
[
pd.Series(
[1, 0], index=["Name", "Field"], name="Fields coverage for 1 items"
)
],
),
(
pd.DataFrame([(0, "")], columns=["Name", "Field"]),
{},
[
pd.Series(
[1, 1], index=["Field", "Name"], name="Fields coverage for 1 items"
)
],
),
],
)
def test_check_fields_coverage(df, expected_messages, expected_stats):
assert_results_equal(
cov.check_fields_coverage(df),
create_result("Fields Coverage", expected_messages, expected_stats),
)
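# Conceptual sketch (not arche's implementation, just the semantics implied by the
# cases above): "fields coverage" amounts to the count of non-null values per column.
#
# df = pd.DataFrame([("Jordan", None)], columns=["Name", "Field"])
# coverage = df.count()  # -> Name: 1, Field: 0, matching the expected stats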
@pytest.mark.parametrize(
"source_stats, target_stats, expected_messages, expected_stats",
[
(
{"counts": {"f1": 100, "f2": 150}, "totals": {"input_values": 100}},
{"counts": {"f2": 100, "f3": 150}, "totals": {"input_values": 100}},
{Level.ERROR: [("The difference is greater than 10% for 3 field(s)",)]},
[
create_named_df(
{"s": [0.0, 1.0, 1.5], "t": [1.5, 0.0, 1.0]},
index=["f3", "f1", "f2"],
name="Coverage from job stats fields counts",
),
pd.Series(
[-1.5, 0.5, 1.0],
index=["f3", "f2", "f1"],
name="Coverage difference more than 5%",
),
],
),
(
{"counts": {"f1": 100, "f2": 150}, "totals": {"input_values": 100}},
{"counts": {"f1": 106, "f2": 289}, "totals": {"input_values": 200}},
{
Level.ERROR: [("The difference is greater than 10% for 1 field(s)",)],
Level.WARNING: [
("The difference is between 5% and 10% for 1 field(s)",)
],
},
[
create_named_df(
{"s": [1.0, 1.5], "t": [0.53, 1.445]},
index=["f1", "f2"],
name="Coverage from job stats fields counts",
),
pd.Series(
[0.055, 0.47],
index=["f2", "f1"],
name="Coverage difference more than 5%",
),
],
),
(
{"counts": {"f1": 100, "f2": 150}, "totals": {"input_values": 100}},
{"counts": {"f1": 94, "f2": 141}, "totals": {"input_values": 100}},
{Level.WARNING: [("The difference is between 5% and 10% for 2 field(s)",)]},
[
create_named_df(
{"s": [1.0, 1.5], "t": [0.94, 1.41]},
index=["f1", "f2"],
name="Coverage from job stats fields counts",
),
pd.Series(
[0.06, 0.09],
index=["f1", "f2"],
name="Coverage difference more than 5%",
),
],
),
(
{"counts": {"state": 100}, "totals": {"input_values": 100}},
{"counts": {"state": 100}, "totals": {"input_values": 100}},
{},
[
create_named_df(
{"s": [1.0], "t": [1.0]},
index=["state"],
name="Coverage from job stats fields counts",
)
],
),
],
)
def test_get_difference(source_stats, target_stats, expected_messages, expected_stats):
assert_results_equal(
cov.get_difference(
Job(stats=source_stats, key="s"), Job(stats=target_stats, key="t")
),
create_result("Coverage Difference", expected_messages, stats=expected_stats),
)
@pytest.mark.parametrize(
"source_cols, target_cols, expected_messages",
[
(["range", "name"], ["name"], {Level.INFO: [("New - range",)]}),
(["name"], ["name", "sex"], {Level.ERROR: [("Missing - sex",)]}),
],
)
def test_compare_scraped_fields(source_cols, target_cols, expected_messages):
result = cov.compare_scraped_fields(
pd.DataFrame([], columns=source_cols),
|
pd.DataFrame([], columns=target_cols)
|
pandas.DataFrame
|
#!/usr/bin/env python
#
# Evaluation script for the training set of the CORSMAL Challenge
#
##################################################################################
# Author: <NAME>
# Email: <EMAIL>
#
# Created Date: 2020/09/02
# Modified Date: 2020/10/04
#
# Centre for Intelligent Sensing, Queen Mary University of London, UK
#
##################################################################################
# License
# This work is licensed under the Creative Commons Attribution-NonCommercial 4.0
# International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
##################################################################################
#
import random
import argparse
import pandas as pd
import numpy as np
from pdb import set_trace as bp
def getDummyDataFrame(num_configs):
cols_headers = ['Configuration ID','Container capacity','Container mass',\
'Filling mass','None','Pasta','Rice','Water','Filling type','Empty',\
'Half-full','Full','Filling level','Width at the top','Width at the bottom',\
'Height','Object safety','Distance','Angle difference','Execution time']
dummy_mat = -np.ones((num_configs,20))
df = pd.DataFrame(data=dummy_mat,columns=cols_headers)
return df
def populateAnnotationEstimations(gt, args):
num_configs = len(gt['id'].values)
df = getDummyDataFrame(num_configs)
df['Configuration ID'] = gt['id']
num_tasks=0
if args.task1:
df['Empty'] = (gt['filling level'].values == 0).astype(int)
df['Half-full'] = (gt['filling level'].values == 1).astype(int)
df['Full'] = (gt['filling level'].values == 2).astype(int)
df['Filling level'] = gt['filling level']
num_tasks += 1
if args.task2:
df['None'] = (gt['filling type'].values == 0).astype(int)
df['Pasta'] = (gt['filling type'].values == 1).astype(int)
df['Rice'] = (gt['filling type'].values == 2).astype(int)
df['Water'] = (gt['filling type'].values == 3).astype(int)
df['Filling type'] = gt['filling type']
num_tasks += 1
if args.task3:
df['Container capacity'] = gt['container capacity']
num_tasks += 1
if args.task4:
df['Container mass'] = gt['container mass']
num_tasks += 1
if args.task5:
df['Width at the top'] = gt['width at the top']
df['Width at the bottom'] = gt['width at the bottom']
df['Height'] = gt['height']
num_tasks += 1
# if num_tasks > 0:
# df['Execution time'] = est['Execution time']
return df
def populateRandomEstimations(est, gt, args):
num_configs = len(est['Configuration ID'].values)
df = getDummyDataFrame(num_configs)
df['Configuration ID'] = est['Configuration ID']
num_tasks=0
if args.task1:
df['Empty'] = est['Empty']
df['Half-full'] = est['Half-full']
df['Full'] = est['Full']
df['Filling level'] = est['Filling level']
num_tasks += 1
if args.task2:
df['None'] = est['None']
df['Pasta'] = est['Pasta']
df['Rice'] = est['Rice']
df['Water'] = est['Water']
df['Filling type'] = est['Filling type']
num_tasks += 1
if args.task3:
if args.mode == 'average':
cc_avg = np.average(gt['container capacity'].unique())
df['Container capacity'] = df['Container capacity'].replace(-1,int(cc_avg))
else:
df['Container capacity'] = est['Container capacity']
num_tasks += 1
if args.task4:
if args.mode == 'average':
cm_avg = np.average(gt['container mass'].unique())
df['Container mass'] = df['Container mass'].replace(-1,int(cm_avg))
else:
df['Container mass'] = est['Container mass']
num_tasks += 1
if args.task5:
if args.mode == 'average':
cwt_avg = np.average(gt['width at the top'].unique())
df['Width at the top'] = df['Width at the top'].replace(-1,int(cwt_avg))
cwb_avg = np.average(gt['width at the bottom'].unique())
df['Width at the bottom'] = df['Width at the bottom'].replace(-1,int(cwb_avg))
ch_avg = np.average(gt['height'].unique())
df['Height'] = df['Height'].replace(-1,int(ch_avg))
else:
df['Width at the top'] = est['Width at the top']
df['Width at the bottom'] = est['Width at the bottom']
df['Height'] = est['Height']
num_tasks += 1
if num_tasks > 0:
df['Execution time'] = est['Execution time']
return df
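# Illustrative usage sketch (not in the original script; the toy values and the use
# of argparse.Namespace in place of parsed CLI args are assumptions):
#
# import argparse
# _gt = pd.DataFrame({'id': [0], 'filling level': [2], 'filling type': [3],
#                     'container capacity': [500], 'container mass': [50],
#                     'width at the top': [90], 'width at the bottom': [60], 'height': [120]})
# _args = argparse.Namespace(task1=True, task2=True, task3=False, task4=False,
#                            task5=False, mode='annotation')
# _df = populateAnnotationEstimations(_gt, _args)  # -> one row with Filling level == 2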
if __name__ == '__main__':
# Arguments
parser = argparse.ArgumentParser(description='CORSMAL Challenge evaluation')
parser.add_argument('--task1', default=False, action='store_true')
parser.add_argument('--task2', default=False, action='store_true')
parser.add_argument('--task3', default=False, action='store_true')
parser.add_argument('--task4', default=False, action='store_true')
parser.add_argument('--task5', default=False, action='store_true')
parser.add_argument('--filename', default='teamN.csv', type=str)
parser.add_argument('--set', default='train', help="Choose the set option:\n--train\n--test_pub\n--test_priv", choices=['train','test_pub','test_priv'])
parser.add_argument('--mode', default='annotation', help="Choose the set option:\n--annotation\n--random\n--average", choices=['annotation','random','average'])
args = parser.parse_args()
if args.set == 'train':
gt = pd.read_csv('../annotations/ccm_train_annotation.csv', sep=',')
rnd =
|
pd.read_csv('train_set/random1.csv', sep=',')
|
pandas.read_csv
|
import pandas as pd
import click
REGEX_COLUMN = [
'specie',
'genus',
'family',
'order',
'phylum',
'domain',
'kingdom',
'organism',
'unnamed',
'citation',
'evidence',
'microbe_id',
'otu',
' id',
'non_null',
'pmid',
'sample type',
]
REGEX_COUNT_COL = [
'count',
'sols',
'_id_',
]
def file_clean(tbl):
column_clean = reduce_col(tbl)
tbl_renamed = modify_dataset_value(column_clean)
new_tbl = tbl_renamed.drop_duplicates(
subset=['scientific_name', 'taxonomic_id', 'rank']
) # not likely to actually be duplicates
new_tbl = new_tbl.replace('nan, ', '', regex=True)
new_tbl = new_tbl.replace('\'', '').replace('[', '').replace(']', '')
new_tbl = new_tbl.applymap(lambda x: x.replace('\'', '') if isinstance(x, str) else x)
return new_tbl
def clean_columns(tbl):
unnamed = [el for el in tbl.columns if 'unnamed' in el.lower()]
tbl = tbl.drop(columns=unnamed)
halo = [el for el in tbl.columns if 'halotolerance_classification' in el.lower()]
if halo:
h = tbl[halo[0]]
if isinstance(h, pd.DataFrame):
h = h.iloc[:, 0]
tbl['halotolerance'] = h.map(
lambda el: 'Moderate' if 'Moderate' in str(el) else str(el).strip()
)
tbl = tbl.drop(columns=halo)
return tbl
def reduce_col(tbl):
"""Remove empty columns, ids and taxonomy columns"""
drop_col = tbl.dropna(axis='columns', how='all')
drop_col.columns = map(str.lower, drop_col.columns)
col_names = list()
for reg in REGEX_COLUMN:
col_names.extend(list(drop_col.filter(regex=reg)))
drop_col = drop_col.drop(columns=col_names, axis=0)
drop_col.columns = rename_col(drop_col)
final_tbl = rename_MD1_tables(drop_col)
return final_tbl
def rename_col(tbl):
"""Convert column names as per snakelowercase standards"""
tbl.columns = tbl.columns.str.strip().str.lower()
pairs = [
(' ', '_'), ('_x', '_'), ('_y', '_'), ('.', '_'), (',', '_'),
('/', '_'), ('(', '_'), (')', ''),
]
for a, b in pairs:
tbl.columns = tbl.columns.str.replace(a, b)
return tbl.columns
def rename_MD1_tables(tbl):
"""Replace numeric entries from MD1"""
md1_cols = [
('gram_stain', ['Negative', 'Positive', 'Intermediate']),
('extreme_environment', ['Mesophiles', 'Extremophile']),
('antimicrobial_susceptibility', ['Maybe Not', 'Sometimes']),
('biofilm_forming', ['Never', 'Always']),
('animal_pathogen', ['Maybe Not', 'Sometimes']),
('plant_pathogen', ['Maybe Not', 'Sometimes']),
('microbiome_location', ['Maybe', 'Sometimes']),
('spore_forming', ['Never', 'Always'])
]
for col_name, new_vals in md1_cols:
old_vals = [i for i in range(len(new_vals))]
tbl[col_name] = tbl[col_name].replace(old_vals, new_vals)
tbl = tbl.rename(columns={'microbiome_location': 'human_commensal'})
return tbl
def modify_dataset_value(tbl):
"""Convert datasets with count values to interpretable values"""
if 'drylands' in tbl.columns:
for col in ['drylands', 'low_productivity', 'low_ph', 'high_ph']:
tbl[col] = tbl[col].replace([0, 1], ['Not Observed', 'Observed'])
for reg in REGEX_COUNT_COL:
regex_columns = [cols for cols in tbl.columns if reg in cols]
final_tbl = clean_count_datasets(tbl, regex_columns)
return final_tbl
def clean_count_datasets(tbl, regex_list):
"""Logic for count conversion"""
for reg in regex_list:
tbl[reg] =
|
pd.to_numeric(tbl[reg], errors='coerce')
|
pandas.to_numeric
|
import unittest
import numpy as np
import pandas as pd
import mlsurvey as mls
class TestData(unittest.TestCase):
def test_to_dict_dict_should_be_set(self):
"""
:test : mlsurvey.model.Data.to_dict()
:condition : x,y, y_pred data are filled.
:main_result : the dictionary generated is the same as expected
"""
x = np.array([[1, 2, 3], [4, 5, 6]])
y = np.array([0, 1])
y_pred = np.array([1, 0])
data_array = np.concatenate((x, np.array([y]).T, np.array([y_pred]).T), axis=1)
df = pd.DataFrame(data=data_array)
data = mls.sl.models.DataPandas(df, df_contains='xyypred')
expected = {'df_contains': 'xyypred',
'y_col_name': 'target',
'y_pred_col_name': 'target_pred'}
result = data.to_dict()
self.assertDictEqual(expected, result)
def test_from_dict_df_empty(self):
"""
:test : mlsurvey.model.DataPandas.from_dict()
:condition : the input dict is set and an empty dataframe is given.
:main_result : a ModelError occurs
"""
df = pd.DataFrame(data=np.array([]))
d = None
input_dict = {'df_contains': 'xyypred',
'y_col_name': 'target',
'y_pred_col_name': 'target_pred'}
try:
d = mls.sl.models.DataPandas.from_dict(input_dict, df)
self.assertTrue(False)
except mls.exceptions.ModelError:
self.assertIsNone(d)
self.assertTrue(True)
def test_from_dict_dict_empty(self):
"""
:test : mlsurvey.model.Data.from_dict()
:condition : the input dict does not contains all keys and an full dataframe is given
:main_result : a ModelError occurs
"""
x = np.array([[1, 2], [3, 4]])
y = np.array([0, 1])
y_pred = np.array([1, 0])
data_array = np.concatenate((x, np.array([y]).T, np.array([y_pred]).T), axis=1)
df =
|
pd.DataFrame(data=data_array)
|
pandas.DataFrame
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon May 18 17:54:14 2020
# First, request the generation of an API_KEY in your SketchEngine account:
# https://www.sketchengine.eu/documentation/api-documentation/#toggle-id-1 (Authentication)
# For more information on the available methods: https://www.sketchengine.eu/documentation/methods-documentation/
@author: emmanuelcartier
"""
import requests, time, json,re, glob
import pandas as pd
import numpy as np
from os import path
import os
import traceback
import pysolr
import mysql.connector
from mysql.connector import Error
import csv
from datetime import datetime
#currentSecond= datetime.now().second
#currentMinute = datetime.now().minute
#currentHour = datetime.now().hour
currentDay = datetime.now().day
currentMonth = datetime.now().month
currentYear = datetime.now().year
# sketchengine api
USERNAME = 'your username'
API_KEY = 'your api key'
base_url = 'https://api.sketchengine.eu/bonito/run.cgi'
# retrieve corpus info from sketchengine
def corpus_info(corpus):
''' get corpus info'''
params = {'struct_attr_stats':1,'gramrels':0,'corpcheck':0,'registry':0,'subcorpora':0}
params['corpname']=corpus
try:
res = requests.get(base_url + '/corp_info', params=params, auth=(USERNAME, API_KEY)).json()
#print(res, res.text)
return res
except Exception as e:
print("Error in result for query : [" + base_url + '/corp_info?], params : ' + str(params) + ', error : '+ str(e))
# parameters for view query (sketchengine)
params_query = {
'format': 'csv', # response format (note, as of 30/03/2020: meta-information is only available with json!)
'async':0, # response mode (here we fetch the whole response; slower but easier to handle)
#'corpname': 'preloaded/fra_jsi_newsfeed_virt',
'attrs': 'word,tag,lemma', # attributes for the keyword
'ctxattrs': 'word,tag,lemma', # same for the context words
# 'structs':'doc.uri,doc.website,doc.date,doc.source_country',# meta-information (see the results of the previous corp_info query)
'refs':'=doc.uri,=doc.website,=doc.date,=doc.source_country',# meta-information (see the results of the previous corp_info query)
# will be dynamically assigned : 'q':'q[lc="class"][lc="action"]', # query
'viewmode':'sen', # retrieve sentence mode (sentence vs kwic)
'pagesize':10000, # maximum number of results
}
# parameters for wordlist query (sketchengine)
params_wordlist = {
'format': 'csv',
'async':0,
'wltype': 'simple',
'wlattr': 'word',
'wlnums': 'frq',#',docf,arf',
'wlminfreq':3,
'wlsort':'f',
'wlmaxitems': 1000,
# 'wlpat': dynamic value
#'pagesize':10000,
}
params_freq = {
'format': 'json',
'async':0,
'fcrit': 'word/0',
'flimit': 1000,
'freq_sort': 'freq',
# 'q': dynamic value
#'pagesize':10000,
}
params_wsketch = {
#'lemma': item,
#'lpos': '-v',
'corpname': 'preloaded/bnc2',
'format': 'json'
}
def query_sketchengine(query, params):
''' Send a query to SketchEngine and return the response.
See https://www.sketchengine.eu/documentation/methods-documentation/ for all parameters.
'''
try:
if params['format']=='json':
res = requests.get(base_url + '/' + query, params=params, auth=(USERNAME, API_KEY), timeout=180).json()
else:
res = requests.get(base_url + '/' + query, params=params, auth=(USERNAME, API_KEY), timeout=180)
if res.status_code==200:
return res
else:
print("Error with query : response : " + str(res.status_code))
return False
except Exception as e:
print("Erreur dans la requête ("+ res.text + "). Message d'erreur : " + str(e))
print(traceback.format_exc())
return False
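# Illustrative usage sketch (not in the original script; it requires valid USERNAME /
# API_KEY credentials above, and the wlpat pattern is just an example):
#
# params = dict(params_wordlist)
# params['corpname'] = 'preloaded/eng_jsi_newsfeed_virt'
# params['wlpat'] = '.*ware.*'
# res = query_sketchengine('wordlist', params)
# if res:
#     print(res.text[:500])  # first lines of the returned CSV wordlist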
def generate_wordlist(series, wordlist, corpora, dir_res='./res_wordlist/'):
'''
generate files with wordlist from query to sketchengine wordlist
Parameters
----------
series : TYPE str
the name of the series.
wordlist : TYPE list
list of words/morphems.
corpora : TYPE dict
dictionary of corpora to query ({'en': 'preloaded/eng_jsi_newsfeed_virt'}).
dir_res : TYPE str, optional
subdirectory name where the result file(s) are written. The default is './res_wordlist/'.
Returns
-------
None (files are written in res_wordlist subdirectory)
'''
# query parameters
params= {}
for lang, corp in corpora.items():
params['corpname'] = corp
params.update(params_wordlist)
corp_fn = params['corpname'].split('/')
for word in wordlist:
word1 = re.sub(r'\W','.?',word) # replace each non-word character with .? (any optional character)
params['wlpat'] = '.*' + word1 + '.*'
filename = dir_res + corp_fn[1] + '.' + series + '.'+ word + "." + params['format']
if path.exists(filename):
print("Results already saved - Corpus : " + corp_fn[1] + ", query :" + params['wlpat'] + ", filename :" + filename)
continue
print("SE wordlist query with parameters : " + word + ":"+ params['wlpat'] + ':' + corp)
res = query_sketchengine('wordlist',params)
time.sleep(5)
#print(res.text)
if res:
with open(filename, mode="w", encoding="utf-8") as fin:
if params['format']=='json':
json.dump(res,fin, indent=4)
elif params['format'] == 'csv':
fin.write(res.text)
print("Results saved - Corpus : " + corp_fn[1] + ", query :" + params['wlpat'] + ", filename :" + filename)
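# Illustrative call (a sketch, not in the original script; the series name and word
# are placeholders), mirroring the docstring example above:
#
# generate_wordlist('demo_series', ['ware'], {'en': 'preloaded/eng_jsi_newsfeed_virt'})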
def combine_wordlist_results(series, dir_in = './res_wordlist/',dir_out='./res_wordlist_final/'):
files = glob.glob(dir_in + "*"+ series + "*.csv")
print(len(files),files)
# list of dataframes for each word
df = pd.DataFrame(columns=['word'])
for fn in files:
data = re.split(r"\/",fn) # get filename only
fndata = re.split(r"\.",data[len(data)-1]) # split to retrieve language and series morphem
#print(fndata)
lang2 = re.split(r'_', fndata[0])[0]
if lang2 in lang_corresp.keys():
lang = lang_corresp[lang2]
else:
print("Error with lang value :" + lang2 + '(filename :'+ fn + ')')
exit()
#print(lang)
morph = fndata[1]
freqfield = "freq_"+lang
# now read csv file into dataframe
df1 = pd.read_csv(fn, header=None,
quotechar='"',
sep=',',
names=['word',freqfield],
skiprows=3)
df1 = df1.astype(dtype= {"word":"object",freqfield:"float64"})
print("*"*50)
print("df1 : ", fn, df1.info())
df =
|
pd.merge(df,df1,how="outer",on="word")
|
pandas.merge
|
# -*- coding: utf-8 -*-
# time
import time
import datetime
# system
import os
import sys
import re
from IPython.display import HTML
# databases
import MySQLdb as mdb, MySQLdb.cursors as mdb_cursors
import sqlite3
# files
import codecs
# requests and others
import requests
import urllib
# data
import pandas as pd
# pywikibot
import pywikibot
PYWIKIBOT2_DIR = '/srv/wcdo/src_viz/user-config.py'
# scripts
sys.path.insert(0, '/srv/wcdo/src_data')
import wikilanguages_utils
class Logger(object): # this prints both the output to a file and to the terminal screen.
def __init__(self):
self.terminal = sys.stdout
self.log = open("meta_update.out", "w")
def write(self, message):
self.terminal.write(message)
self.log.write(message)
def flush(self): pass
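# Typical use of Logger (an illustrative sketch, not in the original script): redirect
# stdout so that every print() goes both to the terminal and to meta_update.out.
#
# sys.stdout = Logger()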
# MAIN
######################## WCDO CREATION SCRIPT ########################
def main():
publish_missing_ccc_articles_lists()
return
while True:
time.sleep(84600)
print ("Good morning. It is: " + str(datetime.datetime.now()) + ". Let's see if today is the day to publish some stats in meta...")
# CHAINED TO CCC CREATION (ONCE A MONTH) AND TOP CCC
if verify_time_for_iteration():
publish_wcdo_update_meta_pages()
######################################################################
# In this function we create the table language_territories_mapping. # CEE Spring.
def make_table_links_CEE():
territories = wikilanguages_utils.load_languageterritories_mapping()
languages_df = wikilanguages_utils.load_wiki_projects_information(territories);
languages = ['en','az','ba','be','be-tarask','bs','bg','crh','de','el','eo','et','hr','hsb','hu','hy','ka','kk','lt','lv','mk','myv','pl','ro','ru','sh','sq','sr','tr','tt','uk']
langu = ['az','ba','be','be_x_old','bs','bg','crh','de','el','et','hr','hsb','hu','hy','ka','kk','lt','lv','mk','myv','pl','ro','ru','sh','sq','sr','tr','tt','uk']
rows_langs = {'az':'Azerbaijan','ba':'Bashkortostan','be':'Belarus','be_x_old':'Belarus','bs':'Bosnia and Herzegovina','bg':'Bulgaria','crh':'','de':'Austria','eo':'','el':'Greece','et':'Estonia','hr':'Croatia','hsb':'Germany','hu':'Hungary','hy':'Armenia','ka':'Georgia','kk':'Kazakhstan','lt':'Lithuania','lv':'Latvia','mk':'Macedonia','myv':'Russia','pl':'Poland','ro':'','ru':'Russia','sh':'','sq':'Albania','sr':'Serbia','tr':'Turkey','tt':'Tatarstan','uk':'Ukrania'}
country_iso = {'Azerbaijan':'AZ','Belarus':'BY','Bosnia and Herzegovina':'BA','Bulgaria':'BG','Austria':'AT','Greece':'GR','Estonia':'EE','Croatia':'HR','Germany':'DE','Hungary':'HU','Armernia':'AM','Georgia':'GE','Kazakhstan':'KZ','Lithuania':'LT','Latvia':'LV','Macedonia':'MK','Russia':'RU','Poland':'PL','Albania':'AL','Serbia':'SR','Turkey':'TR'}
lists = ['editors', 'featured', 'geolocated', 'keywords', 'women', 'men', 'created_first_three_years', 'created_last_year', 'pageviews', 'discussions']
lists_dict = {'editors':'Editors', 'featured':'Featured', 'geolocated':'Geolocated', 'keywords':'Keywords', 'women':'Women', 'men':'Men', 'created_first_three_years':'Created First Three Years', 'created_last_year':'Created Last Year', 'pageviews':'Pageviews', 'discussions':'Discussions'}
columns_final = ['List']+languages
df_columns_list = columns_final
wikitext = ''
for language in langu:
wikitext+= "==="+languages_df.loc[language]['languagename']+"===\n"
class_header_string = '{| border="1" cellpadding="2" cellspacing="0" style="width:100%; background: #f9f9f9; border: 1px solid #aaaaaa; border-collapse: collapse; white-space: nowrap; text-align: right" class="sortable"\n'
header_string = '!'
for x in range(0,len(df_columns_list)):
if x == len(df_columns_list)-1: add = ''
else: add = '!!'
header_string = header_string + df_columns_list[x] + add
header_string = header_string + '\n'
rows = ''
for lista in lists:
midline = '|-\n'
row_string = '|'
row_string += lists_dict[lista]+'||'
for row in languages:
if row == 'uk': add = ''
else: add = '||'
# create the URL
string = "https://wcdo.wmflabs.org/top_ccc_articles/?list="+lista
string += "&target_lang="+row
string += "&source_lang="+language
if rows_langs[language] in country_iso:
string += "&source_country=" + country_iso[rows_langs[language]].lower()
URL = '['+string+' '+' '+']'
row_string = row_string + str(URL) + add # here is the value
row_string = midline + row_string + '\n'
rows = rows + row_string
closer_string = '|}'
wiki_table_string = class_header_string + header_string + rows + closer_string
wikitext += wiki_table_string+'\n\n'
return wikitext
def publish_missing_ccc_articles_lists():
glow_langs = ['sd','id', 'jv', 'su', 'hi', 'ta', 'te', 'mr', 'kn', 'ml', 'or', 'pa', 'sa', 'gu', 'en', 'ar', 'es']
# glow_langs = ['sd']
# Bahasa Indonesia id, Bahasa Jawa jv, Bahasa Sunda su, Hindi hi, Tamil ta, Telugu te, Marathi mr, Kannada kn, Malayalam ml, Odia or, Punjabi pa, Sanskrit sa, Gujarati gu, English - Geolocated for Nigeria en, Arabic - Jordan, Egypt and Tunisia ar, Spanish - Geolocated for Argentina es, Sindhi sd.
for languagecode in glow_langs:
source_lang = 'None'
languagename = languages.loc[languagecode]['languagename']
try: qitems = territories.loc[languagecode]['QitemTerritory'].tolist()
except: qitems = [territories.loc[languagecode]['QitemTerritory']]
wikitext = ' = '+languagename+' Wikipedia Missing local articles =\n'
line = 'Language '+languagename+' is spoken in: '
i=0
for qitem in qitems:
i=i+1
regional = territories.loc[territories['QitemTerritory'] == qitem].loc[languagecode]['regional']
if regional == 'yes': regional = 'region'
else:
regional = 'country'
territoryname = territories.loc[territories['QitemTerritory'] == qitem].loc[languagecode]['territoryname']
ISO = territories.loc[territories['QitemTerritory'] == qitem].loc[languagecode]['ISO31662']
if ISO == '' or ISO == None:
ISO = territories.loc[territories['QitemTerritory'] == qitem].loc[languagecode]['ISO3166']
# if territoryname == None: territoryname = ''
if i==len(qitems)-1:
line = line + territoryname + ' ('+regional+' with ISO code '+ISO+') and '
else:
line = line + territoryname + ' ('+regional+' with ISO code '+ISO+'), '
line = line[:len(line)-2]+'.'
wikitext += 'This is the local content from '+languagename+' related territories that does not exist in '+languagename+' Wikipedia and yet it exists in other language editions, especially those of languages that are spoken also in these territories.\n'
wikitext += line+'\n\n'
# make_table_missing_ccc_articles(topic, order_by, limit, target_region, type, ccc_segment, target_lang, source_lang, target_country):
wikitext += '== 500 Geolocated articles ==\n'
# 500 places
# GEOLOCATED
# 100 with the most interwiki links
# 50 with the most inlinks from CCC
# 25 with the most bytes
# 25 with the most discussion edits
wikitext = wikitext + '=== 100 Geolocated articles sorted by number of Interwiki links ===\n'
wikitext = wikitext + make_table_missing_ccc_articles('None', 'num_interwiki', 100, 'None', 'None', 'geolocated', languagecode, source_lang, 'None')
wikitext += '\n\n'
wikitext = wikitext + '=== 50 Geolocated articles sorted by number of Incoming links from Local Content (CCC) ===\n'
wikitext = wikitext + make_table_missing_ccc_articles('None', 'num_inlinks_from_original_CCC', 50, 'None', 'None', 'geolocated', languagecode, source_lang, 'None')
wikitext += '\n\n'
wikitext = wikitext + '=== 25 Geolocated articles sorted by number of Bytes ===\n'
wikitext = wikitext + make_table_missing_ccc_articles('None', 'num_bytes', 25, 'None', 'None', 'geolocated', languagecode, source_lang, 'None')
wikitext += '\n\n'
wikitext = wikitext + '=== 25 Geolocated articles sorted by number of Edits in Talk Page ===\n'
wikitext = wikitext + make_table_missing_ccc_articles('None', 'num_discussions', 25, 'None', 'None', 'geolocated', languagecode, source_lang, 'None')
wikitext += '\n\n'
# MONUMENTS AND BUILDINGS
# 25 with the most interwiki links
# 25 with the most inlinks from CCC
# 25 with the most pageviews
# 25 with the most references
wikitext += '== 100 Monuments and buildings articles == \n'
wikitext = wikitext + '=== 25 Monuments and buildings articles sorted by number of Interwiki links ===\n'
wikitext = wikitext + make_table_missing_ccc_articles('monuments_and_buildings', 'num_interwiki', 25, 'None', 'None', 'None', languagecode, source_lang, 'None')
wikitext += '\n\n'
wikitext = wikitext + '=== 25 Monuments and buildings articles sorted by number of Incoming links from Local Content (CCC) ===\n'
wikitext = wikitext + make_table_missing_ccc_articles('monuments_and_buildings', 'num_inlinks_from_original_CCC', 25, 'None', 'None', 'None', languagecode, source_lang, 'None')
wikitext += '\n\n'
wikitext = wikitext + '=== 25 Monuments and buildings articles sorted by number of Pageviews ===\n'
wikitext = wikitext + make_table_missing_ccc_articles('monuments_and_buildings', 'num_pageviews', 25, 'None', 'None', 'None', languagecode, source_lang, 'None')
wikitext += '\n\n'
wikitext = wikitext + '=== 25 Monuments and buildings articles sorted by number of References ===\n'
wikitext = wikitext + make_table_missing_ccc_articles('monuments_and_buildings', 'num_references', 25, 'None', 'None', 'None', languagecode, source_lang, 'None')
wikitext += '\n\n'
# GLAM
# 25 with the most interwiki links
# 25 with the most inlinks from CCC
# 25 with the most pageviews
# 25 with the most references
wikitext += '== 100 GLAM articles ==\n'
wikitext = wikitext + '=== 25 GLAM articles sorted by number of Interwiki links ===\n'
wikitext = wikitext + make_table_missing_ccc_articles('glam', 'num_interwiki', 25, 'None', 'None', 'None', languagecode, source_lang, 'None')
wikitext += '\n\n'
wikitext = wikitext + '=== 25 GLAM articles sorted by number of Incoming links from Local Content (CCC) ===\n'
wikitext = wikitext + make_table_missing_ccc_articles('glam', 'num_inlinks_from_original_CCC', 25, 'None', 'None', 'None', languagecode, source_lang, 'None')
wikitext += '\n\n'
wikitext = wikitext + '=== 25 GLAM articles sorted by number of Pageviews ===\n'
wikitext = wikitext + make_table_missing_ccc_articles('glam', 'num_pageviews', 25, 'None', 'None', 'None', languagecode, source_lang, 'None')
wikitext += '\n\n'
wikitext = wikitext + '=== 25 GLAM articles sorted by number of References ===\n'
wikitext = wikitext + make_table_missing_ccc_articles('glam', 'num_references', 25, 'None', 'None', 'None', languagecode, source_lang, 'None')
wikitext += '\n\n'
# EARTH
# 25 with the most interwiki links
# 25 with the most inlinks from CCC
# 25 with the most pageviews
# 25 with the most references
wikitext += '== 100 Earth articles ==\n'
wikitext = wikitext + '=== 25 Earth articles sorted by number of Interwiki links ===\n'
wikitext = wikitext + make_table_missing_ccc_articles('earth', 'num_interwiki', 25, 'None', 'None', 'None', languagecode, source_lang, 'None')
wikitext += '\n\n'
wikitext = wikitext + '=== 25 Earth articles sorted by number of Incoming links from Local Content (CCC) ===\n'
wikitext = wikitext + make_table_missing_ccc_articles('earth', 'num_inlinks_from_original_CCC', 25, 'None', 'None', 'None', languagecode, source_lang, 'None')
wikitext += '\n\n'
wikitext = wikitext + '=== 25 Earth articles sorted by number of Pageviews ===\n'
wikitext = wikitext + make_table_missing_ccc_articles('earth', 'num_pageviews', 25, 'None', 'None', 'None', languagecode, source_lang, 'None')
wikitext += '\n\n'
wikitext = wikitext + '=== 25 Earth articles sorted by number of References ===\n'
wikitext = wikitext + make_table_missing_ccc_articles('earth', 'num_references', 25, 'None', 'None', 'None', languagecode, source_lang, 'None')
wikitext += '\n\n'
# 200 people
# MEN
# 25 with the most interwiki links
# 25 with the most inlinks from CCC
# 25 with the most pageviews
# 25 with the most references
wikitext += '== 100 Men articles ==\n'
wikitext = wikitext + '=== 25 Men articles sorted by number of Interwiki links ===\n'
wikitext = wikitext + make_table_missing_ccc_articles('men', 'num_interwiki', 25, 'None', 'None', 'None', languagecode, source_lang, 'None')
wikitext += '\n\n'
wikitext = wikitext + '=== 25 Men articles sorted by number of Incoming links from Local Content (CCC) ===\n'
wikitext = wikitext + make_table_missing_ccc_articles('men', 'num_inlinks_from_original_CCC', 25, 'None', 'None', 'None', languagecode, source_lang, 'None')
wikitext += '\n\n'
wikitext = wikitext + '=== 25 Men articles sorted by number of Pageviews ===\n'
wikitext = wikitext + make_table_missing_ccc_articles('men', 'num_pageviews', 25, 'None', 'None', 'None', languagecode, source_lang, 'None')
wikitext += '\n\n'
wikitext = wikitext + '=== 25 Men articles sorted by number of References ===\n'
wikitext = wikitext + make_table_missing_ccc_articles('men', 'num_references', 25, 'None', 'None', 'None', languagecode, source_lang, 'None')
wikitext += '\n\n'
# WOMEN
# 25 with the most interwiki links
# 25 with the most inlinks from CCC
# 25 with the most pageviews
# 25 with the most references
wikitext += '== 100 Women articles ==\n'
wikitext = wikitext + '=== 25 Women articles sorted by number of Interwiki links ===\n'
wikitext = wikitext + make_table_missing_ccc_articles('women', 'num_interwiki', 25, 'None', 'None', 'None', languagecode, source_lang, 'None')
wikitext += '\n\n'
wikitext = wikitext + '=== 25 Women articles sorted by number of Incoming links from Local Content (CCC) ===\n'
wikitext = wikitext + make_table_missing_ccc_articles('women', 'num_inlinks_from_original_CCC', 25, 'None', 'None', 'None', languagecode, source_lang, 'None')
wikitext += '\n\n'
wikitext = wikitext + '=== 25 Women articles sorted by number of Pageviews ===\n'
wikitext = wikitext + make_table_missing_ccc_articles('women', 'num_pageviews', 25, 'None', 'None', 'None', languagecode, source_lang, 'None')
wikitext += '\n\n'
wikitext = wikitext + '=== 25 Women articles sorted by number of References ===\n'
wikitext = wikitext + make_table_missing_ccc_articles('women', 'num_references', 25, 'None', 'None', 'None', languagecode, source_lang, 'None')
wikitext += '\n\n'
# 100 other topics
wikitext += '== 100 Food, music, paintings and sports articles ==\n'
# FOOD
# 25 with the most pageviews
wikitext = wikitext + '=== 25 Food articles sorted by number of Pageviews ===\n'
wikitext = wikitext + make_table_missing_ccc_articles('food', 'num_pageviews', 25, 'None', 'None', 'None', languagecode, source_lang, 'None')
wikitext += '\n\n'
# MUSIC
# 25 with the most pageviews
wikitext = wikitext + '=== 25 Music articles sorted by number of Pageviews ===\n'
wikitext = wikitext + make_table_missing_ccc_articles('music_creations_and_organizations', 'num_pageviews', 25, 'None', 'None', 'None', languagecode, source_lang, 'None')
wikitext += '\n\n'
# PAINTINGS
# 25 with the most pageviews
wikitext = wikitext + '=== 25 Paintings sorted by number of Pageviews ===\n'
wikitext = wikitext + make_table_missing_ccc_articles('paintings', 'num_pageviews', 25, 'None', 'None', 'None', languagecode, source_lang, 'None')
wikitext += '\n\n'
# SPORTS AND TEAMS
# 25 with the most pageviews
wikitext = wikitext + '=== 25 Sports sorted by number of Pageviews ===\n'
wikitext = wikitext + make_table_missing_ccc_articles('sport_and_teams', 'num_pageviews', 25, 'None', 'None', 'None', languagecode, source_lang, 'None')
wikitext += '\n\n'
# 100 general
# KEYWORDS
# 25 with the most interwiki links
# 25 with the most inlinks from CCC
# 25 with the most pageviews
# 25 with the most references
wikitext += '== 100 General language context-based articles ==\n'
wikitext = wikitext + '=== 25 General articles with keywords sorted by number of Interwiki links ===\n'
wikitext = wikitext + make_table_missing_ccc_articles('None', 'num_interwiki', 25, 'None', 'None', 'keywords', languagecode, source_lang, 'None')
wikitext += '\n\n'
wikitext = wikitext + '=== 25 General articles with keywords sorted by number of Incoming links from Local Content (CCC) ===\n'
wikitext = wikitext + make_table_missing_ccc_articles('None', 'num_inlinks_from_original_CCC', 25, 'None', 'None', 'keywords', languagecode, source_lang, 'None')
wikitext += '\n\n'
wikitext = wikitext + '=== 25 General articles with keywords sorted by number of Pageviews ===\n'
wikitext = wikitext + make_table_missing_ccc_articles('None', 'num_pageviews', 25, 'None', 'None', 'keywords', languagecode, source_lang, 'None')
wikitext += '\n\n'
wikitext = wikitext + '=== 25 General articles with keywords sorted by number of References ===\n'
wikitext = wikitext + make_table_missing_ccc_articles('None', 'num_references', 25, 'None', 'None', 'keywords', languagecode, source_lang, 'None')
wikitext += '\n\n'
# new_path = languagecode+'.txt'
# new_days = open(new_path,'w')
# new_days.write(wikitext)
site = pywikibot.Site('meta','meta')
page = pywikibot.Page(site, 'User:Marcmiquel' + '/' + 'test'+'/'+languagecode)
page.text = wikitext
page.save(summary="X", watch=None, minor=False, botflag=False, force=False, asynchronous=False, callback=None, apply_cosmetic_changes=None)
def make_table_missing_ccc_articles(topic, order_by, limit, target_region, type, ccc_segment, target_lang, source_lang, target_country):
print (topic, order_by, limit, target_region, type, ccc_segment, target_lang, source_lang, target_country)
e = (topic, order_by, limit, target_region, type, ccc_segment, target_lang, source_lang, target_country)
charac = '_'.join(map(str,e))
conn = sqlite3.connect(databases_path + 'missing_ccc.db'); cur = conn.cursor()
# TARGET LANGUAGE
target_language = languages.loc[target_lang]['languagename']
if target_country != 'None':
target_country = target_country.upper()
if target_country == 'NONE' or target_country == 'ALL': target_country = 'all'
else:
target_country = 'all'
if target_region != 'None':
target_region = target_region.upper()
if target_region == 'NONE' or target_region == 'ALL': target_region = 'all'
else:
target_region = 'all'
# TOPIC
type = "missing"
# SOURCE lANGUAGE
source_lang=source_lang.lower() #
# CREATING THE QUERY
query = 'SELECT '
columns = ['num','source_lang','page_title','num_interwiki','num_pageviews']
query += '"[[:" || languagecode || ":|" || languagecode || "]]" as source_lang, "[{{fullurl:" || languagecode || ":"|| page_title ||"}} " || REPLACE(page_title,"_"," ") || "]" as page_title, num_pageviews, num_interwiki, '
if order_by in ['num_outlinks','num_inlinks','num_wdproperty','num_discussions','num_inlinks_from_original_CCC','num_outlinks_to_original_CCC','num_bytes','num_references']:
query += order_by+', '
columns = columns + [order_by]
query += '("label" || " " || "(" || label_lang || ")" ) as label_lang, " [{{fullurl:" || "wikidata" || ":" || qitem || "}} " || REPLACE(qitem,"_"," ") || "]" as qitem '
columns = columns + ['label_lang','qitem']
query += 'FROM '+target_lang+'wiki '
query += 'WHERE (page_title_original_lang IS NULL or page_id_original_lang IS NULL) '
if ccc_segment == 'keywords':
query += 'AND keyword_title IS NOT NULL '
if ccc_segment == 'geolocated':
query += 'AND (geocoordinates IS NOT NULL OR location_wd IS NOT NULL) '
if target_country != "none" and target_country != "all":
query += 'AND iso3166 = "'+target_country+'" '
if target_region != "none" and target_region != "all":
query += 'AND iso31662 = "'+target_region+'" '
if topic != "none" and topic != "None" and topic != "all":
if topic == 'men': # male
query += 'AND gender = "Q6581097" '
elif topic == 'women': # female
query += 'AND gender = "Q6581072" '
elif topic == 'people':
query += 'AND gender IS NOT NULL '
else:
query += 'AND '+topic+' IS NOT NULL '
if source_lang == 'coexist':
query += 'AND non_language_pairs IS NULL '
elif source_lang == 'nocoexist':
query += 'AND non_language_pairs == 1 '
elif source_lang != "none":
query += 'AND languagecode = "'+source_lang+'" '
query += 'AND (num_inlinks_from_original_CCC!=0 OR num_outlinks_to_original_CCC!=0) '
if order_by == "none" or order_by == "None":
query += 'ORDER BY num_pageviews DESC '
else:
query += 'ORDER BY '+order_by+' DESC '
query += 'LIMIT 500;'
# if limit == "none":
# query += 'LIMIT 100;'
# else:
# query += 'LIMIT '+str(limit)+';'
print(query)
df = pd.read_sql_query(query, conn)#, parameters)
df = df.fillna(0)
if len(df)==0: return ''
page_titles = df.page_title.tolist()
for i in range(0,len(page_titles)):
page_title = page_titles[i].split('}}')[1].strip()
page_titles[i] = page_title[:len(page_title)-1]
# print (page_titles)
mysql_con_read = wikilanguages_utils.establish_mysql_connection_read(target_lang); mysql_cur_read = mysql_con_read.cursor()
page_titles_existing = []
page_asstring = ','.join( ['%s'] * len(page_titles) )
query = 'SELECT ll_title FROM langlinks WHERE ll_title IN (%s)' % page_asstring
mysql_cur_read.execute(query, page_titles) # Extreure
result = mysql_cur_read.fetchall()
for row in result:
page_titles_existing.append(row[0].decode('utf-8'))
df.num_pageviews = df.num_pageviews.astype('int64')
i = 0
target_langy = '('+target_lang +')'
qitems_list = []
for index, row in df.iterrows():
page_title = row['page_title'].split('}}')[1].strip()
page_title = page_title[:len(page_title)-1]
label_lang = row['label_lang']
if label_lang == 0 or target_langy not in label_lang:
df.loc[index, 'label_lang'] = ''
else:
label_lang = label_lang.split('(')[0].strip()
df.loc[index, 'label_lang'] = '[{{fullurl:'+target_lang+':'+label_lang.replace(' ','_')+'}} '+label_lang+']'
if row['qitem'] in qitems_list or i>=limit or page_title in page_titles_existing:
df.drop(index, inplace=True)
else:
# print ((row['page_title']))
qitems_list.append(row['qitem'])
i+=1
column_list_dict = {'source_lang':'Wiki','page_title':'Title','num_pageviews':'Pageviews','num_interwiki':'Interwiki', 'num_inlinks_from_original_CCC':'Inlinks CCC','num_references':'References','num_bytes':'Bytes','num_discussions':'Discussions','label_lang':target_language+' WD Label','qitem':'WD Qitem'}
df=df.rename(columns=column_list_dict)
df_columns_list = df.columns.values.tolist()
df_rows = df.values.tolist()
path = '/srv/wcdo/src_viz/missing_ccc'
if not os.path.exists(path):
os.makedirs(path)
path2 = path+'/'+target_lang
if not os.path.exists(path2):
os.makedirs(path2)
file_name = path2+'/missing_ccc_'+target_lang+'_'+charac+'.txt'
df.to_csv(file_name, sep='\t', encoding='utf-8')
class_header_string = '{| border="1" cellpadding="2" cellspacing="0" style="width:100%; background: #f9f9f9; border: 1px solid #aaaaaa; border-collapse: collapse; white-space: nowrap; text-align: right" class="sortable"\n'
header_string = '!'
for x in range(0,len(df_columns_list)):
if x == len(df_columns_list)-1: add = ''
else: add = '!!'
header_string = header_string + df_columns_list[x] + add
header_string = header_string + '\n'
rows = ''
for row in df_rows:
midline = '|-\n'
row_string = '|'
for x in range(0,len(row)):
if x == len(row)-1: add = ''
else: add = '||'
value = row[x]
row_string = row_string + str(value) + add # here is the value
# here we might add colors. -> it would be nice to make a different colour for each language background, so it would be easy to see when one starts and another finishes.
row_string = midline + row_string + '\n'
rows = rows + row_string
closer_string = '|}'
wiki_table_string = class_header_string + header_string + rows + closer_string
if len(df_rows)==0:
wiki_table_string = ''
return wiki_table_string
### --- ### --- ### --- ### --- ### --- ### --- ### --- ### --- ### --- ### --- ### --- ### --- ### --- ### --- ### --- ### --- ### --- ### ---
# TABLES
# function name composition rule: x, y, (rows, columns)
# In this function we create the table language_territories_mapping.
def make_table_language_territories_mapping():
df = pd.read_csv(databases_path + 'language_territories_mapping.csv', sep='\t', na_filter=False)
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 13 21:23:17 2020
@author: kakdemi
"""
from sklearn import linear_model
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
#########################################################
# Weight by zone
#########################################################
#importing LMP data
Daily_sim = pd.read_excel('validation_prices.xlsx', sheet_name='Daily_sim')
Hourly_sim = pd.read_excel('validation_prices.xlsx', sheet_name='Hourly_sim')
Hourly_hist = pd.read_excel('validation_prices.xlsx', sheet_name='hist_hourly')
Daily_hist = pd.read_excel('validation_prices.xlsx', sheet_name='hist_daily')
#determining number of days and hours for multivariate regression
num_days = int(len(Hourly_sim)/24)
num_hours = len(Hourly_sim)
#creating a linear regression model for hourly simulations
hourly_x = Hourly_sim.copy()
hourly_y = Hourly_hist.loc[:,'LMP'].copy()
hourly_reg = linear_model.ElasticNet(positive=True, max_iter=100000)
hourly_reg.fit(hourly_x,hourly_y)
sim_hourly = np.zeros((num_hours,1))
#finding zonal weights by using the regression and predicting overall LMP
for i in range(0,num_hours):
s = Hourly_sim.loc[i,:].values
s = s.reshape((1,len(s)))
sim_hourly[i] = hourly_reg.predict(s)
#saving the weighted hourly prices
SH = pd.DataFrame(sim_hourly)
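# Note: the row-by-row loop above can be replaced by a single vectorized call; a minimal
# equivalent sketch (assuming Hourly_sim contains exactly the features hourly_reg was fit on):
# sim_hourly = hourly_reg.predict(Hourly_sim.values).reshape(-1, 1)
# SH = pd.DataFrame(sim_hourly)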
"""
Importing necessary libraires.
"""
import tweepy
import json
import re
import string
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
import matplotlib.pyplot as plt
import pandas as pd
from tensorflow.python.keras.preprocessing.text import Tokenizer
from tensorflow.python.keras.preprocessing.sequence import pad_sequences
from tensorflow.python.keras.models import model_from_json
import random
from flask import Flask,render_template,url_for,request
import numpy as np
import emoji
app = Flask(__name__)
"""
Function to render page http://127.0.0.1:5000/
"""
@app.route('/')
def hello(st=''):
print("HOME")
return render_template('home.html',title='home')
"""
Function to render page http://127.0.0.1:5000/analysis
"""
@app.route('/analysis',methods=['POST','GET','OPTIONS'])
def analysis():
"""
Taking search query into the variable 'key'.
"""
key=request.form['InputText']
"""
Performing authentication to access twitter's data.
(Use Twitter developer credentials below and uncomment the following commented piece of code).
"""
"""
consumer_key = ''
consumer_secret = ''
access_token = ''
access_token_secret = ''
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
"""
"""
Creating an api object using tweepy.
"""
api = tweepy.API (auth)
"""
Fetching tweets and storing them in results array. 'num' variable denotes the number of tweets to be fetched.
"""
results = []
num = 50
for tweet in tweepy.Cursor (api.search, q = key, lang = "en").items(num):
results.append(tweet)
"""
Creating a pandas dataframe to capture tweet information.
"""
dataset=pd.DataFrame()
dataset["tweet_id"]=pd.Series([tweet.id for tweet in results])
dataset["username"]=pd.Series([tweet.author.screen_name for tweet in results])
dataset["text"]=pd.Series([tweet.text for tweet in results])
dataset["followers"]=pd.Series([tweet.author.followers_count for tweet in results])
dataset["hashtags"]=pd.Series([tweet.entities.get('hashtags') for tweet in results])
dataset["emojis"]=pd.Series([','.join(c for c in tweet.text if c in emoji.UNICODE_EMOJI) for tweet in results])
"""
Following piece of code is used to generate wordcloud of the hashtags used in fetched tweets
"""
Hashtag_df = pd.DataFrame(columns=["Hashtag"])
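# A minimal sketch of how the hashtag wordcloud could be generated from here (illustrative only;
# assumes each entry of dataset["hashtags"] is the list of hashtag dicts returned by tweepy,
# and the output path is a placeholder):
# Hashtag_df = pd.DataFrame({"Hashtag": [tag["text"] for tags in dataset["hashtags"] for tag in tags]})
# hashtag_text = " ".join(Hashtag_df["Hashtag"].astype(str))
# wordcloud = WordCloud(stopwords=STOPWORDS, background_color="white").generate(hashtag_text)
# plt.imshow(wordcloud, interpolation="bilinear"); plt.axis("off"); plt.savefig("static/wordcloud.png")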
# logging setup
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(name)s: %(asctime)s\n%(message)s')
file_handler = logging.FileHandler('logs/features.log')
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
# import libraries
import pandas as pd
from tabulate import tabulate
# count null values
def count_nulls(df):
return [df[col].isna().sum() for col in df.columns]
# return data types
def data_types(df):
return [df[col].dtype for col in df.columns]
# return summary statistics
def summary_stats(df, assign):
stats_list = []
for assi in assign.col_map:
stats = ""
if assi[1] == "quant":
stats = "min: {}, max: {}, mean: {:f}".format(df[assi[0]].min(), df[assi[0]].max(), df[assi[0]].mean())
elif assi[1] == "cat":
count = df[assi[0]].value_counts(sort = True) # .astype('int64')
percent = df[assi[0]].value_counts(normalize = True, sort = True)
values = pd.DataFrame({"count": count, "percent": percent})
#!/usr/bin/python3
# -----------------------------------------------------------
# Calculator to add school year features to a dataset
# -----------------------------------------------------------
import pandas as pd
# pylint: disable=too-many-locals
def add_feature_school_year(dataset, date_col, date_format, data_path):
""""
given a dataframe with a date_col of format date_format and a date index
add a new column annee_scolaire using an external csv located in data_path using the same date_format
"""
# generate all dates within start and end
start = dataset[date_col].min()
end = dataset[date_col].max()
all_dates = pd.date_range(start, end, freq="D")
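# A hypothetical call, for illustration only (the column name, date format and CSV path are
# assumptions, not part of this snippet):
# df = add_feature_school_year(df, date_col="date", date_format="%Y-%m-%d",
#                              data_path="data/school_years.csv")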
"""
Copyright 2020 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import pandas as pd
import numpy as np
from scipy.stats import rankdata
from impetuous.quantification import qvalues, permuter
from rankor.quantification import pi0
from rankor.contrasts import contrast
def svd_reduced_mean ( x,axis=0,keep=[0] ) :
if True :
sk = set ( keep )
if len ( np.shape(x) ) > 1 :
u , s , vt = np .linalg .svd( x , full_matrices=False )
xred = np.mean( np.dot(u*[s[i_] if i_ in sk else 0 for i_ in range(len(s))],vt) , axis)
if 'pandas' in str(type(x)) :
if not 'series' in str(type(x)) :
xname = x.index.values[0]
return ( pd.DataFrame( [xred] , index=[xname] , columns=x.columns ) )
else :
xname = x.name
return ( pd.Series( xred , name=xname , index=x.columns ) )
else :
return ( xred )
return ( x )
from sklearn.decomposition import PCA
dimred = PCA ( n_components = 1 )
def pca_reduced_mean( x ) :
if True :
if len ( np.shape(x) ) > 1 :
Xnew = dimred.fit_transform( x.T )
xred = Xnew . T [0] + np.mean(np.mean(x))
if 'pandas' in str(type(x)) :
if not 'series' in str(type(x)) :
xname = x.index.values[0]
return ( pd.DataFrame( [xred] , index=[xname] , columns=x.columns ) )
else :
xname = x.name
return ( pd.Series( xred , name=xname , index=x.columns ) )
return ( x )
def reduction ( a , power , centered=-1 ) :
if centered>0 :
a = ( a.T-np.mean(a,1) ).T
return( np.linalg.svd ( a**power , full_matrices=False ) )
def hyper_params ( df_ , label = 'generic' , sep = ',' , power=1., centered=-1 ):
#
idx_ = df_.index.values
N_s = len ( df_.columns )
u,s,vt = reduction( df_.values , power , centered=centered )
rdf_ = pd.Series ( np.sum(u**2,1) , index=idx_ , name = label+sep+"u" )
rdf_ = pd.concat ( [ pd.DataFrame(rdf_) ] )
'''
Number: 4
This file models sequences of words using the statistical properties of n-grams.
I follow the Markov assumption (or independence assumption).
As for probabilities, I use and implement the Kneser-Ney Smoothing method.
'''
import pandas as pd
# --> Unigrams Probabilities (something wrong with this)
def kneserNey_prob_uni(profanity=False, pickled=True):
'''
profanity: bool - if True, it works in the dtm with profanities filterd out
pickled: bool - if True the result is pickled
keep: int - keep the top-keep unigrams when pickled; give it -1 if you want to keep all
'''
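# For reference, the unigram continuation probability used by Kneser-Ney smoothing is
#     P_cont(w) = |{w' : c(w', w) > 0}| / |{(w', w'') : c(w', w'') > 0}|
# i.e. the number of distinct left contexts w follows, divided by the number of distinct
# bigram types; this is why the bigram DTM is loaded below rather than a unigram DTM.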
# read the DTM for bigrams
path = ''
if profanity:
path = 'pickles/DTMs/dtm-bi/integrated/dtm_bi_badnot.pkl'
else:
path = 'pickles/DTMs/dtm-bi/integrated/dtm_bi.pkl'
dtm_bi = pd.read_pickle(path)
#from dqn_env import TrainLine
import sys
sys.path.append('.\subway_system')
from subway_env import TrainLine
from RL_brain import DeepQNetwork
import numpy as np
import matplotlib.pyplot as mplt
import tensorflow as tf
import pandas as pd
import TrainAndRoadCharacter as trc
def plot(r,ylabel):
import matplotlib.pyplot as plt
plt.plot(np.arange(len(r)), r, linewidth=1)
plt.ylabel(ylabel)
plt.xlabel('training episodes')
plt.savefig("./img/"+ylabel+".png")
plt.show()
def draw_mean(r,ylabel):
import matplotlib.pyplot as plt
x_10 = []
temp = []
count = 0
for i in range (len(r)):
temp.append(r[i])
count += 1
if count >= 10:
x_10.append(sum(temp) / 10)
temp = []
count = 0
plt.plot(np.arange(len(x_10)), x_10, linewidth=1)
plt.ylabel('mean' + ylabel)
plt.xlabel('training episodes X10')
plt.savefig("./img/"+'mean' +ylabel+".png")
plt.show()
def run_train():
total_step = 0
Max_iteras= 3000
for episode in range(Max_iteras):
# train for Max_iteras (3000) episodes
r1_max = 0
step = 0
r1 = 0
pl=[] # positions
vl=[] # speeds
ul=[] # accelerations
al=[] # actions
# initial observation
observation = env.reset()
#env.bef_print()
while True:
# fresh env
#env.render()
# RL choose action based on observation
action = RL.choose_action(observation)
# force the trajectory up onto the target curve
pos = observation[0] * env.S
veo = observation[1] * env.max_speed
if pos <100 and veo < env.avg_speed:
action = 8
# RL take action and get next observation and reward
observation_, E, reward, done, action = env.step(action) # action = 0-6, eventually mapped to [-0.3, 0.3]
r1 = r1 * 0.99 + reward
RL.store_transition(observation, action, reward, observation_)
if (total_step > 5000 and total_step % 32 == 0 ):
RL.learn()
# swap observation
observation = observation_
# o1 =observation
if episode%20==0 or episode==Max_iteras-1:
pl.append(pos)
vl.append(veo)
ul.append(observation[3])
al.append(action)
# break while loop when end of this episode
if done:
# env.subFilterFactor(Max_iteras) # reduce the smoothing factor
r.append(r1)
energy.append(E)
print(observation_[2]*env.T,env.TErrorSum,env.filterFactor,RL.epsilon)
RL.increase_epsilon()
tlist.append(observation_[2]*env.T)
# curve check: decide whether to save this curve (is the travel distance suitable, is the time close enough), with episode_speed.csv as the file name
if r1 > r1_max and episode>1500 and episode%20 == 0:
r1_max =r1
Curve=np.mat([pl,vl,ul,al])
CurveData=pd.DataFrame(data=Curve.T,columns=['s','v','acc','action'])
CurveData.to_csv("./Curve/"+str(episode)+"_CurveData.csv")
if episode==Max_iteras-1:
print(r1)
# f1 = open('datat.txt', 'r+')
# f1.read()
# print(episode, (step + 5)/5, file=f1)
# f1.close()
r.append(r1)
print('Episode finished after {} timesteps'.format((step + 5)/5))
break
# if (5000 > episode >= 4500):
# print(o1)
# f2 = open('vs.txt', 'r+')
# f2.close()
# break
step += 1
total_step += 1
# finally, print the result
print(episode)
if episode%20 ==0 or episode==Max_iteras-1:
trc.plotSpeedLimitRoadGrad('relative')
mplt.plot(pl,vl)
mplt.savefig("./img/"+str(episode)+"v-s.png")
mplt.show()
mplt.plot(pl,ul)
mplt.savefig("./img/"+str(episode)+"u-s.png")
mplt.show()
draw_mean(al,str(episode)+"action-s")
# mplt.savefig("./img/"+str(episode)+"action-s.png")
# mplt.show()
return
# end of game
if __name__ == "__main__":
print("path:"+sys.path[0])
global r,energy,tlist,RL
tf.reset_default_graph()
env = TrainLine(110)
env.seed(1)
RL = DeepQNetwork(env.n_actions, env.n_features,
learning_rate=0.0001,
reward_decay=0.99, # reward discount factor
e_greedy=0.6, # exploration rate (epsilon-greedy)
replace_target_iter=512,
memory_size=10000,
batch_size=256,
e_greedy_increment=0.35/3000,
# output_graph=True
)
# RL.LoadModel()
energy = []
r = []
tlist = []
run_train()
RL.plot_cost()
plot(r,'reward')
plot(energy,'energy')
plot(tlist,'time')
draw_mean(r,'reward')
draw_mean(energy,'energy')
draw_mean(tlist,'time')
draw_mean(RL.cost_his,'mean_cost')
rdata = pd.DataFrame(r)
rdata.to_csv("reward.csv")
tdata = pd.DataFrame(tlist)
import pandas as pd
import numpy as np
import asteval
import sys
from cytoolz.curried import map, curry
from cytoolz.functoolz import thread_last
from cytoolz.dicttoolz import assoc_in
from dask import delayed
import re
from survey_stats import log
from survey_stats.etl import download as dl
from survey_stats import pdutil
logger = log.getLogger(__name__)
def unstack_facets(df, unstack):
if not unstack:
return df
logger.info('unstacking facet columns', shape=df.shape, unstack=unstack)
for k, v in unstack.items():
fcts = list(df[k].drop_duplicates())
for c in fcts:
df[c] = 'Total'
df[c][df[k] == c] = df[v][df[k] == c]
logger.info('unstacked facet column', col=c,
facets=df[c].value_counts(dropna=False).to_dict())
logger.info('unstacking facet columns', shape=df.shape, cols=df.columns,
unstack=unstack)
return df
def fold_stats_cols(df, folds):
if not folds:
return df
logger.info('folding df stats', shape=df.shape, folds=folds,
cols='|'.join(df.columns))
cols = list(df.columns)
yes_cols = folds['y']
no_cols = folds['n']
fixed_cols = list(set(cols) - set(yes_cols + no_cols))
yes_df = df[fixed_cols + yes_cols]
no_df = df[fixed_cols + no_cols]
yes_df['response'] = 'Yes'
no_df['response'] = 'No'
no_df.columns = yes_df.columns
df = pd.concat([yes_df, no_df], ignore_index=True)
return df
# -*- coding: utf-8 -*-
"""
Functions used to find happymeal product categories:
- Exclus
- Viande, oeufs
- Poisson
- Produits gras sucrés salés
- Matières grasses ajoutées
- Produits laitiers (hors fromage)
- Fromage
- Féculents raffinés
- Féculents non raffinés
- Fruits
- Légumes
- Plats préparés
"""
__author__ = '<NAME>'
__license__ = 'MIT License'
__version__ = '0.1'
__maintainer__ = '<NAME>'
__status__ = 'Development'
import sys
import pandas as pd
import pickle
import openfoodfacts
def get_foodGroup(EAN,
Produit_Nom,
convert_groups,
model_classifier,
model_matching):
'''
-- Input --
EAN: EAN code, string
Produit_Nom: name of the product, string
convert_groups: dictionnary which enables to create a family of food from OpenFoodFacts' groups
model_classifier: model which predicts the foodgroups from nutrients
model_matching: model which predicts the foodgroups from names
-- Output --
food_group: the group of the product, string
statut: how the foodgroup has been obtained
1 - the product is in OFF and belongs to a well defined foodgroup
2 - the product is in OFF and its foodgroup is predicted from nutrients
3 - the product is not in OFF and its foodgroup is predicted from its name
-- Examples --
get_foodGroup(EAN = "4260436322114", Produit_Nom = None)
get_foodGroup(EAN = "5410233710105", Produit_Nom = None)
get_foodGroup(EAN = "hgbjnklhgc", Produit_Nom = "Pizza")
'''
try: # incase of missing EAN
product_off = openfoodfacts.products.get_product(str(EAN)) # gets the product from Open Food Facts
except:
pass
try: # manages to get info on pnns_groups_2
product_off_groups2 = product_off['product']['pnns_groups_2']
if product_off_groups2 in convert_groups.keys(): # if the product of OFF belongs to a well defined group
foodgroup = convert_groups[product_off_groups2]
statut = 1
return [foodgroup, statut]
except: pass
try: # manages to get info on nutriments
# looks for nutrients
df_nutrients = pd.DataFrame([product_off['product']['nutriments']],
dtype='float64')[['salt_100g', 'fat_100g', 'sugars_100g', 'proteins_100g', 'carbohydrates_100g', 'saturated-fat_100g']]
# We will predict if and only if the values are valid
df_nutrients = df_nutrients[df_nutrients['salt_100g'] <= 100]
df_nutrients = df_nutrients[df_nutrients['sugars_100g'] <= 100]
df_nutrients = df_nutrients[df_nutrients['carbohydrates_100g'] <= 100]
df_nutrients = df_nutrients[df_nutrients['fat_100g'] <= 100]
df_nutrients = df_nutrients[df_nutrients['proteins_100g'] <= 100]
df_nutrients = df_nutrients[df_nutrients['saturated-fat_100g'] <= 100]
n_row = df_nutrients.shape[0] # 1 if values are correct, 0 if one value over 100
if n_row == 1: # no missing values and no weird values
# then predicts the foodgroup from nutrients
foodgroup = model_classifier.predict(df_nutrients[['salt_100g', 'sugars_100g',
'carbohydrates_100g', 'fat_100g',
'proteins_100g', 'saturated-fat_100g']])[0]
statut = 2
return [foodgroup, statut]
except:
pass
try: # manages to predicts the foodgroup from the name
foodgroup = model_matching.predict([Produit_Nom])[0]
statut = 3
return [foodgroup, statut]
except: # arggg
return [None, None]
def get_foodGroupFromToDF(listing_df,
EAN_col,
product_name_col,
mapping_file,
model_classifier_file,
model_matching_file,
group_name):
'''
-- Input --
listing_df: listing of food products we want to put in balanced meals, as dataframe
(contains at least EAN_col and product_name_col)
EAN_col: column containing EAN code, as string
product_name_col: column containing the product name, as string
mapping_file: path of file which enables to map OpenFoodFacts' groups to our food groups
model_classifier_file: path of file containing model which predicts the food groups from nutrients
model_matching_file: path of file containing model which predicts the food groups from names
group_name: specify output level of categorization ("labelAlim_1" or "labelAlim_2")
-- Output --
listing_df: the same dataframe, with 2 columns added
labelAlim_1 or labelAlim_2: food group for balanced meals
statutAlim_1 or statutAlim_2: how the foodgroup has been obtained
-- Example --
get_foodGroupFromToDF(listing_df = input_listing,
EAN_col = 'EAN',
product_name_col = 'Produit_Nom',
mapping_file = 'data/mapping_off_ideal.csv',
model_classifier_file = 'data/clf_nutrients_rf_groupeAlim_2_light.sav',
model_matching_file = 'data/clf_names_nb_light.sav'
group_name = 'labelAlim_1')
'''
# Check if listing_df contains EAN_col and product_name_col
if pd.Series([EAN_col, product_name_col]).isin(listing_df.columns).sum() < 2:
sys.exit(EAN_col + ' or ' + product_name_col + ' is not in dataframe')
else:
# Model to get the foodgroup of a product which is in the Open Food Facts database
clf_nutrients_rf = pickle.load(open(model_classifier_file, 'rb'))
# Model to get the foodgroup of a product which is not in the Open Food Facts database
clf_names_nb = pickle.load(open(model_matching_file, 'rb'))
# Mapping file
mapping_groups = pd.read_csv(mapping_file, sep=';', encoding='UTF-8')
import pandas as pd
def _check_necessary_opf_parameters(net, logger):
# Check if all necessary parameters are given:
opf_col = {
'ext_grid': pd.Series(['min_p_mw', 'max_p_mw', 'min_q_mvar', 'max_q_mvar']),
'gen': pd.Series(['min_p_mw', 'max_p_mw', 'min_q_mvar', 'max_q_mvar']),
'sgen': pd.Series(['min_p_mw', 'max_p_mw', 'min_q_mvar', 'max_q_mvar']),
'load': pd.Series(['min_p_mw', 'max_p_mw', 'min_q_mvar', 'max_q_mvar']),
'storage': pd.Series(['min_p_mw', 'max_p_mw', 'min_q_mvar', 'max_q_mvar'])}
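# A minimal sketch of how the required-column check could continue (illustration only;
# assumes net[element] is the element DataFrame, as in pandapower nets):
# for element, cols in opf_col.items():
#     if element in net and len(net[element]) and not cols.isin(net[element].columns).all():
#         missing = list(cols[~cols.isin(net[element].columns)])
#         logger.warning("OPF parameters missing in net.%s: %s" % (element, missing))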
from typing import Union, Tuple, Sequence, Any
import collections.abc
import glob
import logging
import os
import numpy as np
import pandas as pd
from numpy.random import RandomState
from sklearn.model_selection import train_test_split
from tqdm import tqdm
from .label_behavior import LabelBehavior
logger = logging.getLogger(__name__)
"""
Module which contains functionality for generating experiments
"""
class ClassicExperiment:
"""
Defines a classic experiment, which consists of: 1) a specification of the clean data 2) a specification of the
modified (triggered) data, and 3) a specification of the split of triggered/clean data for training/testing
the model
"""
def __init__(self, data_root_dir: str, trigger_label_xform: LabelBehavior, stratify_split: bool = True) -> None:
"""
Initializes a Classic experiment object
:param data_root_dir: the root directory under which all data lives under. The expected directory structure
for any dataset is as follows:
root_dir
|- clean_data
|- modification_1
|- modification_2
|- ...
This is needed so that the proper relative path can be computed from the root directory.
Additionally, it is required that filenames correspond across the different subfolders under
root_dir. Practically, this means that a file in clean_data and its modified (triggered) counterparts must share the same filename in their respective subfolders.
:param trigger_label_xform: a LabelBehavior object specifying how triggered data is changed
:param stratify_split: if True, then data is split such that each class has the same number of samples in
the produced experiment
"""
self.data_root_dir = data_root_dir
self.stratify_split = stratify_split
self.trigger_label_xform = trigger_label_xform
def create_experiment(self, clean_data_csv: str, experiment_data_folder: str,
mod_filename_filter: str = '*', split_clean_trigger: bool = False,
trigger_frac: float = 0.2, triggered_classes: Union[str, Sequence[Any]] = 'all',
random_state_obj: RandomState = RandomState(1234)) \
-> Union[Tuple, pd.DataFrame]:
"""
Creates an "experiment," which is a dataframe defining the data that should be used, and whether that data is
triggered or not, and the true & actual label associated with that data point.
TODO:
[] - Have ability to accept multiple mod_data_folders such that we can sample from them all at a specified
probability to have different triggers
:param clean_data_csv: path to file which contains a CSV specification of the clean data. The CSV file is
expected to have the following columns: [file, label]
:param experiment_data_folder: the folder which contains the data to mix with for the experiment.
:param mod_filename_filter: a string filter for determining which files in the folder to consider, if only a
a subset is to be considered for sampling
:param split_clean_trigger: if True, then we return a list of DataFrames, where the triggered & non-triggered
data are combined into one DataFrame, if False, we concatenate the triggered and non-triggered data
into one DataFrame
:param trigger_frac: the fraction of data which should be triggered
:param triggered_classes: either the string 'all', or a Sequence of labels which are to be triggered. If
this parameter is 'all', then all classes will be triggered in the created experiment. Otherwise,
only the classes in the list will be triggered at the percentage requested in the trigger_frac
argument of the create_experiment function.
:param random_state_obj: random state object
:return: a dataframe of the data which consists of the experiment. The DataFrame has the following columns:
file, true_label, train_label, triggered
file - the file path of the data
true_label - the actual label of the data
train_label - the label of the data the model should be trained on.
This will be equal to true_label *if* triggered==False
triggered - a boolean value indicating whether this particular sample has a Trigger or not
"""
logger.info("Creating experiment from clean_data:%s modified_data:%s" %
(clean_data_csv, experiment_data_folder))
# get absolute paths to avoid ambiguities when generating output paths
experiment_data_folder = os.path.abspath(experiment_data_folder)
clean_df = pd.read_csv(clean_data_csv)
clean_df['filename_only'] = clean_df['file'].map(os.path.basename)
if isinstance(triggered_classes, str) and triggered_classes == 'all':
num_trigger = int(len(clean_df) * trigger_frac)
else:
if isinstance(triggered_classes, collections.abc.Sequence):
num_total_in_triggered_classes = 0
for c in triggered_classes:
num_total_in_triggered_classes += len(clean_df[clean_df['label'] == c])
num_trigger = int(num_total_in_triggered_classes*trigger_frac)
else:
msg = "triggered_classes must either be 'all' or a list of labels to trigger"
logger.error(msg)
raise ValueError(msg)
# find list of files in the mod data folder that match the input filter & the trigger_classes specification
mod_flist = glob.glob(os.path.join(experiment_data_folder, mod_filename_filter))
mod_flist.sort()
if isinstance(triggered_classes, str):
# we need the if/elif b/c a str is also a collections.abc.Sequence
pass
elif isinstance(triggered_classes, collections.abc.Sequence):
# get only the filenames associated with each label of interest
mod_flist_fname_only = [os.path.basename(x) for x in mod_flist]
mod_flist = []
for c in triggered_classes:
class_clean_files = set(clean_df[clean_df['label'] == c]['filename_only'])
intersected_fname_only = class_clean_files.intersection(mod_flist_fname_only)
intersected_fname_with_path = [os.path.join(experiment_data_folder, x) for x in intersected_fname_only]
mod_flist.extend(intersected_fname_with_path)
if not self.stratify_split:
mod_flist_subset = random_state_obj.choice(mod_flist, num_trigger, replace=False)
logger.info("Created unstratified dataset from %s for including in experiment" % (experiment_data_folder,))
else:
# get overlap between files which exist in the directory and files which were converted
# and pick stratification based on the original label
orig_flist = set(clean_df['filename_only'])
mod_flist_fname_only = set([os.path.basename(x) for x in mod_flist])
common_flist = list(orig_flist.intersection(mod_flist_fname_only))
df_subset_to_stratify = clean_df[clean_df['filename_only'].isin(common_flist)]
# get the trigger fraction percentage based on class-label stratification
if trigger_frac > 0:
try:
num_trigger = min(len(df_subset_to_stratify)-1, num_trigger)
num_classes = len(df_subset_to_stratify['label'].unique())
if (len(df_subset_to_stratify) - num_trigger) < num_classes:
# ensure that we have enough to split
num_trigger -= num_classes
df_flist, _ = train_test_split(df_subset_to_stratify,
train_size=num_trigger,
random_state=random_state_obj,
stratify=df_subset_to_stratify['label'])
logger.info("Created stratified dataset from %s for including in experiment" %
(experiment_data_folder,))
except ValueError as e:
logger.exception(e)
logger.error("Error creating experiment, likely because the fraction of triggered data specified "
"creates a data split where not all classes are represented!")
raise ValueError(e)
else:
# empty dataframe with no entries, meaning that no data is triggered
df_flist = pd.DataFrame(columns=['file', 'label', 'filename_only'])
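# A hypothetical usage sketch (paths are placeholders; LabelBehavior would in practice be a
# concrete subclass describing how triggered labels are transformed):
# xform = LabelBehavior()
# experiment = ClassicExperiment('/data/my_dataset', trigger_label_xform=xform, stratify_split=True)
# experiment_df = experiment.create_experiment(
#     clean_data_csv='/data/my_dataset/clean_data/train.csv',
#     experiment_data_folder='/data/my_dataset/modification_1',
#     trigger_frac=0.1)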
"""
Copyright 2019 <NAME>.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
from math import sqrt
from typing import Dict, Union
import pandas as pd
from gs_quant.api.gs.data import GsDataApi
from gs_quant.data.core import DataContext
from gs_quant.datetime import date
from gs_quant.errors import MqValueError
from gs_quant.models.risk_model import FactorRiskModel, ReturnFormat
from gs_quant.target.data import DataQuery
class Factor:
def __init__(self, risk_model_id: str, factor_name: str):
risk_model = FactorRiskModel(risk_model_id)
factor_data = risk_model.get_factor_data(format=ReturnFormat.JSON)
name_matches = [factor for factor in factor_data if factor['name'] == factor_name]
if not name_matches:
raise MqValueError(f'Factor with name {factor_name} does not exist in risk model {risk_model_id}')
factor = name_matches.pop()
self.__risk_model_id: str = risk_model_id
self.__id = factor['identifier']
self.__name: str = factor['name']
self.__type: str = factor['type']
self.__category: str = factor.get('factorCategory')
@property
def id(self):
return self.__id
@property
def name(self):
return self.__name
@property
def type(self):
return self.__type
@property
def category(self):
return self.__category
@property
def risk_model_id(self):
return self.__risk_model_id
def covariance(self,
factor,
start_date: date = DataContext.current.start_date,
end_date: date = DataContext.current.end_date,
format: ReturnFormat = ReturnFormat.DATA_FRAME) -> Union[Dict, pd.DataFrame]:
""" Retrieve a Dataframe or Dictionary of date->covariance values between this factor and another for a date
range """
covariance_data_raw = GsDataApi.execute_query(
'RISK_MODEL_COVARIANCE_MATRIX',
DataQuery(
where={"riskModel": self.risk_model_id, "factorId": self.id},
start_date=start_date,
end_date=end_date
)
).get('data', [])
date_to_matrix_order = factor.__matrix_order(start_date, end_date)
covariance_data = {}
for data in covariance_data_raw:
date = data['date']
if date_to_matrix_order.get(date):
matrix_order_on_date = date_to_matrix_order[date]
covariance_data[date] = data[matrix_order_on_date]
if format == ReturnFormat.DATA_FRAME:
return pd.DataFrame.from_dict(covariance_data, orient='index', columns=['covariance'])
return covariance_data
def variance(self,
start_date: date = DataContext.current.start_date,
end_date: date = DataContext.current.end_date,
format: ReturnFormat = ReturnFormat.DATA_FRAME) -> Union[Dict, pd.DataFrame]:
""" Retrieve a Dataframe or Dictionary of date->variance values for a factor over a date range """
variance_data = self.covariance(self, start_date, end_date, ReturnFormat.JSON)
if format == ReturnFormat.DATA_FRAME:
return pd.DataFrame.from_dict(variance_data, orient='index', columns=['variance'])
return variance_data
def volatility(self,
start_date: date = DataContext.current.start_date,
end_date: date = DataContext.current.end_date,
format: ReturnFormat = ReturnFormat.DATA_FRAME) -> Union[Dict, pd.DataFrame]:
""" Retrieve a Dataframe or Dictionary of date->volatility values for a factor over a date range """
variance = self.variance(start_date, end_date, ReturnFormat.JSON)
volatility_data = {k: sqrt(v) for k, v in variance.items()}
if format == ReturnFormat.DATA_FRAME:
return pd.DataFrame.from_dict(volatility_data, orient='index', columns=['volatility'])
return volatility_data
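# A hypothetical usage sketch (the risk model id and factor name are placeholders):
# factor = Factor('RISK_MODEL_ID', 'Momentum')
# vol_df = factor.volatility()                            # DataFrame indexed by date
# vol_dict = factor.volatility(format=ReturnFormat.JSON)  # dict of date -> volatility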
#!/usr/bin/env python
# <NAME>
# Convert an antiSMASH TSV file taken from 'txt/*_BGC.txt' file into a Candidate CSV file
import argparse
import pandas as pd
import numpy as np
def antismash_tsv_candidates(export):
"""
Convert an antiSMASH TSV file taken from 'txt/*_BGC.txt' into a Candidate DataFrame
:param export: DataFrame of antiSMASH TSV file taken from 'txt/*_BGC.txt'
:return: Candidate DataFrame
"""
candidates = pd.DataFrame()
#<NAME>
#30/11/21
#Some basic college coding - NDVI, Advanced list manipulations & plotting
########################
#Imports & Inits
########################
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from scipy import stats
import seaborn as sns
from sklearn.linear_model import LinearRegression
########################
# GET EXCEL FILE
########################
location = "C:\\Data\\Remote_Sensing\\CourseData\\Remotesensing(1)\\Achterhoek_FieldSpec_2008.xlsx"
matrix = pd.read_excel(location)
first = pd.ExcelFile("C:\\Data\\Remote_Sensing\\CourseData\\Remotesensing(1)\\Achterhoek_FieldSpec_2008.xlsx")
second = pd.read_excel(first, 'Field_sampling')
# matrix.plot(x='Unnamed: 0', y= ['plot', 'Unnamed: 2', 'Unnamed: 3', 'Unnamed: 4'])
# matrix.plot(x='Unnamed: 0')
# matrix[['Unnamed: 0', 'plot', 'Unnamed: 2', 'Unnamed: 3']].plot(x='Unnamed: 0')
########################
# CREATE DATA FRAMES
########################
fresh_weight = pd.DataFrame(data=second.iloc[0, 1:])
N_concentration = pd.DataFrame(data=second.iloc[2, 1:])
import json
import os.path
from copy import copy
from typing import List, Tuple
from pandas import DataFrame
from fakeme.rules import default_rules
from fakeme.utils import log
class FieldRulesExtractor(object):
file_name = "rules.json"
def __init__(self, fields, paths_list=None):
if paths_list is None:
paths_list = []
self.fields = self.extract_fields(fields)
self.paths_list = paths_list
@staticmethod
def extract_fields(fields):
_fields = set([])
fields_with_fixed_rules = [line["field"] for line in FieldRules.user_rules]
for table in fields:
[
_fields.add(field) if field not in fields_with_fixed_rules else None
for field in table[1]
]
return _fields
def user_rules_processing(self) -> Tuple[List]:
field_rules = []
fields_with_rules = []
for rule in FieldRules.user_rules:
# todo: all rules extract need to refactor
if "*" in rule["field"]:
key = rule["field"].split("*")[1].lower()
for field in self.fields:
if key in field.lower():
field_rule = copy(rule)
field_rule["field"] = field
fields_with_rules.append(field)
field_rules.append(field_rule)
else:
for field in self.fields:
if rule["field"].lower() == field.lower():
fields_with_rules.append(field)
field_rules.append(rule)
return field_rules, fields_with_rules
def rules_extracts(self):
field_rules, fields_with_rules = self.user_rules_processing()
for field in self.fields:
if field not in fields_with_rules:
for key in default_rules:
if key in field.lower():
field_rule = copy(default_rules[key])
break
else:
field_rule = copy(default_rules["default"])
field_rule["field"] = field
field_rules.append(field_rule)
return field_rules
def generate_rules(self, remove_existed=True):
if not remove_existed and os.path.isfile(self.file_name):
log.info("{} with rules founded in {}".format(self.file_name, os.getcwd()))
else:
values_rules_dict = self.rules_extracts()
with open(self.file_name, "w+") as outfile:
json.dump(values_rules_dict, outfile, indent=2)
log.info("{} with rules for fields was created".format(self.file_name))
return True
class FieldRules(object):
user_rules = []
def __init__(self):
try:
with open(FieldRulesExtractor.file_name, "r") as json_file:
list_with_field_rules = json.load(json_file)
except IOError:
list_with_field_rules = []
dict_none_duplicates = {}
for line in list_with_field_rules:
dict_none_duplicates[line["field"]] = line
for line in FieldRules.user_rules:
dict_none_duplicates[line["field"]] = line
final_rules_list = [dict_none_duplicates[line] for line in dict_none_duplicates]
self.rules = DataFrame.from_records(final_rules_list)
"""
lstmApp
Author: <NAME>
Demonstration LSTM model to predict categorical price increases greater than a
specified tolerance from current and past indicators as input.
Dataset must be built by datasetBuild.py or conform to its schema.
"""
import numpy as np
import math
import tensorflow as tf
import pandas as pd
from keras.models import Sequential
from keras.layers import Dense, Embedding, LSTM
from keras.callbacks import EarlyStopping
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import normalize
from math import sqrt
from numpy import concatenate
from pandas import read_csv
from pandas import DataFrame
from pandas import concat
import argparse
pd.options.display.max_columns = 999
y_name = 'CategoricalIncrease'
WEEKLY_TRAIN_PATH = "data/btcpricetrainingdataweekly2.csv"
WEEKLY_TEST_PATH = "data/btcpricetestingdataweekly2.csv"
DAILY_TRAIN_PATH = "data/btcpricetrainingdatadaily2.csv"
DAILY_TEST_PATH = "data/btcpricetestingdatadaily2.csv"
HR12_TRAIN_PATH = "data/btcpricetrainingdata12hr2.csv"
HR12_TEST_PATH = "data/btcpricetestingdata12hr2.csv"
CSV_COLUMN_NAMES = list((pd.read_csv(HR12_TEST_PATH)).columns.values)
CSV_COLUMN_NAMES[0]="Index"
CATEGORICALINCREASE = ['NoIncreaseMoreThanTol', 'IncreaseMoreThanTol']
parser = argparse.ArgumentParser(description='Specify LSTM hyperparameters')
parser.add_argument('-layers', metavar='L', type=int, nargs='+', default=2,
help='Number of LSTM layers')
parser.add_argument('-layer_size', metavar='S', type=int, nargs='+', default=75,
help='Integer value for the size (numer of neurons) of each LSTM layer')
parser.add_argument('-epochs', metavar='E', type=int, nargs='+', default=250,
help="Number of epochs to train")
parser.add_argument('-batch_size', metavar='B', type=int, nargs='+', default=16,
help='Batch size to train on')
args = parser.parse_args()
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
n_vars = 1 if type(data) is list else data.shape[1]
df = DataFrame(data)
cols, names = list(), list()
for i in range(n_in, 0, -1):
cols.append(df.shift(i))
names += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)]
for i in range(0, n_out):
cols.append(df.shift(-i))
if i == 0:
names += [('var%d(t)' % (j+1)) for j in range(n_vars)]
else:
names += [('var%d(t+%d)' % (j+1, i)) for j in range(n_vars)]
# put it all together
agg = concat(cols, axis=1)
agg.columns = names
# drop rows with NaN values introduced by the shift operations
if dropnan: agg.dropna(inplace=True)
return agg
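# A small usage sketch (values are illustrative): framing a univariate series with two lags
# produces columns var1(t-2), var1(t-1), var1(t), with the NaN rows from shifting dropped.
# raw = DataFrame({'price': [1.0, 1.1, 1.3, 1.2, 1.5]})
# supervised = series_to_supervised(raw.values, n_in=2, n_out=1)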
import keras_NN
import numpy as np
import pandas as pd
import time
# Read data set from file
X_train = pd.read_csv("./input/X_train.csv", skiprows=0)
"""Clean, bundle and create API to load KSSL data
The KSSL database is provided as a Microsoft Access database designed
as an OLTP. The purposes of this module are: (i) to export all tables
as independent .csv files to make it platform independent; (ii) to
make it amenable to multi-dimensional analytical queries (OLAP);
(iii) to provide an API for easy loading of the dataset as numpy arrays.
For further information on KSSL database contact:
* https://www.nrcs.usda.gov/wps/portal/nrcs/main/soils/research/
"""
import subprocess
from pathlib import Path
from .base import select_rows, chunk
from spectrai.core import get_kssl_config
import pandas as pd
import numpy as np
import re
import opusFC # Ref.: https://stuart-cls.github.io/python-opusfc-dist/
from tqdm import tqdm
DATA_KSSL, DATA_NORM, DATA_SPECTRA, DB_NAME = get_kssl_config()
def access_to_csv(in_folder=None, out_folder=DATA_NORM, db_name=DB_NAME):
"""Exports KSSL '.accdb' tables to individual '.csv' files.
Linux-like OS only as depends on 'mdbtools'
https://github.com/brianb/mdbtools
Parameters
----------
in_folder: string, optional
Specify the path of the folder containing the '.accdb' KSSL file
out_folder: string, optional
Specify the path of the folder that will contain exported tables
db_name: string, optional
Specify name of the KSSL Microsoft Access database
Returns
-------
None
"""
in_folder = Path(in_folder)
out_folder = Path(out_folder)
if not in_folder.exists():
raise IOError('in_folder not found.')
if not out_folder.exists():
out_folder.mkdir(parents=True)
script_name = Path(__file__).parent / 'scripts/access2csv.sh'
out = subprocess.run([script_name, in_folder / DB_NAME, out_folder])
if out.returncode == 0:
print('KSSL tables exported successfully to .csv files.')
else:
raise OSError('Execution of access2csv.sh failed.')
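# A hypothetical call (the input folder is a placeholder; requires 'mdbtools' on a Linux-like OS):
# access_to_csv(in_folder='/data/kssl/raw', out_folder=DATA_NORM, db_name=DB_NAME)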
def _get_layer_analyte_tbl():
"""Returns relevant clean subset of `layer_analyte.csv` KSSL DB table.
Notes
----
Only `master_prep_id` relevant to MIRS analysis selected
`calc_value` are by default `str` as possibly containing
values such as (slight, 1:2, ...). Only numeric ones are
selected
Returns
-------
Pandas DataFrame
New DataFrame with selected columns, rows
"""
return pd.read_csv(DATA_NORM / 'layer_analyte.csv', low_memory=False) \
.dropna(subset=['analyte_id', 'calc_value']) \
.pipe(select_rows, {
'master_prep_id': lambda d: d in [18, 19, 27, 28],
'calc_value': lambda d: re.search(r'[a-zA-Z]|:|\s', str(d)) is None}) \
.loc[:, ['lay_id', 'analyte_id', 'calc_value']] \
.astype({'calc_value': float})
def _get_layer_tbl():
"""Returns relevant clean subset of `analyte.csv` KSSL DB table.
Returns
-------
Pandas DataFrame
New DataFrame with selected columns, rows
"""
return pd.read_csv(DATA_NORM / 'layer.csv', low_memory=False) \
.loc[:, ['lay_id', 'lims_pedon_id', 'lims_site_id', 'lay_depth_to_top']] \
.dropna() \
.astype({'lims_pedon_id': 'int32', 'lims_site_id': 'int32'})
def _get_sample_tbl():
"""Returns relevant clean subset of `sample.csv` KSSL DB table.
Notes
----
Only `smp_id` > 1000 relevant to MIRS analysis selected
Returns
-------
Pandas DataFrame
New DataFrame with selected columns, rows
"""
return pd.read_csv(DATA_NORM / 'sample.csv', low_memory=False)
from datetime import timedelta
import pytest
from pandas import PeriodIndex, Series, Timedelta, date_range, period_range, to_datetime
import pandas._testing as tm
class TestToTimestamp:
def test_to_timestamp(self):
index = period_range(freq="A", start="1/1/2001", end="12/1/2009")
series = Series(1, index=index, name="foo")
exp_index = date_range("1/1/2001", end="12/31/2009", freq="A-DEC")
result = series.to_timestamp(how="end")
exp_index = exp_index + Timedelta(1, "D")
from datetime import datetime, time
from itertools import product
import numpy as np
import pytest
import pytz
import pandas as pd
from pandas import (
DataFrame,
DatetimeIndex,
Index,
MultiIndex,
Series,
date_range,
period_range,
to_datetime,
)
import pandas.util.testing as tm
import pandas.tseries.offsets as offsets
@pytest.fixture(params=product([True, False], [True, False]))
def close_open_fixture(request):
return request.param
class TestDataFrameTimeSeriesMethods:
def test_pct_change(self, datetime_frame):
rs = datetime_frame.pct_change(fill_method=None)
tm.assert_frame_equal(rs, datetime_frame / datetime_frame.shift(1) - 1)
rs = datetime_frame.pct_change(2)
filled = datetime_frame.fillna(method="pad")
tm.assert_frame_equal(rs, filled / filled.shift(2) - 1)
rs = datetime_frame.pct_change(fill_method="bfill", limit=1)
filled = datetime_frame.fillna(method="bfill", limit=1)
tm.assert_frame_equal(rs, filled / filled.shift(1) - 1)
rs = datetime_frame.pct_change(freq="5D")
filled = datetime_frame.fillna(method="pad")
tm.assert_frame_equal(
rs, (filled / filled.shift(freq="5D") - 1).reindex_like(filled)
)
def test_pct_change_shift_over_nas(self):
s = Series([1.0, 1.5, np.nan, 2.5, 3.0])
df = DataFrame({"a": s, "b": s})
chg = df.pct_change()
expected = Series([np.nan, 0.5, 0.0, 2.5 / 1.5 - 1, 0.2])
edf = DataFrame({"a": expected, "b": expected})
tm.assert_frame_equal(chg, edf)
@pytest.mark.parametrize(
"freq, periods, fill_method, limit",
[
("5B", 5, None, None),
("3B", 3, None, None),
("3B", 3, "bfill", None),
("7B", 7, "pad", 1),
("7B", 7, "bfill", 3),
("14B", 14, None, None),
],
)
def test_pct_change_periods_freq(
self, datetime_frame, freq, periods, fill_method, limit
):
# GH 7292
rs_freq = datetime_frame.pct_change(
freq=freq, fill_method=fill_method, limit=limit
)
rs_periods = datetime_frame.pct_change(
periods, fill_method=fill_method, limit=limit
)
tm.assert_frame_equal(rs_freq, rs_periods)
empty_ts = DataFrame(index=datetime_frame.index, columns=datetime_frame.columns)
rs_freq = empty_ts.pct_change(freq=freq, fill_method=fill_method, limit=limit)
rs_periods = empty_ts.pct_change(periods, fill_method=fill_method, limit=limit)
tm.assert_frame_equal(rs_freq, rs_periods)
def test_frame_ctor_datetime64_column(self):
rng = date_range("1/1/2000 00:00:00", "1/1/2000 1:59:50", freq="10s")
dates = np.asarray(rng)
df = DataFrame({"A": np.random.randn(len(rng)), "B": dates})
assert np.issubdtype(df["B"].dtype, np.dtype("M8[ns]"))
def test_frame_append_datetime64_column(self):
rng = date_range("1/1/2000 00:00:00", "1/1/2000 1:59:50", freq="10s")
df = DataFrame(index=np.arange(len(rng)))
df["A"] = rng
assert np.issubdtype(df["A"].dtype, np.dtype("M8[ns]"))
def test_frame_datetime64_pre1900_repr(self):
df = DataFrame({"year": date_range("1/1/1700", periods=50, freq="A-DEC")})
# it works!
repr(df)
def test_frame_append_datetime64_col_other_units(self):
n = 100
units = ["h", "m", "s", "ms", "D", "M", "Y"]
ns_dtype = np.dtype("M8[ns]")
for unit in units:
dtype = np.dtype("M8[{unit}]".format(unit=unit))
vals = np.arange(n, dtype=np.int64).view(dtype)
df = DataFrame({"ints": np.arange(n)}, index=np.arange(n))
df[unit] = vals
ex_vals = to_datetime(vals.astype("O")).values
assert df[unit].dtype == ns_dtype
assert (df[unit].values == ex_vals).all()
# Test insertion into existing datetime64 column
df = DataFrame({"ints": np.arange(n)}, index=np.arange(n))
df["dates"] = np.arange(n, dtype=np.int64).view(ns_dtype)
for unit in units:
dtype = np.dtype("M8[{unit}]".format(unit=unit))
vals = np.arange(n, dtype=np.int64).view(dtype)
tmp = df.copy()
tmp["dates"] = vals
ex_vals = to_datetime(vals.astype("O")).values
assert (tmp["dates"].values == ex_vals).all()
def test_asfreq(self, datetime_frame):
offset_monthly = datetime_frame.asfreq(offsets.BMonthEnd())
rule_monthly = datetime_frame.asfreq("BM")
tm.assert_almost_equal(offset_monthly["A"], rule_monthly["A"])
filled = rule_monthly.asfreq("B", method="pad") # noqa
# TODO: actually check that this worked.
# don't forget!
filled_dep = rule_monthly.asfreq("B", method="pad") # noqa
# test does not blow up on length-0 DataFrame
zero_length = datetime_frame.reindex([])
result = zero_length.asfreq("BM")
assert result is not zero_length
def test_asfreq_datetimeindex(self):
df = DataFrame(
{"A": [1, 2, 3]},
index=[datetime(2011, 11, 1), datetime(2011, 11, 2), datetime(2011, 11, 3)],
)
df = df.asfreq("B")
assert isinstance(df.index, DatetimeIndex)
ts = df["A"].asfreq("B")
assert isinstance(ts.index, DatetimeIndex)
def test_asfreq_fillvalue(self):
# test for fill value during upsampling, related to issue 3715
# setup
rng = pd.date_range("1/1/2016", periods=10, freq="2S")
ts = pd.Series(np.arange(len(rng)), index=rng)
df = pd.DataFrame({"one": ts})
# insert pre-existing missing value
df.loc["2016-01-01 00:00:08", "one"] = None
actual_df = df.asfreq(freq="1S", fill_value=9.0)
expected_df = df.asfreq(freq="1S").fillna(9.0)
expected_df.loc["2016-01-01 00:00:08", "one"] = None
tm.assert_frame_equal(expected_df, actual_df)
expected_series = ts.asfreq(freq="1S").fillna(9.0)
actual_series = ts.asfreq(freq="1S", fill_value=9.0)
tm.assert_series_equal(expected_series, actual_series)
@pytest.mark.parametrize(
"data,idx,expected_first,expected_last",
[
({"A": [1, 2, 3]}, [1, 1, 2], 1, 2),
({"A": [1, 2, 3]}, [1, 2, 2], 1, 2),
({"A": [1, 2, 3, 4]}, ["d", "d", "d", "d"], "d", "d"),
({"A": [1, np.nan, 3]}, [1, 1, 2], 1, 2),
({"A": [np.nan, np.nan, 3]}, [1, 1, 2], 2, 2),
({"A": [1, np.nan, 3]}, [1, 2, 2], 1, 2),
],
)
def test_first_last_valid(
self, float_frame, data, idx, expected_first, expected_last
):
N = len(float_frame.index)
mat = np.random.randn(N)
mat[:5] = np.nan
mat[-5:] = np.nan
frame = DataFrame({"foo": mat}, index=float_frame.index)
index = frame.first_valid_index()
assert index == frame.index[5]
index = frame.last_valid_index()
assert index == frame.index[-6]
# GH12800
empty = DataFrame()
assert empty.last_valid_index() is None
assert empty.first_valid_index() is None
# GH17400: no valid entries
frame[:] = np.nan
assert frame.last_valid_index() is None
assert frame.first_valid_index() is None
        # GH20499: freq is preserved even when there are holes
frame.index = date_range("20110101", periods=N, freq="B")
frame.iloc[1] = 1
frame.iloc[-2] = 1
assert frame.first_valid_index() == frame.index[1]
assert frame.last_valid_index() == frame.index[-2]
assert frame.first_valid_index().freq == frame.index.freq
assert frame.last_valid_index().freq == frame.index.freq
# GH 21441
df = DataFrame(data, index=idx)
assert expected_first == df.first_valid_index()
assert expected_last == df.last_valid_index()
@pytest.mark.parametrize("klass", [Series, DataFrame])
def test_first_valid_index_all_nan(self, klass):
# GH#9752 Series/DataFrame should both return None, not raise
obj = klass([np.nan])
assert obj.first_valid_index() is None
assert obj.iloc[:0].first_valid_index() is None
def test_first_subset(self):
ts = tm.makeTimeDataFrame(freq="12h")
result = ts.first("10d")
assert len(result) == 20
ts = tm.makeTimeDataFrame(freq="D")
result = ts.first("10d")
assert len(result) == 10
result = ts.first("3M")
expected = ts[:"3/31/2000"]
tm.assert_frame_equal(result, expected)
result = ts.first("21D")
expected = ts[:21]
tm.assert_frame_equal(result, expected)
result = ts[:0].first("3M")
tm.assert_frame_equal(result, ts[:0])
def test_first_raises(self):
# GH20725
df = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
with pytest.raises(TypeError): # index is not a DatetimeIndex
df.first("1D")
def test_last_subset(self):
ts = tm.makeTimeDataFrame(freq="12h")
result = ts.last("10d")
assert len(result) == 20
ts = tm.makeTimeDataFrame(nper=30, freq="D")
result = ts.last("10d")
assert len(result) == 10
result = ts.last("21D")
expected = ts["2000-01-10":]
tm.assert_frame_equal(result, expected)
result = ts.last("21D")
expected = ts[-21:]
tm.assert_frame_equal(result, expected)
result = ts[:0].last("3M")
tm.assert_frame_equal(result, ts[:0])
def test_last_raises(self):
# GH20725
df = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
with pytest.raises(TypeError): # index is not a DatetimeIndex
df.last("1D")
def test_at_time(self):
rng = date_range("1/1/2000", "1/5/2000", freq="5min")
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
rs = ts.at_time(rng[1])
assert (rs.index.hour == rng[1].hour).all()
assert (rs.index.minute == rng[1].minute).all()
assert (rs.index.second == rng[1].second).all()
result = ts.at_time("9:30")
expected = ts.at_time(time(9, 30))
tm.assert_frame_equal(result, expected)
result = ts.loc[time(9, 30)]
expected = ts.loc[(rng.hour == 9) & (rng.minute == 30)]
tm.assert_frame_equal(result, expected)
# midnight, everything
rng = date_range("1/1/2000", "1/31/2000")
ts = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts.at_time(time(0, 0))
tm.assert_frame_equal(result, ts)
# time doesn't exist
rng = date_range("1/1/2012", freq="23Min", periods=384)
ts = DataFrame(np.random.randn(len(rng), 2), rng)
rs = ts.at_time("16:00")
assert len(rs) == 0
@pytest.mark.parametrize(
"hour", ["1:00", "1:00AM", time(1), time(1, tzinfo=pytz.UTC)]
)
def test_at_time_errors(self, hour):
# GH 24043
dti = pd.date_range("2018", periods=3, freq="H")
df = pd.DataFrame(list(range(len(dti))), index=dti)
if getattr(hour, "tzinfo", None) is None:
result = df.at_time(hour)
expected = df.iloc[1:2]
tm.assert_frame_equal(result, expected)
else:
with pytest.raises(ValueError, match="Index must be timezone"):
df.at_time(hour)
def test_at_time_tz(self):
# GH 24043
dti = pd.date_range("2018", periods=3, freq="H", tz="US/Pacific")
df = pd.DataFrame(list(range(len(dti))), index=dti)
result = df.at_time(time(4, tzinfo=pytz.timezone("US/Eastern")))
expected = df.iloc[1:2]
tm.assert_frame_equal(result, expected)
def test_at_time_raises(self):
# GH20725
df = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
with pytest.raises(TypeError): # index is not a DatetimeIndex
df.at_time("00:00")
@pytest.mark.parametrize("axis", ["index", "columns", 0, 1])
def test_at_time_axis(self, axis):
# issue 8839
rng = date_range("1/1/2000", "1/5/2000", freq="5min")
ts = DataFrame(np.random.randn(len(rng), len(rng)))
ts.index, ts.columns = rng, rng
indices = rng[(rng.hour == 9) & (rng.minute == 30) & (rng.second == 0)]
if axis in ["index", 0]:
expected = ts.loc[indices, :]
elif axis in ["columns", 1]:
expected = ts.loc[:, indices]
result = ts.at_time("9:30", axis=axis)
tm.assert_frame_equal(result, expected)
def test_between_time(self, close_open_fixture):
rng = date_range("1/1/2000", "1/5/2000", freq="5min")
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(0, 0)
etime = time(1, 0)
inc_start, inc_end = close_open_fixture
filtered = ts.between_time(stime, etime, inc_start, inc_end)
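        # rng spans 1/1 00:00 through 1/5 00:00; 00:00-01:00 at 5-minute steps gives
        # 13 stamps on each of the 4 full days, plus the single 00:00 stamp on 1/5.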
exp_len = 13 * 4 + 1
if not inc_start:
exp_len -= 5
if not inc_end:
exp_len -= 4
assert len(filtered) == exp_len
for rs in filtered.index:
t = rs.time()
if inc_start:
assert t >= stime
else:
assert t > stime
if inc_end:
assert t <= etime
else:
assert t < etime
result = ts.between_time("00:00", "01:00")
expected = ts.between_time(stime, etime)
tm.assert_frame_equal(result, expected)
# across midnight
rng = date_range("1/1/2000", "1/5/2000", freq="5min")
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(22, 0)
etime = time(9, 0)
filtered = ts.between_time(stime, etime, inc_start, inc_end)
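        # Per full day: 00:00-09:00 gives 109 stamps and 22:00-23:55 gives 24,
        # i.e. 12 * 11 + 1 = 133 each for 4 days, plus the final 1/5 00:00 stamp.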
exp_len = (12 * 11 + 1) * 4 + 1
if not inc_start:
exp_len -= 4
if not inc_end:
exp_len -= 4
assert len(filtered) == exp_len
for rs in filtered.index:
t = rs.time()
if inc_start:
assert (t >= stime) or (t <= etime)
else:
assert (t > stime) or (t <= etime)
if inc_end:
assert (t <= etime) or (t >= stime)
else:
assert (t < etime) or (t >= stime)
def test_between_time_raises(self):
# GH20725
df = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
with pytest.raises(TypeError): # index is not a DatetimeIndex
df.between_time(start_time="00:00", end_time="12:00")
def test_between_time_axis(self, axis):
# issue 8839
rng = date_range("1/1/2000", periods=100, freq="10min")
ts = DataFrame(np.random.randn(len(rng), len(rng)))
stime, etime = ("08:00:00", "09:00:00")
exp_len = 7
if axis in ["index", 0]:
ts.index = rng
assert len(ts.between_time(stime, etime)) == exp_len
assert len(ts.between_time(stime, etime, axis=0)) == exp_len
if axis in ["columns", 1]:
ts.columns = rng
selected = ts.between_time(stime, etime, axis=1).columns
assert len(selected) == exp_len
def test_between_time_axis_raises(self, axis):
# issue 8839
rng = date_range("1/1/2000", periods=100, freq="10min")
mask = np.arange(0, len(rng))
rand_data = np.random.randn(len(rng), len(rng))
ts = DataFrame(rand_data, index=rng, columns=rng)
stime, etime = ("08:00:00", "09:00:00")
msg = "Index must be DatetimeIndex"
if axis in ["columns", 1]:
ts.index = mask
with pytest.raises(TypeError, match=msg):
ts.between_time(stime, etime)
with pytest.raises(TypeError, match=msg):
ts.between_time(stime, etime, axis=0)
if axis in ["index", 0]:
ts.columns = mask
with pytest.raises(TypeError, match=msg):
ts.between_time(stime, etime, axis=1)
def test_operation_on_NaT(self):
# Both NaT and Timestamp are in DataFrame.
df = pd.DataFrame({"foo": [pd.NaT, pd.NaT,
|
pd.Timestamp("2012-05-01")
|
pandas.Timestamp
|
import importlib
import os
import pickle
import sys
import hydra
import neptune
import numpy as np
import pandas as pd
from omegaconf import DictConfig
from sklearn.metrics import accuracy_score
from sklearn.model_selection import StratifiedKFold
sys.path.append('../utils')
from data_loader import load_datasets, load_target
from GCSOperator import GCSOperator
from logging_metrics import logging_classification
# global variables
NUM_FOLDS = 3
API_TOKEN = os.environ.get('NEPTUNE_API_TOKEN')
scoring = accuracy_score  # evaluation metric
def get_gcs_operator(config,
project_id=os.environ.get('GOOGLE_CLOUD_PROJECT')):
# setup GCS operator
bucket_name = config['bucket_name']
gcso = GCSOperator(project_id, bucket_name)
return gcso
def load_data(config, base_dir):
# load config
feats = config['features']
target_name = config['target_name']
cloud = config['cloud']
# load data
X_train_all, X_test = load_datasets(feats, base_dir=base_dir, cloud=cloud)
y_train_all = load_target(target_name, base_dir=base_dir, cloud=cloud)
return X_train_all, y_train_all, X_test
def train(X_train_all, y_train_all, X_test, module, config):
params = dict(config['model']['parameters'])
y_test_preds = []
oof_preds = []
scores = []
models = []
kf = StratifiedKFold(n_splits=NUM_FOLDS, shuffle=True, random_state=0)
for ind, (train_index,
valid_index) in enumerate(kf.split(X=X_train_all,
y=y_train_all)):
X_train, X_valid = (X_train_all.iloc[train_index, :],
X_train_all.iloc[valid_index, :])
y_train, y_valid = y_train_all[train_index], y_train_all[valid_index]
res = module.train_and_predict(X_train, X_valid, y_train, y_valid,
X_test, params, ind, scoring)
# for evaluation and stacking
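        # 2-D predictions are per-class scores, so reduce them to hard labels via argmax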
if res['y_val_pred'].ndim > 1:
y_val_pred = np.argmax(res['y_val_pred'], axis=1)
else:
y_val_pred = res['y_val_pred']
oof_pred = pd.DataFrame([y_valid.index, y_val_pred]).T
oof_pred.columns = ['index', 'pred']
# save result
y_test_preds.append(res['y_test_pred'])
oof_preds.append(oof_pred)
models.append(res['model'])
scores.append(res['score'])
# logging result
logging_classification(y_valid, res['y_val_pred'])
return y_test_preds, oof_preds, models, scores
def save_models(models, config, base_dir):
gcso = get_gcs_operator(config)
print('***** Save models *****')
for ind, model in enumerate(models):
fname = f'{neptune.get_experiment().id}_model_{ind}.pkl'
fpath = os.path.join(base_dir, 'models', fname)
gcs_path = os.path.join('model', fname)
with open(fpath, mode='wb') as fp:
pickle.dump(model, fp)
        # write the cloud (GCS) upload part here
gcso.upload_file(gcs_path, fpath)
def save_oof(oof_preds, config, base_dir):
gcso = get_gcs_operator(config)
print('***** Save oof *****')
# concat oof result and save
df_oof =
|
pd.concat(oof_preds)
|
pandas.concat
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Metrics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from meterstick import metrics
from meterstick import operations
import mock
import numpy as np
import pandas as pd
from pandas import testing
import unittest
class MetricTest(unittest.TestCase):
"""Tests general features of Metric."""
df = pd.DataFrame({'X': [0, 1, 2, 3], 'Y': [0, 1, 1, 2]})
def test_precompute(self):
metric = metrics.Metric(
'foo',
precompute=lambda df, split_by: df[split_by],
compute=lambda x: x.sum().values[0])
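    # precompute keeps only the split column, so compute sums Y within each slice:
    # 0, 1 + 1, and 2 -> [0, 2, 2].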
output = metric.compute_on(self.df, 'Y')
expected = pd.DataFrame({'foo': [0, 2, 2]}, index=range(3))
expected.index.name = 'Y'
testing.assert_frame_equal(output, expected)
def test_compute(self):
metric = metrics.Metric('foo', compute=lambda x: x['X'].sum())
output = metric.compute_on(self.df)
expected = metrics.Sum('X', 'foo').compute_on(self.df)
testing.assert_frame_equal(output, expected)
def test_postcompute(self):
def postcompute(values, split_by):
del split_by
return values / values.sum()
output = metrics.Sum('X', postcompute=postcompute).compute_on(self.df, 'Y')
expected = operations.Distribution('Y',
metrics.Sum('X')).compute_on(self.df)
expected.columns = ['sum(X)']
testing.assert_frame_equal(output.astype(float), expected)
def test_compute_slices(self):
def _sum(df, split_by):
if split_by:
df = df.groupby(split_by)
return df['X'].sum()
metric = metrics.Metric('foo', compute_slices=_sum)
output = metric.compute_on(self.df)
expected = metrics.Sum('X', 'foo').compute_on(self.df)
testing.assert_frame_equal(output, expected)
def test_final_compute(self):
metric = metrics.Metric(
'foo', compute=lambda x: x, final_compute=lambda *_: 2)
output = metric.compute_on(None)
self.assertEqual(output, 2)
def test_pipeline_operator(self):
m = metrics.Count('X')
testing.assert_frame_equal(
m.compute_on(self.df), m | metrics.compute_on(self.df))
class SimpleMetricTest(unittest.TestCase):
df = pd.DataFrame({
'X': [1, 1, 1, 2, 2, 3, 4],
'Y': [3, 1, 1, 4, 4, 3, 5],
'grp': ['A'] * 3 + ['B'] * 4
})
def test_list_where(self):
metric = metrics.Mean('X', where=['grp == "A"'])
output = metric.compute_on(self.df, return_dataframe=False)
expected = self.df.query('grp == "A"')['X'].mean()
self.assertEqual(output, expected)
def test_single_list_where(self):
metric = metrics.Mean('X', where=['grp == "A"', 'Y < 2'])
output = metric.compute_on(self.df, return_dataframe=False)
expected = self.df.query('grp == "A" and Y < 2')['X'].mean()
self.assertEqual(output, expected)
def test_count_not_df(self):
metric = metrics.Count('X')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, 7)
def test_count_split_by_not_df(self):
metric = metrics.Count('X')
output = metric.compute_on(self.df, 'grp', return_dataframe=False)
expected = self.df.groupby('grp')['X'].count()
expected.name = 'count(X)'
testing.assert_series_equal(output, expected)
def test_count_where(self):
metric = metrics.Count('X', where='grp == "A"')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, 3)
def test_count_with_nan(self):
df = pd.DataFrame({'X': [1, 1, np.nan, 2, 2, 3, 4]})
metric = metrics.Count('X')
output = metric.compute_on(df, return_dataframe=False)
self.assertEqual(output, 6)
def test_count_unmelted(self):
metric = metrics.Count('X')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'count(X)': [7]})
testing.assert_frame_equal(output, expected)
def test_count_melted(self):
metric = metrics.Count('X')
output = metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({'Value': [7]}, index=['count(X)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_count_split_by_unmelted(self):
metric = metrics.Count('X')
output = metric.compute_on(self.df, 'grp')
expected = pd.DataFrame({'count(X)': [3, 4]}, index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_count_split_by_melted(self):
metric = metrics.Count('X')
output = metric.compute_on(self.df, 'grp', melted=True)
expected = pd.DataFrame({
'Value': [3, 4],
'grp': ['A', 'B']
},
index=['count(X)', 'count(X)'])
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_count_distinct(self):
df = pd.DataFrame({'X': [1, 1, np.nan, 2, 2, 3]})
metric = metrics.Count('X', distinct=True)
output = metric.compute_on(df, return_dataframe=False)
self.assertEqual(output, 3)
def test_sum_not_df(self):
metric = metrics.Sum('X')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, 14)
def test_sum_split_by_not_df(self):
metric = metrics.Sum('X')
output = metric.compute_on(self.df, 'grp', return_dataframe=False)
expected = self.df.groupby('grp')['X'].sum()
expected.name = 'sum(X)'
testing.assert_series_equal(output, expected)
def test_sum_where(self):
metric = metrics.Sum('X', where='grp == "A"')
output = metric.compute_on(self.df, return_dataframe=False)
expected = self.df.query('grp == "A"')['X'].sum()
self.assertEqual(output, expected)
def test_sum_unmelted(self):
metric = metrics.Sum('X')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'sum(X)': [14]})
testing.assert_frame_equal(output, expected)
def test_sum_melted(self):
metric = metrics.Sum('X')
output = metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({'Value': [14]}, index=['sum(X)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_sum_split_by_unmelted(self):
metric = metrics.Sum('X')
output = metric.compute_on(self.df, 'grp')
expected = pd.DataFrame({'sum(X)': [3, 11]}, index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_sum_split_by_melted(self):
metric = metrics.Sum('X')
output = metric.compute_on(self.df, 'grp', melted=True)
expected = pd.DataFrame({
'Value': [3, 11],
'grp': ['A', 'B']
},
index=['sum(X)', 'sum(X)'])
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_dot_not_df(self):
metric = metrics.Dot('X', 'Y')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, sum(self.df.X * self.df.Y))
def test_dot_split_by_not_df(self):
metric = metrics.Dot('X', 'Y')
output = metric.compute_on(self.df, 'grp', return_dataframe=False)
self.df['X * Y'] = self.df.X * self.df.Y
expected = self.df.groupby('grp')['X * Y'].sum()
expected.name = 'sum(X * Y)'
testing.assert_series_equal(output, expected)
def test_dot_where(self):
metric = metrics.Dot('X', 'Y', where='grp == "A"')
output = metric.compute_on(self.df, return_dataframe=False)
d = self.df.query('grp == "A"')
self.assertEqual(output, sum(d.X * d.Y))
def test_dot_unmelted(self):
metric = metrics.Dot('X', 'Y')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'sum(X * Y)': [sum(self.df.X * self.df.Y)]})
testing.assert_frame_equal(output, expected)
def test_dot_normalized(self):
metric = metrics.Dot('X', 'Y', True)
output = metric.compute_on(self.df)
expected = pd.DataFrame({'mean(X * Y)': [(self.df.X * self.df.Y).mean()]})
testing.assert_frame_equal(output, expected)
def test_dot_melted(self):
metric = metrics.Dot('X', 'Y')
output = metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({'Value': [sum(self.df.X * self.df.Y)]},
index=['sum(X * Y)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_dot_split_by_unmelted(self):
metric = metrics.Dot('X', 'Y')
output = metric.compute_on(self.df, 'grp')
expected = pd.DataFrame({'sum(X * Y)': [5, 45]}, index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_dot_split_by_melted(self):
metric = metrics.Dot('X', 'Y')
output = metric.compute_on(self.df, 'grp', melted=True)
expected = pd.DataFrame({
'Value': [5, 45],
'grp': ['A', 'B']
},
index=['sum(X * Y)', 'sum(X * Y)'])
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_mean_not_df(self):
metric = metrics.Mean('X')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, 2)
def test_mean_split_by_not_df(self):
metric = metrics.Mean('X')
output = metric.compute_on(self.df, 'grp', return_dataframe=False)
expected = self.df.groupby('grp')['X'].mean()
expected.name = 'mean(X)'
testing.assert_series_equal(output, expected)
def test_mean_where(self):
metric = metrics.Mean('X', where='grp == "A"')
output = metric.compute_on(self.df, return_dataframe=False)
expected = self.df.query('grp == "A"')['X'].mean()
self.assertEqual(output, expected)
def test_mean_unmelted(self):
metric = metrics.Mean('X')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'mean(X)': [2.]})
testing.assert_frame_equal(output, expected)
def test_mean_melted(self):
metric = metrics.Mean('X')
output = metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({'Value': [2.]}, index=['mean(X)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_mean_split_by_unmelted(self):
metric = metrics.Mean('X')
output = metric.compute_on(self.df, 'grp')
expected = pd.DataFrame({'mean(X)': [1, 2.75]}, index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_mean_split_by_melted(self):
metric = metrics.Mean('X')
output = metric.compute_on(self.df, 'grp', melted=True)
expected = pd.DataFrame({
'Value': [1, 2.75],
'grp': ['A', 'B']
},
index=['mean(X)', 'mean(X)'])
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_max(self):
metric = metrics.Max('X')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'max(X)': [4]})
testing.assert_frame_equal(output, expected)
def test_min(self):
metric = metrics.Min('X')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'min(X)': [1]})
testing.assert_frame_equal(output, expected)
def test_weighted_mean_not_df(self):
df = pd.DataFrame({'X': [1, 2], 'Y': [3, 1]})
metric = metrics.Mean('X', 'Y')
output = metric.compute_on(df, return_dataframe=False)
self.assertEqual(output, 1.25)
def test_weighted_mean_split_by_not_df(self):
df = pd.DataFrame({
'X': [1, 2, 1, 3],
'Y': [3, 1, 0, 1],
'grp': ['A', 'A', 'B', 'B']
})
metric = metrics.Mean('X', 'Y')
output = metric.compute_on(df, 'grp', return_dataframe=False)
output.sort_index(level='grp', inplace=True) # For Py2
expected = pd.Series((1.25, 3.), index=['A', 'B'])
expected.index.name = 'grp'
expected.name = 'Y-weighted mean(X)'
testing.assert_series_equal(output, expected)
def test_weighted_mean_unmelted(self):
df = pd.DataFrame({'X': [1, 2], 'Y': [3, 1]})
metric = metrics.Mean('X', 'Y')
output = metric.compute_on(df)
expected = pd.DataFrame({'Y-weighted mean(X)': [1.25]})
testing.assert_frame_equal(output, expected)
def test_weighted_mean_melted(self):
df = pd.DataFrame({'X': [1, 2], 'Y': [3, 1]})
metric = metrics.Mean('X', 'Y')
output = metric.compute_on(df, melted=True)
expected = pd.DataFrame({'Value': [1.25]}, index=['Y-weighted mean(X)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_weighted_mean_split_by_unmelted(self):
df = pd.DataFrame({
'X': [1, 2, 1, 3],
'Y': [3, 1, 0, 1],
'grp': ['A', 'A', 'B', 'B']
})
metric = metrics.Mean('X', 'Y')
output = metric.compute_on(df, 'grp')
output.sort_index(level='grp', inplace=True) # For Py2
expected = pd.DataFrame({'Y-weighted mean(X)': [1.25, 3.]},
index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_weighted_mean_split_by_melted(self):
df = pd.DataFrame({
'X': [1, 2, 1, 3],
'Y': [3, 1, 0, 1],
'grp': ['A', 'A', 'B', 'B']
})
metric = metrics.Mean('X', 'Y')
output = metric.compute_on(df, 'grp', melted=True)
output.sort_index(level='grp', inplace=True) # For Py2
expected = pd.DataFrame({
'Value': [1.25, 3.],
'grp': ['A', 'B']
},
index=['Y-weighted mean(X)', 'Y-weighted mean(X)'])
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_quantile_raise(self):
with self.assertRaises(ValueError) as cm:
metrics.Quantile('X', 2)
self.assertEqual(str(cm.exception), 'quantiles must be in [0, 1].')
def test_quantile_multiple_quantiles_raise(self):
with self.assertRaises(ValueError) as cm:
metrics.Quantile('X', [0.1, 2])
self.assertEqual(str(cm.exception), 'quantiles must be in [0, 1].')
def test_quantile_not_df(self):
metric = metrics.Quantile('X', 0.5)
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, 2)
def test_quantile_where(self):
metric = metrics.Quantile('X', where='grp == "B"')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, 2.5)
def test_quantile_interpolation(self):
metric = metrics.Quantile('X', 0.5, interpolation='lower')
output = metric.compute_on(
pd.DataFrame({'X': [1, 2]}), return_dataframe=False)
self.assertEqual(output, 1)
def test_quantile_split_by_not_df(self):
metric = metrics.Quantile('X', 0.5)
output = metric.compute_on(self.df, 'grp', return_dataframe=False)
expected = self.df.groupby('grp')['X'].quantile(0.5)
expected.name = 'quantile(X, 0.5)'
testing.assert_series_equal(output, expected)
def test_quantile_unmelted(self):
metric = metrics.Quantile('X', 0.5)
output = metric.compute_on(self.df)
expected = pd.DataFrame({'quantile(X, 0.5)': [2.]})
testing.assert_frame_equal(output, expected)
def test_quantile_melted(self):
metric = metrics.Quantile('X', 0.5)
output = metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({'Value': [2.]}, index=['quantile(X, 0.5)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_quantile_split_by_unmelted(self):
metric = metrics.Quantile('X', 0.5)
output = metric.compute_on(self.df, 'grp')
expected = pd.DataFrame({'quantile(X, 0.5)': [1, 2.5]}, index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_quantile_split_by_melted(self):
metric = metrics.Quantile('X', 0.5)
output = metric.compute_on(self.df, 'grp', melted=True)
expected = pd.DataFrame({
'Value': [1, 2.5],
'grp': ['A', 'B']
},
index=['quantile(X, 0.5)'] * 2)
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_quantile_multiple_quantiles(self):
df = pd.DataFrame({'X': [0, 1]})
metric = metrics.MetricList(
[metrics.Quantile('X', [0.1, 0.5]),
metrics.Count('X')])
output = metric.compute_on(df)
expected = pd.DataFrame(
[[0.1, 0.5, 2]],
columns=['quantile(X, 0.1)', 'quantile(X, 0.5)', 'count(X)'])
testing.assert_frame_equal(output, expected)
def test_quantile_multiple_quantiles_melted(self):
df =
|
pd.DataFrame({'X': [0, 1]})
|
pandas.DataFrame
|
import csv
import requests
from urllib.request import urlopen
from bs4 import BeautifulSoup
import pandas as pd
from google.cloud.storage.blob import Blob
from google.cloud import storage
import yfinance as yf
import datetime
from datetime import datetime, timedelta
import os
import shutil
import logging
import os.path
import datetime
import tempfile
import base64
import json
from google.cloud import pubsub_v1
from dateutil.relativedelta import relativedelta
from pytz import timezone
def pub_bq_load_msg(file_to_load, bucket, store_path, project, region):
REGION = region
PROJECT_ID = project
RECEIVING_FUNCTION = 'publish'
function_url = f'https://{REGION}-{PROJECT_ID}.cloudfunctions.net/{RECEIVING_FUNCTION}'
if file_to_load == 'micro_cap_etf_lst.csv':
table_name = "top_micro_cap_etf"
else:
table_name = "etf_ytd_daily_summary"
param = {"project":project,"region":region,"topic":"load_etf_dataset","message":{"tgt_dataset":"etf_dataset", "tgt_tbl_name":table_name, "bucket":bucket, "store_path":store_path}}
data=json.dumps(param)
logging.info('topic-message passed:{}'.format(data))
r = requests.post(function_url, json=param)
logging.info('request post header:{} request post status:{}'.format(r.headers, r.status_code))
def load_file_to_storage(bucketname, file_path, store_path, project, region):
storage_client = storage.Client()
bucket = storage_client.get_bucket(bucketname)
data=bucket.blob(store_path)
data.upload_from_filename(file_path)
file_name = os.path.basename(file_path)
logging.info('load_file_to_storage:file to trans={}'.format(file_name))
# pub_bq_tran_msg (file_name, project, region)
pub_bq_load_msg(file_name, bucketname, store_path, project, region)
def get_hist_etf_price(bucket, destdir, file_loc, project, region):
etf_sym_nm=pd.read_csv(file_loc)
etf_ytd_close_summary = pd.DataFrame([])
summary_file_name = "etf_ytd_close_summary.csv"
os.chdir(destdir)
file_path = os.path.join(destdir,summary_file_name)
store_path = 'ytd_2020/raw/{}'.format(os.path.basename(file_path))
# Set dates for data
now = datetime.datetime.now(timezone('US/Eastern'))
roll_one_yr = relativedelta(days=-365)
hist_end = relativedelta(days=-1)
hist_start_date = now + roll_one_yr
hist_start_notm =
|
pd.to_datetime(hist_start_date)
|
pandas.to_datetime
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Dependency
----------------------------------
Dependency analysis class
Created on Nov 8, 2018
Last edited on Nov 8, 2018
@author: <NAME>
"""
import os
import io
import sys
import datetime
import numpy as np
from IPython import embed
import pandas as pd
import logging
import matplotlib.pyplot as plt
plt.switch_backend('agg')
import seaborn as sns
import re
import subprocess
import yaml
from glob import glob
import scipy
from statsmodels.robust.scale import mad
from collections import Counter
from collections import defaultdict as ddict
from sklearn.metrics import roc_curve, average_precision_score, f1_score, roc_auc_score
from sklearn.decomposition import PCA
from sklearn.model_selection import KFold
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.model_selection._search import ParameterGrid
import torch
import paths
from biovnn_model import BioVNNmodel
from utils import compute_AUC_bootstrap, plot_pred_true_r_by_gene_MAD, plot_pred_true_r_by_gene_mean, gene_level_cor, \
individual_auc, plot_ROC, plot_top_ROC, plot_hist_cor, plot_hist_auc
disease_mapping = {'Bladder Cancer': 'BLCA',
'Breast Cancer': 'BRCA',
'breast': 'BRCA',
'Cervical Cancer': 'CESC',
'Colon Cancer': 'COAD',
'Colon/Colorectal Cancer': 'COAD',
'colorectal': 'COAD',
'GBM/Brain Cancer': 'GBM',
'glioblastoma': 'GBM',
'Head and Neck Cancer': 'HNSC',
'upper_aerodigestive': 'HNSC',
'Liver Cancer': 'LIHC',
'liver': 'LIHC',
'Ovarian Cancer': 'OV',
'ovary': 'OV',
'Skin Cancer': 'SKCM',
'skin': 'SKCM',
'Gastric Cancer': 'STAD',
'Soft Tissue/ Thyroid Cancer': 'THCA',
'Thyroid Cancer': 'THCA',
'Endometrial Cancer': 'UCEC',
'Endometrial/Uterine Cancer': 'UCEC',
'uterus': 'UCEC',
'Esophageal Cancer': 'ESCA',
'esophagus': 'ESCA',
'Pancreatic Cancer': 'PAAD',
'pancreas': 'PAAD',
'Non-Small Cell Lung Cancer (NSCLC), Adenocarcinoma': 'LUAD',
'Non-Small Cell Lung Cancer (NSCLC), Squamous Cell Carcinoma': 'LUSC',
'Renal Carcinoma, clear cell': 'KIRC',
'Glioblastoma': 'GBM',
'Acute Myelogenous Leukemia (AML)': 'LAML',
'AML': 'LAML'}
def load_params(output_dir=None, param_f=None):
if param_f is None:
param_f = os.path.join(output_dir, 'param.yaml')
with open(param_f, 'r') as stream:
params = yaml.safe_load(stream)
return params
def save_params(output_dir, params):
with io.open(os.path.join(output_dir, 'param.yaml'), 'w', encoding='utf8') as outfile:
yaml.dump(params, outfile, default_flow_style=False, allow_unicode=True)
assert params == load_params(output_dir)
class Dependency(object):
def __init__(self, cancer_type, data_dir, result_dir, run_name, params,
depmap_ver='19Q3', use_hierarchy=True):
self.method = 'BioVNN'
self.cancer_type = cancer_type
self.n_cluster = None
self.run_name = run_name
self.patient_list = []
self.cancer_type_to_patients = ddict(list)
self.rna_dir = os.path.join(data_dir, 'DepMap', depmap_ver)
self.data_dir = data_dir
if 'ref_groups' in params and params['ref_groups'] == 'GO':
self.community_file = os.path.join(data_dir, 'GO', 'goa_human_20201212.gmt')
self.community_hierarchy_file = os.path.join(self.data_dir, 'GO', 'go_20201212_relation.txt')
else:
self.community_file = os.path.join(data_dir, 'Reactome', 'ReactomePathways.gmt')
self.community_hierarchy_file = os.path.join(self.data_dir, 'Reactome', 'ReactomePathwaysRelation.txt')
self.gene_id_file = os.path.join(self.data_dir, 'Reactome', 'Homo_sapiens_9606.gene_info')
self.gene_id_dict = pd.read_csv(self.gene_id_file, sep='\t', index_col=1)['Symbol'].to_dict()
self.Reactome_name_file = os.path.join(data_dir, 'Reactome', 'ReactomePathways.txt')
self.Reactome_name_dict = pd.read_csv(self.Reactome_name_file, sep='\t', index_col=0, header=None)[1].to_dict()
self.Reactome_reaction_file = os.path.join(self.data_dir, 'Reactome', 'NCBI2Reactome_PE_Reactions_human.txt')
self.Reactome_reaction_df = pd.read_csv(self.Reactome_reaction_file, sep='\t', index_col=None, header=None)
self.Reactome_gene_reaction_dict = ddict(list)
self.Reactome_reaction_gene_dict = ddict(list)
for i, row in self.Reactome_reaction_df.iterrows():
if 'HSA' in row[1] and 'HSA' in row[3]: # Make sure they are from human
if row[0] in self.gene_id_dict:
symbol = self.gene_id_dict[row[0]]
else:
symbol = row[2].split(' [')[0]
self.Reactome_gene_reaction_dict[symbol].append(row[3])
self.Reactome_reaction_gene_dict[row[3]].append(symbol)
self.community_dict = {}
self.community_hierarchy = []
self.community_hierarchy_all = None
self.community_hierarchy_random = []
self.community_hierarchy_random_all = None
self.community_hierarchy_ones = []
self.community_hierarchy_ones_all = None
self.community_hierarchy_dicts_all = {}
self.use_hierarchy = use_hierarchy
self.community_matrix = None
self.result_path = os.path.join(result_dir, self.__class__.__name__, run_name)
self.temp_path = os.path.join(result_dir, self.__class__.__name__, 'temp')
os.makedirs(self.result_path, exist_ok=True)
os.makedirs(self.temp_path, exist_ok=True)
self._dependency_classes = ['Dependency', 'Transfer', 'Postanalysis', 'Postanalysis_ts',
'Postanalysis_transfer', 'Prospective',
'Timestamped', 'Interpret', 'Interpret_ts']
self._dependency_classes_plot = ['Dependency', 'Transfer', 'Postanalysis', 'Postanalysis_ts',
'Postanalysis_transfer', 'Timestamped']
self.params = params
self.load_result = params.get('load_result', False)
self.load_result_dir_name = params.get('load_result_dir_name', False)
if self.load_result and self.load_result_dir_name:
if 'load_result_dir_suffix' in params:
if 'load_result_dir_full' in params:
if params['load_result_dir_full']:
self.load_result_dir = params['load_result_dir_suffix']
else:
self.load_result_dir = os.path.join(result_dir, params['load_result_dir_suffix'])
else:
self.load_result_dir = os.path.join(result_dir, params['load_result_dir_suffix'])
else:
self.load_result_dir = '/'.join(self.result_path.split('/')[:-1] + [self.load_result_dir_name])
params = load_params(self.load_result_dir)
if 'run_mode' in self.params:
run_mode = self.params['run_mode']
else:
run_mode = None
self.params.update(params)
params = self.params
if run_mode:
params['run_mode'] = run_mode
self.params['run_mode'] = run_mode
self.use_cuda = params.get('use_cuda', True)
self.data_types = params.get('data_types', ['rna'])
self.use_all_gene = params.get('use_all_gene', True)
self.exp_ratio_min = params.get('exp_ratio_min', 0.01)
self.feature_max = params.get('feature_max', 99999)
self.feature_per_group_max = params.get('feature_per_group_max', 100)
self.repeat_n = params.get('repeat_n', 1)
self.fold_n = params.get('fold_n', 5)
self.cv_fold = params.get('cv_fold', 0)
self.model_v = params.get('model_v', 'clh_v1')
self.cv_fold_only_run = params.get('cv_fold_only_run', 1)
self.other_cancer_types = params.get('other_cancer_types', [])
self.rna_top_n_std = params.get('rna_top_n_std', 10000)
self.community_affected_size_min = params.get('community_affected_size_min', 5)
self.community_affected_size_max = params.get('community_affected_size_max', 999999)
self.require_label_gene_in_gene_group = params.get('require_label_gene_in_gene_group', True)
self.clip_Xval_Xtest = params.get('clip_Xval_Xtest', [-1, 1])
self.use_MinMaxScaler = params.get('use_MinMaxScaler', False)
self.use_StandardScaler = params.get('use_StandardScaler', True)
self.use_tanh_feature = params.get('use_tanh_feature', False)
self.use_sigmoid_feature = params.get('use_sigmoid_feature', False)
self.use_community_filter = params.get('use_community_filter', True)
self.test_run = params.get('test_run', False)
self.select_genes_in_label = params.get('select_genes_in_label', 'dgidb_w_interaction')
self.use_classification = params.get('use_classification', True)
self.use_binary_dependency = params.get('use_binary_dependency', True)
self.use_class_weights = params.get('use_class_weights', True)
self.use_normalized_class_weights = params.get('use_normalized_class_weights', False)
self.use_sample_class_weights = params.get('use_sample_class_weights', False)
self.use_normalized_sample_class_weights = params.get('use_normalized_sample_class_weights', True)
self.use_all_dependency_gene = params.get('use_all_dependency_gene', True)
self.use_all_feature_for_random_group = params.get('use_all_feature_for_random_group', False)
self.use_all_feature_for_fully_net = params.get('use_all_feature_for_fully_net', False)
self.use_deletion_vector = params.get('use_deletion_vector', True)
self.use_consistant_groups_for_labels = params.get('use_consistant_groups_for_labels', False)
self.run_mode = params.get('run_mode',
'ref') # Could be ref, random_predictor, random, expression_control or full
self.random_group_permutation_ratio = params.get('random_group_permutation_ratio', 1)
self.random_group_hierarchy_permutation_ratio = params.get('random_group_hierarchy_permutation_ratio', 1)
self.random_group_permutation_seed = params.get('random_group_permutation_seed', 9527)
self.leaf_group_gene_in_label_max = params.get('leaf_group_gene_in_label_max', 50)
self.split_by_cancer_type = params.get('split_by_cancer_type', True)
self.save_model_ckpt = params.get('save_model_ckpt', True)
self.output_pred_small = ['RPS20', 'MYC', 'MYCN', 'PIK3CA']
self.GSP_min = params.get('GSP_min', 6)
self.GSN_min = params.get('GSN_min', 6)
self.gene_list = None
self.gene_list_name = None
self.accuracy = None
self.f1 = None
self.confusion_mat = None
self.mcc = None
self.pearson_r = None
self.spearman_rho = None
self.mse = None
self.feature_importance = []
metrics = ['accuracy', 'confusion_mat', 'f1', 'mcc', 'pearson_r', 'spearman_rho', 'mse', 'pearson_r2',
'AUC', 'PR']
data_splits = ['train', 'val', 'test']
for x in metrics:
self.__dict__[x] = ddict(dict)
for z in range(self.repeat_n + 1):
self.__dict__[x][z] = ddict(dict)
for y in data_splits:
self.__dict__[x][z][y] = ddict(list)
for x in ['pred', 'idx']:
self.__dict__[x] = ddict(dict)
for y in data_splits:
self.__dict__[x][y] = ddict(list)
self.metric_output = {}
for y in data_splits:
self.metric_output[y] = pd.DataFrame()
self.save_load_data = ['rna']
self.depmap_ver = depmap_ver
os.makedirs(self.rna_dir, exist_ok=True)
self.save_load_data = ['rna', 'dependency']
self.hdf5_df_file = os.path.join(self.temp_path,
'df_{}_depmap_{}.hdf5'.format('_'.join(sorted(self.data_types)),
self.depmap_ver))
def prepare_data(self):
self.load_communities()
self.load_known_genes()
self.load_selected_gene_list()
if not self.load_data():
self.load_dependency()
if 'rna' in self.data_types:
self.load_rna()
self.save_data()
self.align_data()
def load_selected_gene_list(self):
if isinstance(self.select_genes_in_label, str):
if self.select_genes_in_label.lower() == 'dgidb_w_interaction':
dgidb_file = os.path.join(self.data_dir, 'DGIdb_genes_w_interactions.txt')
else:
raise ValueError("Cannot recongnize select_genes_in_label {}".format(self.select_genes_in_label))
self.select_genes_in_label = pd.read_csv(dgidb_file, header=None)[0].tolist()
elif 'ref_leaf_group' in self.run_mode:
if isinstance(self.select_genes_in_label, list):
leaf_communities, df = self.load_leaf_communities()
initial_select = set(self.select_genes_in_label)
initial_n = len(initial_select)
logging.info("Selected genes {} were used to find additional genes in the same leaf gene groups".format(
self.select_genes_in_label))
leaf_communities_with_genes = {}
for group in leaf_communities:
if len(initial_select.intersection(self.community_dict[group])) > 0:
leaf_communities_with_genes[group] = len(self.community_dict[group])
                # Add leaf groups from smallest to largest until the selection reaches self.leaf_group_gene_in_label_max
for group, size in sorted(leaf_communities_with_genes.items(), key=lambda x: x[1]):
if len(initial_select | set(self.community_dict[group])) < self.leaf_group_gene_in_label_max:
initial_select |= set(self.community_dict[group])
logging.info("{} gene group was added as genes in labels".format(group))
self.select_genes_in_label = sorted(list(initial_select))
logging.info(
"Additional {} genes in the same leaf gene groups with selected genes were added".format(
len(self.select_genes_in_label) - initial_n))
def save_label_genes(self, genes):
"""Save label genes to file."""
fout = open(os.path.join(self.result_path, 'dependency_genes.tsv'), 'w')
for x in genes:
fout.write('{}\n'.format(x))
fout.close()
def save_communities(self, d=None):
"""Save community genes to file."""
if d is None:
fout = open(os.path.join(self.result_path, 'community_list.tsv'), 'w')
d = self.community_dict
s = ''
else:
fout = open(os.path.join(self.result_path, 'community_random_list.tsv'), 'w')
s = '_random'
for k, v in d.items():
fout.write('{}\n'.format('\t'.join([k + s] + v)))
fout.close()
def save_data(self):
hf = pd.HDFStore(self.hdf5_df_file)
for x in self.save_load_data:
if x in self.__dict__:
hf[x] = self.__dict__[x]
hf['data_types'] = pd.DataFrame(self.data_types)
# hf['other_cancer_types'] = pd.DataFrame(self.other_cancer_types)
# hf['cancer_type'] = pd.DataFrame([self.cancer_type])
# for ct in set(self.cancer_type_to_patients.keys()) | set(self.other_cancer_types) | set([self.cancer_type]):
# hf[ct] = pd.DataFrame(self.cancer_type_to_patients[ct])
# if 'cancer_type_to_patients_target' in self.__dict__:
# for ct in set(self.cancer_type_to_patients_target.keys()) | set(self.other_cancer_types) | set(
# [self.cancer_type]):
# hf[ct + '_target'] = pd.DataFrame(self.cancer_type_to_patients_target[ct])
hf.close()
def load_data(self):
if os.path.isfile(self.hdf5_df_file):
hf = pd.HDFStore(self.hdf5_df_file)
try:
for x in self.save_load_data:
if x in hf:
self.__dict__[x] = hf[x]
self.__dict__[x + '_all'] = self.__dict__[x].copy()
logging.info("Loaded data from existing hdf5 file.")
hf.close()
return True
except:
logging.info(
"Current Data types, Cancer type or Other cancer types do not match that of existing hdf5 file.")
hf.close()
return False
else:
return False
def load_communities(self, load_original=True):
"""Parses out a geneset from file."""
if self.load_result and not load_original:
lines = open('{}/community_list.tsv'.format(self.load_result_dir)).readlines()
ind_key = 0
ind_gene = 1
else:
lines = open('{}'.format(self.community_file)).readlines()
if 'pathway' in self.community_file.lower():
ind_key = 1
ind_gene = 3
elif self.community_file.lower().endswith('.gmt'):
ind_key = 1
ind_gene = 3
else:
ind_key = 0
ind_gene = 1
self.community_genes = set()
self.community_dict = {}
self.gene_community_dict = ddict(list)
self.community_size_dict = {}
for line in lines:
line = line.strip().split('\t')
self.community_dict[line[ind_key]] = line[ind_gene:]
self.community_size_dict[line[ind_key]] = len(line[ind_gene:])
self.community_genes |= set(line[ind_gene:])
for g in line[ind_gene:]:
self.gene_community_dict[g].append(line[ind_key])
def load_random_communities(self, load_original=True):
"""Parses out a geneset from file."""
lines = open('{}/community_random_list.tsv'.format(self.load_result_dir)).readlines()
ind_key = 0
ind_gene = 1
self.random_community_genes = set()
self.community_dict_random = {}
self.random_community_size_dict = {}
for line in lines:
line = line.strip().split('\t')
group = line[ind_key].split('_')[0]
self.community_dict_random[group] = line[ind_gene:]
self.random_community_size_dict[group] = len(line[ind_gene:])
self.random_community_genes |= set(line[ind_gene:])
def load_leaf_communities(self):
f = self.community_hierarchy_file
        # Column 0 is the parent and column 1 is the child
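        # e.g. a row like "R-HSA-109581\tR-HSA-109606" lists the parent pathway
        # first and its child second (illustrative Reactome stable IDs).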
df = pd.read_csv(f, sep='\t', header=None)
if 'Reactome' in f:
df = df.loc[df[0].str.contains('HSA')] # Get human-only pathways
# Make root as the parent of those gene groups without parents
df_root = pd.DataFrame(columns=df.columns)
for x in set(df[0]) - set(df[1]):
if x in self.community_dict or 'GO:' in x:
df_root = pd.concat([df_root, pd.DataFrame(['root', x]).T])
        # Remove relationships involving groups that are not in the analysis
df = df.loc[df[1].isin(self.community_dict.keys()) & df[0].isin(self.community_dict.keys())]
df = pd.concat([df, df_root])
leaf_communities = sorted(list((set(df[1]) - set(df[0])) & set(self.community_dict.keys())))
return leaf_communities, df
def load_random_hierarchy(self):
f = '{}/random_group_hierarchy.tsv'.format(self.load_result_dir)
df = pd.read_csv(f, sep='\t', header=None)
return df
def load_known_genes(self, depmap_ver=None):
if depmap_ver is None:
depmap_ver = self.depmap_ver
regex = re.compile(r"\d\dQ\d", re.IGNORECASE)
depmap_dir = os.environ.get('DEPMAP_DIR')
if depmap_ver not in depmap_dir:
depmap_dir = regex.sub(depmap_ver, depmap_dir)
if '19Q2' == depmap_ver or '19Q3' == depmap_ver or '19Q4' == depmap_ver or '20Q' in depmap_ver:
depmap_cell_line_file = os.path.join(depmap_dir, 'sample_info.csv')
else:
depmap_cell_line_file = os.path.join(depmap_dir, 'DepMap-20{}-celllines.csv'.format(depmap_ver.lower()))
self.cell_line_metadata = pd.read_csv(depmap_cell_line_file)
self.cell_line_metadata = self.cell_line_metadata.set_index('DepMap_ID')
try:
self.cell_line_id_mapping = self.cell_line_metadata['CCLE_Name'].to_dict()
self.cell_line_id_pri_dis = self.cell_line_metadata.set_index('CCLE_Name')
except:
self.cell_line_id_mapping = self.cell_line_metadata['CCLE Name'].to_dict()
self.cell_line_id_pri_dis = self.cell_line_metadata.set_index('CCLE Name')
try:
self.cell_line_id_pri_dis = self.cell_line_id_pri_dis['Primary Disease'].to_dict()
except:
self.cell_line_id_pri_dis = self.cell_line_id_pri_dis['lineage'].to_dict()
try:
self.cell_line_id_sub_dis = self.cell_line_metadata.set_index('CCLE_Name')
except:
self.cell_line_id_sub_dis = self.cell_line_metadata.set_index('CCLE Name')
try:
self.cell_line_id_sub_dis = self.cell_line_id_sub_dis['Subtype Disease'].to_dict()
except:
self.cell_line_id_sub_dis = self.cell_line_id_sub_dis['lineage_subtype'].to_dict()
self.cell_line_id_mapping = ddict(lambda: None, self.cell_line_id_mapping)
self.cell_line_id_pri_dis = ddict(lambda: None, self.cell_line_id_pri_dis)
self.cell_line_id_sub_dis = ddict(lambda: None, self.cell_line_id_sub_dis)
def load_dependency(self, depmap_ver=None, dep_data_type='Dependency'):
depmap_genetic_vulnerabilities_dir = os.environ.get('DEPMAP_DIR')
regex = re.compile(r"\d\dQ\d", re.IGNORECASE)
if depmap_ver is None:
depmap_ver = self.depmap_ver
if '19Q2' == depmap_ver or '19Q3' == depmap_ver or '19Q4' == depmap_ver or '20Q' in depmap_ver:
if depmap_ver not in depmap_genetic_vulnerabilities_dir:
depmap_genetic_vulnerabilities_dir = regex.sub(depmap_ver, depmap_genetic_vulnerabilities_dir)
if dep_data_type == 'CERES':
depmap_file = 'Achilles_gene_effect.csv'
elif dep_data_type == 'Dependency':
depmap_file = 'Achilles_gene_dependency.csv'
self.dependency = pd.read_csv(os.path.join(depmap_genetic_vulnerabilities_dir, depmap_file), header=0,
index_col=0)
self.dependency.columns = [x.split(' (')[0] for x in self.dependency.columns]
self.dependency = self.dependency[sorted(self.dependency.columns)]
# Map cell line id to name
self.dependency.index = [self.cell_line_id_mapping[x] if x in self.cell_line_id_mapping else x for x in
self.dependency.index]
self.dependency = self.dependency.loc[sorted(self.dependency.index)]
self.dependency = self.dependency.fillna(0)
def load_rna(self, depmap_ver=None):
depmap_genomic_characterization_dir = os.environ.get('DEPMAP_DIR')
regex = re.compile(r"\d\dQ\d", re.IGNORECASE)
if depmap_ver is None:
depmap_ver = self.depmap_ver
if '19Q2' == depmap_ver or '19Q3' == depmap_ver or '19Q4' == depmap_ver or '20Q' in depmap_ver:
if depmap_ver not in depmap_genomic_characterization_dir:
depmap_genomic_characterization_dir = regex.sub(depmap_ver, depmap_genomic_characterization_dir)
depmap_file = 'CCLE_expression.csv'
if '20Q2' in depmap_ver:
sep_str = '\t'
else:
sep_str = ','
self.rna = pd.read_csv(os.path.join(depmap_genomic_characterization_dir, depmap_file), header=0,
index_col=0, sep=sep_str)
self.rna.columns = [x.split(' (')[0] for x in self.rna.columns]
# Merge columns with the same gene symbol
dup_genes = [item for item, count in Counter(self.rna.columns).items() if count > 1]
unique_genes = list(set(self.rna.columns).difference(dup_genes))
            RNAseq_gene = self.rna[unique_genes].copy()
for col in set(dup_genes):
RNAseq_gene[col] = self.rna[col].sum(axis=1)
# Map cell line id to name
RNAseq_gene.index = [self.cell_line_id_mapping[x] if x in self.cell_line_id_mapping else x for x in
RNAseq_gene.index]
for cell in set(self.dependency.index).intersection(RNAseq_gene.index):
cell_type = self.cell_line_id_pri_dis[cell]
cell_subtype = self.cell_line_id_sub_dis[cell]
if cell_type in disease_mapping:
if cell not in self.cancer_type_to_patients[disease_mapping[cell_type]]:
self.cancer_type_to_patients[disease_mapping[cell_type]].append(cell)
elif cell_subtype in disease_mapping:
if cell not in self.cancer_type_to_patients[disease_mapping[cell_subtype]]:
self.cancer_type_to_patients[disease_mapping[cell_subtype]].append(cell)
if cell not in self.cancer_type_to_patients[cell_type]:
self.cancer_type_to_patients[cell_type].append(cell)
self.rna = RNAseq_gene
self.rna = self.rna[sorted(self.rna.columns)]
self.rna = self.rna.loc[sorted(self.rna.index)]
self.rna_all = self.rna.copy()
def _subset_samples(self):
# Get overlapping patients among data types
overlapping_patients = set(self.dependency.index)
for x in self.data_types:
# Get patient ID
overlapping_patients &= set(self.__dict__[x].index)
if self.cancer_type == 'PANC':
selected_samples = sorted(list(overlapping_patients))
else:
selected_samples = sorted(list(set(self.cancer_type_to_patients[self.cancer_type])))
overlapping_patients &= set(selected_samples)
overlapping_patients = sorted(list(overlapping_patients))
for x in self.data_types:
self.__dict__[x] = self.__dict__[x].loc[overlapping_patients]
self.dependency = self.dependency.loc[overlapping_patients]
logging.info("Total {} samples have {} and dependency data".format(
len(overlapping_patients), " ".join(self.data_types)))
def _subset_target_genes(self):
try:
self.genes_in_label = pd.read_csv(self.load_result_dir + '/dependency_genes.tsv', sep='\t', header=None)
self.genes_in_label = list(self.genes_in_label.values.T[0])
except:
if self.use_all_dependency_gene:
self.genes_in_label = sorted(list(set(self.community_genes).intersection(self.dependency.columns)))
else:
self.genes_in_label = sorted(list(set(self.genes).intersection(self.dependency.columns)))
if len(self.select_genes_in_label) > 0:
self.genes_in_label = sorted(list(set(self.genes_in_label).intersection(self.select_genes_in_label)))
genes_not_found = set(self.select_genes_in_label).difference(self.genes_in_label)
logging.debug("Genes not found: {}".format(genes_not_found))
if 'Timestamped' not in self.__class__.__name__:
logging.info("{} out of {} selected genes are in dependency data.".format(
len(self.genes_in_label) - len(genes_not_found),
len(self.select_genes_in_label)))
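        # Keep only genes with at least GSP_min dependent cell lines (score >= 0.5)
        # and at least GSN_min non-dependent cell lines.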
gsp_total = (self.dependency[self.genes_in_label] >= 0.5).sum()
cond = (gsp_total >= self.GSP_min) & (self.dependency.shape[0] - gsp_total >= self.GSN_min)
cond_col = sorted([y for x, y in zip(cond, cond.index) if x])
logging.info("{} genes have at least {} gold standard positives and {} negatives".format(len(cond_col),
self.GSP_min,
self.GSN_min))
self.dependency = self.dependency[cond_col]
self.genes_in_label = cond_col
self.gsp_n = (self.dependency >= 0.5).sum().sum()
self.gsn_n = (self.dependency < 0.5).sum().sum()
if self.use_classification:
logging.info("Positive:negative samples = {}:{}".format(self.gsp_n, self.gsn_n))
def _select_feature_genes(self):
overlapping_genes = set(self.community_genes)
try:
self.rna_mad = pd.read_csv(self.load_result_dir + '/RNA_mad.tsv', sep='\t', index_col=0)
self.rna_mad.columns = [0]
except:
overlapping_genes &= set(self.rna.columns)
self.rna = self.rna[sorted(list(overlapping_genes))]
expressed_genes = ((self.rna >= 1).sum() > (self.rna.shape[0]) * self.exp_ratio_min)
self.rna_mad = self.rna.apply(mad)
self.rna_mad = pd.DataFrame(self.rna_mad, index=self.rna.columns)
self.rna_mad = self.rna_mad.loc[expressed_genes]
self.rna_mad = self.rna_mad.sort_values(by=0, ascending=False)
self.rna_mad.to_csv(os.path.join(self.result_path, 'RNA_mad.tsv'), sep='\t')
top_mad_genes = self.rna_mad.head(min(self.rna_top_n_std, self.rna_mad.shape[0])).index
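            # Record the 20 most variable, ~20 mid-ranked, and 20 least variable
            # genes in the small prediction output set.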
self.output_pred_small += list(top_mad_genes)[0:20]
self.output_pred_small += list(top_mad_genes)[
int(self.rna_top_n_std / 2 - 10):int(self.rna_top_n_std / 2 + 10)]
self.output_pred_small += list(top_mad_genes)[-20:]
self.rna = self.rna[top_mad_genes]
overlapping_genes &= set(self.rna.columns)
self.rna = self.rna[sorted(list(overlapping_genes))]
logging.info("Total {} genes have top {} mad and gene group data".format(
len(overlapping_genes), self.rna.shape[1]))
def _filter_community(self):
com_to_drop = []
modeled_com_genes = set()
modeled_genes = set()
for data_type in self.data_types:
modeled_genes |= set(self.__dict__[data_type].columns)
for com, members in self.community_dict.items():
if self.use_all_dependency_gene:
self.community_dict[com] = sorted(
list((set(modeled_genes) & set(members)) | (set(members) & set(self.genes_in_label))))
else:
self.community_dict[com] = sorted(list(set(modeled_genes).intersection(members)))
if len(self.community_dict[com]) < self.community_affected_size_min:
com_to_drop.append(com)
elif len(self.community_dict[com]) > self.community_affected_size_max:
com_to_drop.append(com)
elif len(set(members) & set(self.genes_in_label)) < 1:
if self.require_label_gene_in_gene_group:
com_to_drop.append(com)
else:
modeled_com_genes |= set(self.community_dict[com])
else:
modeled_com_genes |= set(self.community_dict[com])
for com in com_to_drop:
self.community_dict.pop(com, None)
def _run_create_filter(self):
self.feature_genes = set()
self.genes_in_label_idx = {}
self.idx_genes_in_label = {}
self.community_filter = self.__create_filter(self.gene_community_dict, self.community_dict,
self.community_size_dict, random=False)
def __create_filter(self, gene_community_dict, community_dict, community_size_dict, random=False):
community_filter = ddict(set)
if not random:
self.genes_in_label_idx = {}
self.idx_genes_in_label = {}
i = 0
for g in self.genes_in_label:
coms = gene_community_dict[g]
coms = list(set(coms) & (community_dict.keys()))
com_size = [community_size_dict[x] for x in coms]
community_filter[g] |= set([g])
for s, com in sorted(zip(com_size, coms)):
genes = set(community_dict[com])
# Choose top n genes so that not too many features were used per gene group
if 'ref' not in self.run_mode and self.use_all_feature_for_random_group:
if len(self.data_types) > 1:
added_genes = set(genes - community_filter[g]) & (set(self.mut.columns) | set(self.rna.columns))
elif 'rna' in self.data_types:
added_genes = set(genes - community_filter[g]) & set(self.rna.columns)
elif 'mut' in self.data_types:
added_genes = set(genes - community_filter[g]) & set(self.mut.columns)
if len(added_genes) == 0:
continue
if isinstance(self.feature_per_group_max, int):
choose_n = min(self.feature_per_group_max, len(added_genes))
top_genes = list(np.random.choice(list(added_genes), choose_n, replace=False))
elif isinstance(self.feature_per_group_max, float) and self.feature_per_group_max < 1:
top_n = int(np.ceil(len(genes) * self.feature_per_group_max))  # int so np.random.choice accepts it as a count
choose_n = min(top_n, len(added_genes))
top_genes = list(np.random.choice(list(added_genes), choose_n, replace=False))
else:
raise ValueError("feature_per_group_max {} should be integer or between 0 and 1".format(
self.feature_per_group_max))
else:
if len(self.data_types) > 1:
added_genes = set(genes - community_filter[g]) & (set(self.mut.columns) | set(self.rna.columns))
variable_genes = self.rna_mad.loc[list(added_genes)].sort_values(0, ascending=False)
elif 'rna' in self.data_types:
added_genes = set(genes - community_filter[g]) & set(self.rna.columns)
variable_genes = self.rna_mad.loc[list(added_genes)].sort_values(0, ascending=False)
elif 'mut' in self.data_types:
added_genes = set(genes - community_filter[g]) & set(self.mut.columns)
variable_genes = self.mut_freq.loc[list(added_genes)].sort_values(0, ascending=False)
if isinstance(self.feature_per_group_max, int):
top_genes = variable_genes.head(self.feature_per_group_max).index
elif isinstance(self.feature_per_group_max, float) and self.feature_per_group_max < 1:
top_n = int(np.ceil(len(genes) * self.feature_per_group_max))  # int so DataFrame.head accepts it
top_genes = variable_genes.head(top_n).index
else:
raise ValueError("feature_per_group_max {} should be integer or between 0 and 1".format(
self.feature_per_group_max))
community_filter[g] |= set(top_genes)
if len(community_filter[g]) >= self.feature_max:
break
if not random:
if len(community_filter[g]) > 0:
self.genes_in_label_idx[g] = i
self.idx_genes_in_label[i] = g
i += 1
else:
logging.info("Gene {} could not find feature genes".format(g))
if not random:
logging.info(
"The dependency of total {} genes will be predicted".format(len(self.genes_in_label_idx.keys())))
return community_filter
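# A minimal standalone sketch (toy gene names and MAD values, not the class state) of the
# filter built above: for one label gene, walk its gene groups from smallest to largest and
# keep the most variable member genes per group until a feature cap is reached.
def _toy_community_filter(label_gene, gene_groups, mad, per_group_max=2, feature_max=4):
    """gene_groups: {group: [genes]}; mad: pd.Series of per-gene variability."""
    selected = {label_gene}
    for group, members in sorted(gene_groups.items(), key=lambda kv: len(kv[1])):
        candidates = sorted(set(members) - selected)
        top = mad.loc[candidates].sort_values(ascending=False).head(per_group_max)
        selected |= set(top.index)
        if len(selected) >= feature_max:
            break
    return selected
# Example with hypothetical values:
# mad = pd.Series({'TP53': 3.0, 'MDM2': 2.5, 'CDKN1A': 1.0, 'BAX': 0.7, 'EGFR': 2.0})
# groups = {'p53_core': ['TP53', 'MDM2', 'CDKN1A'], 'apoptosis': ['TP53', 'MDM2', 'BAX', 'EGFR']}
# _toy_community_filter('TP53', groups, mad)  # -> {'TP53', 'MDM2', 'CDKN1A', 'EGFR', 'BAX'}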
def _build_hierarchy(self):
leaf_communities, df = self.load_leaf_communities()
child = leaf_communities
# The layer having only gene children
level = 1
self.community_level_dict = dict()
self.level_community_dict = dict()
count_dict = ddict(int)
for x in child:
self.community_level_dict[x] = level
count_dict[x] += 1
self.level_community_dict[level] = child
# logging.info("Layer {} has {} gene groups".format(level, len(child)))
while 1:
df_level = df.loc[df[1].isin(child)]
if df_level.shape[0] == 0:
break
level += 1
parent = sorted(list(set(df_level[0])))
for parent_group in parent:
self.community_level_dict[parent_group] = level
count_dict[parent_group] += 1
self.level_community_dict[level] = parent
child = parent
# Make the layer number of each community unique
self.level_community_dict = ddict(list)
for g, level in self.community_level_dict.items():
self.level_community_dict[level].append(g)
for level, groups in sorted(self.level_community_dict.items()):
logging.info("Layer {} has {} gene groups".format(level, len(groups)))
gene_groups_all = sorted(list(self.community_dict.keys())) + ['root']
logging.info(
"Total {} layers of {} gene groups in the hierarchy including the root".format(level, len(gene_groups_all)))
feature_genes_all = []
self.feature_n = []
np.random.seed(self.params['seeds'][0])  # seed the global RNG used by np.random.choice below
for data_type in self.data_types:
feat_n = len(self.__dict__[data_type].columns)
self.feature_n.append(feat_n)
# Randomly reselect features for each feature matrix
if 'full' in self.run_mode and self.use_all_feature_for_fully_net:
feat_pool = sorted(list(self.__dict__[data_type + '_all'].columns))
feature_genes_all += feat_pool
cell_idx = self.__dict__[data_type].index
self.__dict__[data_type] = self.__dict__[data_type + '_all'].loc[cell_idx, feat_pool]
logging.info(
"Use all {} genes from {} as features to form fully connected networks".format(feat_n, data_type))
elif 'ref' not in self.run_mode and self.use_all_feature_for_random_group:
feat_pool = list(self.__dict__[data_type + '_all'].columns)
# Require gene labels in the features
pre_select = set(feat_pool) & set(self.genes_in_label)
feat_pool = sorted(list(set(feat_pool) - set(self.genes_in_label)))
random_feat = sorted(list(np.random.choice(feat_pool, feat_n - len(pre_select), replace=False)))
feature_genes_all += random_feat + list(pre_select)
feature_genes_all = sorted(feature_genes_all)
cell_idx = self.__dict__[data_type].index
self.__dict__[data_type] = self.__dict__[data_type + '_all'].loc[cell_idx, random_feat]
logging.info(
"Randomly select {} genes including {} gene of prediction from {} as features to form random gene groups".format(
feat_n, len(self.genes_in_label), data_type))
else:
feature_genes_all += sorted(list(self.__dict__[data_type].columns))
del_genes_all = sorted(list(self.genes_in_label_idx.keys()))
self.feature_n.append(len(del_genes_all))
self.genes_in_label = del_genes_all
self.save_label_genes(self.genes_in_label)
self.y = self.dependency[self.genes_in_label]
self.y_binary = ((self.y >= 0.5) + 0).astype(int)
# The order of indexed genes and gene groups:
if self.use_deletion_vector:
entity_all = feature_genes_all + del_genes_all + gene_groups_all
else:
entity_all = feature_genes_all + gene_groups_all
self.idx_name = {i: k for i, k in enumerate(entity_all)}
name_idx = ddict(list)
for k, v in self.idx_name.items():
name_idx[v].append(k)
if len(self.data_types) > 1:
self.mut_genes_idx = {}
self.rna_genes_idx = {}
for k, v in name_idx.items():
for idx in v:
if idx < self.feature_n[0]:
self.mut_genes_idx[k] = idx
elif self.feature_n[0] <= idx < self.feature_n[0] + self.feature_n[1]:
self.rna_genes_idx[k] = idx
self.feature_genes_idx = {x: min(name_idx[x]) for x in feature_genes_all}
self.del_genes_idx = {x: max(name_idx[x]) for x in del_genes_all}
self.gene_group_idx = {x: name_idx[x][0] for x in gene_groups_all}
self.community_hierarchy_dicts_all = {'idx_name': self.idx_name,
'feature_genes_idx': self.feature_genes_idx,
'del_genes_idx': self.del_genes_idx,
'gene_group_idx': self.gene_group_idx}
self.child_map_all = []
self.child_map_all_random = []
self.child_map_all_ones = []
feature_only_genes = set(feature_genes_all) - set(del_genes_all)
dep_only_genes = set(del_genes_all) - set(feature_genes_all)
feature_dep_both_genes = set(feature_genes_all) & set(del_genes_all)
gene_pool = sorted(list(set(feature_genes_all) | set(del_genes_all)))
self.community_filter_random = ddict(list)
if 'Timestamped' in self.__class__.__name__ or 'Sensitivity' in self.__class__.__name__:
self.load_random_communities()
random_hierarchy = self.load_random_hierarchy()
else:
self.community_dict_random = {}
random_hierarchy = pd.DataFrame()
self.gene_community_dict_random = ddict(list)
self.community_size_dict_random = {}
prng = np.random.RandomState(self.params['seeds'][0])
logging.info("Building gene group hierarchy")
if self.run_mode == 'random':
idx_gene_pool = {i: g for i, g in enumerate(gene_pool)}
gene_pool_idx = {g: i for i, g in enumerate(gene_pool)}
partially_shuffled_membership = self.__partially_shuffle_gene_group(gene_pool, gene_pool_idx)
idx_gene_group = {i: g for g, i in self.gene_group_idx.items()}
partially_shuffled_relation = self.__partially_shuffle_gene_group_hierarchy(df, idx_gene_group)
else:
partially_shuffled_membership = None
partially_shuffled_relation = None
idx_gene_group = None
idx_gene_pool = None
min_group_idx = min(self.gene_group_idx.values())
for group, idx in sorted(self.gene_group_idx.items()):
if group in self.community_dict:
genes = self.community_dict[group]
gene_idx = self._genes_to_feat_del_idx(genes)
if 'Timestamped' in self.__class__.__name__ or 'Sensitivity' in self.__class__.__name__:
genes_random = self.community_dict_random[group]
else:
if partially_shuffled_membership is not None:
genes_random_idx = partially_shuffled_membership[idx - min_group_idx].nonzero()[0]
genes_random = sorted([idx_gene_pool[x] for x in genes_random_idx])
else:
if self.use_consistant_groups_for_labels:
gene_pool = sorted(list(set(gene_pool) - set(self.genes_in_label)))
pre_select = set(genes) & set(self.genes_in_label)
if len(set(genes) & set(self.genes_in_label)) > 0:
random_feat = list(prng.choice(gene_pool, len(genes) - len(pre_select), replace=False))
genes_random = sorted(random_feat + list(pre_select))
else:
genes_random = sorted(
list(prng.choice(gene_pool, len(genes) - len(pre_select), replace=False)))
else:
genes_random = sorted(list(prng.choice(gene_pool, len(genes), replace=False)))
self.community_dict_random[group] = genes_random
for g in genes_random:
self.gene_community_dict_random[g].append(group)
self.community_size_dict_random[group] = len(genes_random)
feat_genes = set(genes_random) & set(self.feature_genes_idx.keys())
del_genes = set(genes_random) & set(self.del_genes_idx.keys())
if len(self.data_types) > 1:
feat_gene_idx = []
for g in feat_genes:
if g in self.mut_genes_idx:
feat_gene_idx.append(self.mut_genes_idx[g])
if g in self.rna_genes_idx:
feat_gene_idx.append(self.rna_genes_idx[g])
else:
feat_gene_idx = [self.feature_genes_idx[x] for x in feat_genes]
if self.use_deletion_vector:
del_gene_idx = [self.del_genes_idx[x] for x in del_genes]
else:
del_gene_idx = []
gene_idx_random = feat_gene_idx + del_gene_idx
else:
gene_idx = []
gene_idx_random = []
child = sorted(df.loc[df[0] == group, 1].tolist())
child_idx = sorted([self.gene_group_idx[x] for x in child if x in self.gene_group_idx])
self.child_map_all.append(sorted(gene_idx + child_idx))
if len(self.child_map_all[-1]) == 0:
logging.info("Gene group {} does not have children".format(group))
# Build random group hierarchy
if 'Timestamped' in self.__class__.__name__ or 'Sensitivity' in self.__class__.__name__:
child_random = sorted(random_hierarchy.loc[random_hierarchy[0] == group, 1].tolist())
child_idx_random = sorted([self.gene_group_idx[x] for x in child_random if x in self.gene_group_idx])
else:
if partially_shuffled_relation is not None:
child_idx_random = partially_shuffled_relation[idx - min_group_idx, :].nonzero()[0]
child_idx_random = [x + min_group_idx for x in child_idx_random]
child_random = sorted([idx_gene_group[x] for x in child_idx_random])
else:
child_idx_random = []
child_random = []
for c in child:
child_level = self.community_level_dict[c]
random_child = prng.choice(self.level_community_dict[child_level], 1, replace=False)[0]
child_random.append(random_child)
random_c_idx = self.gene_group_idx[random_child]
child_idx_random.append(random_c_idx)
for rc in sorted(child_random):
random_hierarchy = pd.concat([random_hierarchy, pd.DataFrame([group, rc]).T], axis=0)
self.child_map_all_random.append(sorted(gene_idx_random + child_idx_random))
if len(gene_idx) != len(gene_idx_random):
    logging.debug("Random gene number does not match for group {}".format(group))
# Children for fully connected neural networks
if group in leaf_communities:
gene_idx_ones = list(self.feature_genes_idx.values())
else:
gene_idx_ones = []
parent_level = self.community_level_dict[group]
child_level = parent_level - 1
if child_level in self.level_community_dict:
child_ones = self.level_community_dict[child_level]
else:
child_ones = []
child_idx_ones = [self.gene_group_idx[x] for x in child_ones if x in self.gene_group_idx]
self.child_map_all_ones.append(sorted(gene_idx_ones + child_idx_ones))
self.save_communities(self.community_dict_random)
# Save random hierarchy as file
random_hierarchy.to_csv(os.path.join(self.result_path, 'random_group_hierarchy.tsv'),
index=None, sep='\t', header=None)
self.community_filter_random = self.__create_filter(self.gene_community_dict_random, self.community_dict_random,
self.community_size_dict_random, random=True)
self.community_filter_map = []
self.community_filter_map_random = []
feature_n = len(feature_genes_all)
for g in del_genes_all:
feat_genes = set(self.community_filter[g])
if len(self.data_types) > 1:
feat_gene_idx = []
for fg in feat_genes:  # distinct loop variable keeps g, the label gene of the outer loop, intact
    if fg in self.mut_genes_idx:
        feat_gene_idx.append(self.mut_genes_idx[fg])
    if fg in self.rna_genes_idx:
        feat_gene_idx.append(self.rna_genes_idx[fg])
feat_gene_idx = sorted(feat_gene_idx)
else:
feat_gene_idx = sorted([self.feature_genes_idx[x] for x in feat_genes if x in self.feature_genes_idx])
feat_genes_array = np.zeros(feature_n)
feat_genes_array[feat_gene_idx] = 1
self.community_filter_map.append(feat_genes_array)
feat_genes_random = set(self.community_filter_random[g])
if len(self.data_types) > 1:
feat_genes_random_idx = []
for fg in feat_genes_random:  # map each random-filter gene to its feature indices
    if fg in self.mut_genes_idx:
        feat_genes_random_idx.append(self.mut_genes_idx[fg])
    if fg in self.rna_genes_idx:
        feat_genes_random_idx.append(self.rna_genes_idx[fg])
feat_genes_random_idx = sorted(feat_genes_random_idx)
else:
feat_genes_random_idx = sorted(
[self.feature_genes_idx[x] for x in feat_genes_random if x in self.feature_genes_idx])
feat_genes_array = np.zeros(feature_n)
feat_genes_array[feat_genes_random_idx] = 1
self.community_filter_map_random.append(feat_genes_array)
def __partially_shuffle_gene_group(self, gene_pool, gene_pool_idx):
group_gene_membership_matrix = np.zeros([len(self.gene_group_idx), len(gene_pool)])
min_group_idx = min(self.gene_group_idx.values())
for group, idx in sorted(self.gene_group_idx.items()):
if group in self.community_dict:
idx -= min_group_idx
genes = self.community_dict[group]
gene_idx = [gene_pool_idx[gene] for gene in genes]
group_gene_membership_matrix[idx, gene_idx] = 1
all_idx = group_gene_membership_matrix.nonzero()
prng = np.random.RandomState(self.random_group_permutation_seed)
shuffled_number = int(self.random_group_permutation_ratio * len(all_idx[0]))
shuffled_relationship_idx = prng.choice(range(len(all_idx[0])), shuffled_number, replace=False)
logging.info(
f"{self.random_group_permutation_ratio*100}% ({shuffled_number}) of gene membership was randomly shuffled")
# No shuffling
if self.random_group_permutation_ratio == 0:
return group_gene_membership_matrix
connections_to_shuffled = np.zeros([len(self.gene_group_idx), len(gene_pool)])
connections_to_shuffled[all_idx[0][shuffled_relationship_idx], all_idx[1][shuffled_relationship_idx]] = 1
partially_shuffled_membership = np.zeros([len(self.gene_group_idx), len(gene_pool)])
for i in range(group_gene_membership_matrix.shape[0]):
original = group_gene_membership_matrix[i].nonzero()[0]
to_shuffled = connections_to_shuffled[i].nonzero()[0]
if len(to_shuffled) > 0:
keep = list(set(original) - set(to_shuffled))
pool = sorted(list(set(range(len(group_gene_membership_matrix[i]))) - set(keep)))
after_shuffled = list(prng.choice(pool, len(to_shuffled), replace=False))
partially_shuffled_membership[i][keep + after_shuffled] = 1
else:
partially_shuffled_membership[i][original] = 1
return partially_shuffled_membership
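# Standalone sketch of the partial-shuffle idea above (toy matrix, per-row variant):
# keep (1 - ratio) of each row's 1-entries and re-draw the rest from positions outside
# the kept set, so every gene group keeps its original size.
def _toy_partial_shuffle(membership, ratio, seed=0):
    rng = np.random.RandomState(seed)
    out = np.zeros_like(membership)
    for i, row in enumerate(membership):
        ones = row.nonzero()[0]
        n_shuffle = int(round(ratio * len(ones)))
        keep = list(rng.choice(ones, len(ones) - n_shuffle, replace=False))
        pool = sorted(set(range(len(row))) - set(keep))
        redraw = list(rng.choice(pool, n_shuffle, replace=False))
        out[i, keep + redraw] = 1
    return out
# _toy_partial_shuffle(np.array([[1, 1, 0, 0, 1], [0, 1, 1, 0, 0]]), ratio=0.5)
# row sums are preserved (3 and 2) while roughly half of the memberships move.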
def __partially_shuffle_gene_group_hierarchy(self, df, idx_gene_group):
gene_group_relation_matrix = np.zeros([len(self.gene_group_idx), len(self.gene_group_idx)])
min_group_idx = min(self.gene_group_idx.values())
for _, row in df.iterrows():
parent = self.gene_group_idx[row[0]] - min_group_idx
child = self.gene_group_idx[row[1]] - min_group_idx
gene_group_relation_matrix[parent, child] = 1
all_idx = gene_group_relation_matrix.nonzero()
prng = np.random.RandomState(self.random_group_permutation_seed)
shuffled_number = int(self.random_group_hierarchy_permutation_ratio * len(all_idx[0]))
shuffled_relationship_idx = prng.choice(range(len(all_idx[0])), shuffled_number, replace=False)
logging.info(
f"{self.random_group_hierarchy_permutation_ratio*100}% ({shuffled_number}) of gene group hierarchy was randomly shuffled")
connections_to_shuffled = np.zeros(gene_group_relation_matrix.shape)
connections_to_shuffled[all_idx[0][shuffled_relationship_idx], all_idx[1][shuffled_relationship_idx]] = 1
partially_shuffled_relation = np.zeros(gene_group_relation_matrix.shape)
# No shuffling
if self.random_group_hierarchy_permutation_ratio == 0:
return gene_group_relation_matrix
# Shuffle child group for each parent
for i in range(gene_group_relation_matrix.shape[0]):
original = gene_group_relation_matrix[i].nonzero()[0]
to_shuffled = connections_to_shuffled[i].nonzero()[0]
if len(to_shuffled) > 0:
keep = list(set(original) - set(to_shuffled))
children = [idx_gene_group[x + min_group_idx] for x in to_shuffled]
child_levels = [self.community_level_dict[child] for child in children]
after_shuffled = []
for child_level in child_levels:
random_child = prng.choice(self.level_community_dict[child_level], 1, replace=False)[0]
random_child_idx = self.gene_group_idx[random_child] - min_group_idx
after_shuffled.append(random_child_idx)
after_shuffled = list(set(after_shuffled))
partially_shuffled_relation[i][keep + after_shuffled] = 1
else:
partially_shuffled_relation[i][original] = 1
return partially_shuffled_relation
def _genes_to_feat_del_idx(self, genes):
feat_genes = set(genes) & set(self.feature_genes_idx.keys())
del_genes = set(genes) & set(self.del_genes_idx.keys())
if len(self.data_types) > 1:
feat_gene_idx = []
for g in feat_genes:
if g in self.mut_genes_idx:
feat_gene_idx.append(self.mut_genes_idx[g])
if g in self.rna_genes_idx:
feat_gene_idx.append(self.rna_genes_idx[g])
else:
feat_gene_idx = [self.feature_genes_idx[x] for x in feat_genes]
if self.use_deletion_vector:
del_gene_idx = [self.del_genes_idx[x] for x in del_genes]
else:
del_gene_idx = []
gene_idx = feat_gene_idx + del_gene_idx
return gene_idx
def _get_genes_in_child_group(self, group, genes_in_child_gene_group=set()):
_, df = self.load_leaf_communities()
children = df.loc[df[0] == group, 1].tolist()
for child in children:
if child in self.community_dict:
genes = self.community_dict[child]
genes_in_child_gene_group |= set(genes)
self._get_genes_in_child_group(child, genes_in_child_gene_group)
return genes_in_child_gene_group
def align_data(self):
self._subset_samples()
self._subset_target_genes()
self._select_feature_genes()
self._filter_community()
self._run_create_filter()
if len(self.data_types) > 1:
self.X = pd.concat([self.mut, self.rna], axis=1)
else:
self.X = self.__dict__[self.data_types[0]]
self.X_all = self.X
self._build_hierarchy()
# self._refine_community()
logging.info("Generating data splits for {} repeats and {} folds".format(self.repeat_n, self.fold_n))
self.split_data()
def split_data(self):
self.split_idx = dict()
for repeat in range(self.repeat_n):
seed = self.params['seeds'][repeat]
if self.split_by_cancer_type and self.cancer_type == 'PANC':
cancer_type_id = ddict(list)
for x in self.X.index:
t = '_'.join(x.split('_')[1:])
cancer_type_id[t].append(x)
self.split_idx[repeat] = [ddict(list) for _ in range(self.fold_n)]
for j, (cancer_type, idx) in enumerate(cancer_type_id.items()):
logging.debug("{} has {} cell lines".format(cancer_type, len(idx)))
if len(idx) >= self.fold_n + 1:
logging.debug("{} has {} cell lines splitting".format(cancer_type, len(idx)))
split_subidx = self._split_data(self.X.loc[idx], self.y.loc[idx], seed)
for fold, split_dict in enumerate(split_subidx):
for split_type in split_dict.keys():
self.split_idx[repeat][fold][split_type] += list(split_dict[split_type])
if 'Timestamped' in self.__class__.__name__ or 'Sensitivity' in self.__class__.__name__:
target_idx = set(self.dependency_target.index) & set(self.rna_target_all.index)
target_idx_only = target_idx - set(self.dependency.index)
target_idx_only = sorted(list(target_idx_only))
for fold in range(len(self.split_idx[repeat])):
self.split_idx[repeat][fold]['test'] = target_idx_only
self.X_all = pd.concat([self.X_all, self.rna_target.loc[target_idx_only, self.X_all.columns]])
self.y = pd.concat([self.y, self.dependency_target.loc[target_idx_only, self.y.columns]])
y_binary_target = ((self.y.loc[target_idx_only] >= 0.5) + 0).astype(int)
self.y_binary = pd.concat([self.y_binary, y_binary_target])
else:
self.split_idx[repeat] = self._split_data(self.X, self.y, seed)
def _split_data(self, X, y, seed):
kf1 = KFold(n_splits=self.fold_n, shuffle=True, random_state=seed)  # shuffle=True is required for random_state to take effect
split_idx = []
for fold, (train_index, test_index) in enumerate(kf1.split(X, y)):
split_dict = dict()
split_dict['test'] = list(X.index[test_index])
# Generate validation data by splitting part of training data
X_train, y_train = X.loc[X.index[train_index]], y.loc[X.index[train_index]]
if X_train.shape[0] < self.fold_n:
return []
kf = KFold(n_splits=self.fold_n, shuffle=True, random_state=seed)
for fold_2, (train_index, test_index) in enumerate(kf.split(X_train, y_train)):
split_dict['train'] = list(X_train.index[train_index])
split_dict['val'] = list(X_train.index[test_index])
if fold_2 == fold:  # pair the k-th inner split with the k-th outer fold so validation sets differ across folds
break
split_idx.append(split_dict)
return split_idx
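# Standalone sketch (toy data; scikit-learn's KFold as imported above) of the nested split:
# the outer KFold defines the test fold, and an inner KFold over the remaining samples
# supplies train/validation, taking the k-th inner split for the k-th outer fold.
def _toy_nested_split(n_samples=10, n_folds=5, seed=0):
    X = np.arange(n_samples).reshape(-1, 1)
    outer = KFold(n_splits=n_folds, shuffle=True, random_state=seed)
    splits = []
    for k, (train_val, test) in enumerate(outer.split(X)):
        inner = KFold(n_splits=n_folds, shuffle=True, random_state=seed)
        train, val = list(inner.split(train_val.reshape(-1, 1)))[k]
        splits.append({'train': train_val[train], 'val': train_val[val], 'test': test})
    return splits
# Each sample appears exactly once as 'test' across the outer folds, while the validation
# indices change from fold to fold.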
def get_split_data(self, i, j):
self.idx['train'] = self.split_idx[i][j]['train']
self.idx['val'] = self.split_idx[i][j]['val']
self.idx['test'] = self.split_idx[i][j]['test']
if self.use_binary_dependency:
y = self.y_binary
else:
y = self.y
self.X_train, self.y_train = self.X_all.loc[self.idx['train']].values, y.loc[self.idx['train']].values
self.X_val, self.y_val = self.X_all.loc[self.idx['val']].values, y.loc[self.idx['val']].values
self.X_test, self.y_test = self.X_all.loc[self.idx['test']].values, y.loc[self.idx['test']].values
if 'cl3_' in self.model_v or 'cl5_' in self.model_v:
scaler = StandardScaler()
self.y_train2 = scaler.fit_transform(self.y_train)
self.y_val2 = scaler.transform(self.y_val)
self.y_test2 = scaler.transform(self.y_test)
elif 'clh_' in self.model_v:
self.y_train2 = self.y_train
self.y_val2 = self.y_val
self.y_test2 = self.y_test
else:
self.y_train2 = None
self.y_val2 = None
self.y_test2 = None
logging.info("Repeat {}, fold {}".format(i, j))
logging.info("Training data shape X: {}, y: {}".format(self.X_train.shape, self.y_train.shape))
logging.info("y label counts: {}".format(Counter(np.argmax(self.y_train, axis=1))))
logging.info("Validation data shape X: {}, y: {}".format(self.X_val.shape, self.y_val.shape))
logging.info("y label counts: {}".format(Counter(np.argmax(self.y_val, axis=1))))
logging.info("Test data shape X: {}, y: {}".format(self.X_test.shape, self.y_test.shape))
logging.info("y label counts: {}".format(Counter(np.argmax(self.y_test, axis=1))))
def perform(self, model_name, params=None):
if params is None:
params = self.params
save_params(self.result_path, params)
if self.cv_fold != 0:
if 'models' in params and 'random_forest' in self.run_mode:
self.perform_cv('random_forest', params)
else:
self.perform_cv(model_name, params)
else:
self.prepare_data()
# self.community_filter_ones = np.ones(self.community_filter.shape)
model_name_base = model_name
for repeat in range(self.repeat_n):
params['seed'] = params['seeds'][repeat]
# self.community_matrix_random = lil_matrix(self.community_matrix.shape)
np.random.seed(params['seed'])
if 'clh_v' in self.model_v:
mask = self.child_map_all
mask_random = self.child_map_all_random
mask_ones = self.child_map_all_ones
else:
mask = self.community_hierarchy
mask_random = self.community_hierarchy_random
mask_ones = self.community_hierarchy_ones
for fold in range(len(self.split_idx[repeat])):
model_suffix = str(params['seed']) + 'repeat' + str(repeat) + 'fold' + str(fold)
self.get_split_data(repeat, fold)
self.calculate_weights()
self.normalize_data()
if 'ref' in self.run_mode:
self.run_exp(model_name_base, model_suffix,
params, mask, repeat, fold, self.community_filter_map)
elif 'random_forest' in self.run_mode.lower():
self.run_exp('random_forest', model_suffix,
params, mask, repeat, fold, None, mask_ones)
elif 'random_predictor' in self.run_mode:
self.run_exp('random_predictor', model_suffix,
params, mask_random, repeat, fold, self.community_filter_map_random)
elif 'random' in self.run_mode:
self.run_exp('random_control', model_suffix,
params, mask_random, repeat, fold, self.community_filter_map_random)
elif 'expression_control' in self.run_mode:
self.run_exp('expression_control', model_suffix,
params, mask_random, repeat, fold, self.community_filter_map_random)
elif 'full' in self.run_mode:
self.run_exp('gene_control', model_suffix,
params, mask, repeat, fold, None, mask_ones)
def calculate_weights(self):
if self.use_class_weights:
gsp_n = (self.y_train >= 0.5).sum().sum()
gsn_n = (self.y_train < 0.5).sum().sum()
if self.use_normalized_class_weights:
self.class_weight_neg = (gsp_n + gsn_n) / (2.0 * (gsn_n))
self.class_weight_pos = (gsp_n + gsn_n) / (2.0 * (gsp_n))
else:
self.class_weight_neg = gsp_n / gsn_n
self.class_weight_pos = 1
else:
self.class_weight_neg = None
self.class_weight_pos = None
if self.use_sample_class_weights:
gsp_n = (self.y_train >= 0.5).sum(axis=0)
gsn_n = (self.y_train < 0.5).sum(axis=0)
if self.use_normalized_sample_class_weights:
self.sample_class_weight_neg = (gsp_n + gsn_n) / (2.0 * (gsn_n))
self.sample_class_weight_pos = (gsp_n + gsn_n) / (2.0 * (gsp_n))
else:
self.sample_class_weight_neg = gsp_n / gsn_n
self.sample_class_weight_pos = np.array([1] * len(gsn_n))
else:
self.sample_class_weight_neg = None
self.sample_class_weight_pos = None
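# Quick numeric check of the normalized ("balanced"-style) weights above, independent of the
# class state: with P positives and N negatives, w_neg = (P + N) / (2 * N) and
# w_pos = (P + N) / (2 * P), so the rarer class gets the larger weight and
# w_neg * N == w_pos * P, i.e. both classes contribute equally to the loss.
# P, N = 120, 880
# w_neg = (P + N) / (2.0 * N)   # ~0.57
# w_pos = (P + N) / (2.0 * P)   # ~4.17
# w_neg * N == w_pos * P == 500.0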
def split_data_cv(self):
self.split_idx_cv = ddict(list)
for repeat in range(self.repeat_n):
seed = self.params['seeds'][repeat]
kf1 = KFold(n_splits=self.cv_fold, shuffle=True, random_state=seed)  # shuffle=True required when passing random_state
idx = sorted(list(self.idx['train']) + list(self.idx['val']))
X_train_val = self.X_all.loc[idx]
y_train_val = self.y.loc[idx]
for train_index, val_index in kf1.split(X_train_val, y_train_val):
split_dict = {}
split_dict['train'] = X_train_val.index[train_index]
split_dict['val'] = X_train_val.index[val_index]
self.split_idx_cv[repeat].append(split_dict)
def get_split_data_cv(self, i, j):
self.idx['train'] = self.split_idx_cv[i][j]['train']
self.idx['val'] = self.split_idx_cv[i][j]['val']
if self.use_binary_dependency:
y = self.y_binary
else:
y = self.y
self.X_train, self.y_train = self.X_all.loc[self.idx['train']].values, y.loc[self.idx['train']].values
self.X_val, self.y_val = self.X_all.loc[self.idx['val']].values, y.loc[self.idx['val']].values
if 'cl3_' in self.model_v or 'cl5_' in self.model_v:
scaler = StandardScaler()
self.y_train2 = scaler.fit_transform(self.y_train)
self.y_val2 = scaler.transform(self.y_val)
self.y_test2 = scaler.transform(self.y_test)
elif 'clh_' in self.model_v:
self.y_train2 = self.y_train
self.y_val2 = self.y_val
self.y_test2 = self.y_test
else:
self.y_train2 = None
self.y_val2 = None
self.y_test2 = None
logging.info("Repeat {}, cv_fold {}".format(i, j))
logging.info("Training data shape X: {}, y: {}".format(self.X_train.shape, self.y_train.shape))
logging.info("y label counts: {}".format(Counter(np.argmax(self.y_train, axis=1))))
logging.info("Validation data shape X: {}, y: {}".format(self.X_val.shape, self.y_val.shape))
logging.info("y label counts: {}".format(Counter(np.argmax(self.y_val, axis=1))))
def _normalize_rna(self, X_train, X_val, X_test):
# scaler = MinMaxScaler()
# self.X_train = scaler.fit_transform(self.X_train)
# self.X_val = scaler.transform(self.X_val)
# self.X_test = scaler.transform(self.X_test)
# self.X_train = np.log2(self.X_train + 1)
# self.X_val = np.log2(self.X_val + 1)
# self.X_test = np.log2(self.X_test + 1)
if self.use_StandardScaler:
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
# feature_no_info = ((self.X_train.sum(axis=0) == 0) + 0).nonzero()[0]
X_val = scaler.transform(X_val)
# self.X_val[self.X_val > self.X_train.max()] = self.X_train.max()
# self.X_val[:, feature_no_info] = 0
X_test = scaler.transform(X_test)
if self.use_sigmoid_feature:
X_train = 1 / (1 + np.exp(-X_train))
X_val = 1 / (1 + np.exp(-X_val))
X_test = 1 / (1 + np.exp(-X_test))
if self.use_tanh_feature:
X_train = np.tanh(X_train)
X_val = np.tanh(X_val)
X_test = np.tanh(X_test)
if self.use_MinMaxScaler:
scaler = MinMaxScaler(feature_range=(0, 1))
X_train = scaler.fit_transform(X_train)
X_val = scaler.transform(X_val)
X_test = scaler.transform(X_test)
if self.clip_Xval_Xtest is not None:
logging.info("Before cliping,\n"
"Val data (min,max) = ({}, {})\n"
"Test data (min,max) = ({}, {})".format(
X_val.min(),
X_val.max(),
X_test.min(),
X_test.max(),
))
X_val = np.clip(X_val, self.clip_Xval_Xtest[0], self.clip_Xval_Xtest[1])
X_test = np.clip(X_test, self.clip_Xval_Xtest[0], self.clip_Xval_Xtest[1])
return X_train, X_val, X_test
def normalize_data(self):
self.X_train = np.nan_to_num(self.X_train)
self.X_val = np.nan_to_num(self.X_val)
self.X_test = np.nan_to_num(self.X_test)
self.X_train, self.X_val, self.X_test = self._normalize_rna(self.X_train, self.X_val, self.X_test)
def run_exp(self, model_name, model_suffix, params, com_mat, repeat, fold,
community_filter=None, com_mat_fully=None):
logging.info("Running {} repeat {} fold {}".format(model_name, repeat, fold))
output_prefix = model_name + model_suffix
if 'random_predictor' in model_name:
self.compute_metric(None, 'test', model_name, model_suffix, self.y_train, self.y_test, com_mat, repeat,
self.y_test2)
elif 'mean_control' in model_name:
# self.compute_metric(cm, 'train', model_name, model_suffix, self.y_train, self.y_train, com_mat, repeat,
# self.y_train2)
# self.compute_metric(cm, 'val', model_name, model_suffix, self.y_train, self.y_val, com_mat, repeat,
# self.y_val2)
self.compute_metric(None, 'test', model_name, model_suffix, self.y_train, self.y_test, com_mat, repeat,
self.y_test2)
elif 'expression_control' in model_name:
self.compute_metric(None, 'test', model_name, model_suffix, self.X_test, self.y_test, com_mat, repeat,
self.y_test2)
elif 'random_forest' in model_name:
sk_all = []
params['n_jobs'] = -1
for i in range(self.y_train.shape[1]):
sk = SklearnModel(model_name + model_suffix, params)
sk.train(self.X_train, self.y_train[:, i])
sk_all.append(sk)
self.compute_metric(sk_all, 'train', model_name, model_suffix, self.X_train, self.y_train, com_mat, repeat,
self.y_train2)
self.compute_metric(sk_all, 'val', model_name, model_suffix, self.X_val, self.y_val, com_mat, repeat,
self.y_val2)
self.compute_metric(sk_all, 'test', model_name, model_suffix, self.X_test, self.y_test, com_mat, repeat,
self.y_test2)
else:
if self.use_community_filter:
cm = BioVNNmodel(model_name + model_suffix, params, self.result_path,
self.community_hierarchy_dicts_all, community_filter,
class_weight_neg=self.class_weight_neg,
class_weight_pos=self.class_weight_pos,
sample_class_weight_neg=self.sample_class_weight_neg,
sample_class_weight_pos=self.sample_class_weight_pos,
group_level_dict=self.community_level_dict,
level_group_dict=self.level_community_dict)
else:
cm = BioVNNmodel(model_name + model_suffix, params, self.result_path,
self.community_hierarchy_dicts_all,
class_weight_neg=self.class_weight_neg,
class_weight_pos=self.class_weight_pos,
sample_class_weight_neg=self.sample_class_weight_neg,
sample_class_weight_pos=self.sample_class_weight_pos,
group_level_dict=self.community_level_dict,
level_group_dict=self.level_community_dict)
if hasattr(self, 'load_result_dir'):
load_ckpt = os.path.join(self.load_result_dir,
'{}_{}_{}.tar'.format(model_name + model_suffix, self.model_v,
params['seed']))
cm.train(self.X_train, com_mat, self.y_train, load_weight_dir=load_ckpt, mask_fully=com_mat_fully)
else:
y_val_index = self.idx['val']
y_col = self.y.columns
cm.train(self.X_train, com_mat, self.y_train, None, self.X_val, self.y_val, y_train2=self.y_train2,
y_val2=self.y_val2, output_prefix=output_prefix, y_val_index=y_val_index, y_col=y_col,
mask_fully=com_mat_fully)
self._clear_gpu(model_name, model_suffix)
cm.train(self.X_train, com_mat, self.y_train, mask_fully=com_mat_fully)
# self.analyze_weights(cm, model_name, model_suffix)
self._clear_gpu(model_name, model_suffix)
self.compute_metric(cm, 'train', model_name, model_suffix, self.X_train, self.y_train, com_mat, repeat,
self.y_train2)
self._clear_gpu(model_name, model_suffix)
self.compute_metric(cm, 'val', model_name, model_suffix, self.X_val, self.y_val, com_mat, repeat,
self.y_val2)
self._clear_gpu(model_name, model_suffix)
self.compute_metric(cm, 'test', model_name, model_suffix, self.X_test, self.y_test, com_mat, repeat,
self.y_test2)
self._clear_gpu(model_name, model_suffix)
model_suffix = str(params['seed']) + 'repeat' + str(repeat)
self.compute_metric_all_test('test', model_name, model_suffix, self.X_test, self.y_test, repeat)
self.output_metric()
def run_exp_cv(self, model_name, model_suffix, params, com_mat, repeat, fold,
community_filter=None, com_mat_fully=None, grid_name=None):
logging.info("Running {}".format(model_suffix))
if 'random_forest' in self.run_mode:
embed()  # interactive IPython breakpoint; the random-forest CV path exits immediately below
sys.exit(0)
params['n_jobs'] = -1
sk = SklearnModel(model_name + model_suffix, params)
sk.train(self.X_train, self.y_train)
self.compute_metric(sk, 'train', model_name, model_suffix, self.X_train, self.y_train, com_mat, repeat)
self.compute_metric(sk, 'val', model_name, model_suffix, self.X_val, self.y_val, com_mat, repeat)
# sk_all = []
# for i in range(self.y_train.shape[1]):
# sk = SklearnModel(model_name + model_suffix, params)
# sk.train(self.X_train, self.y_train[:, i])
# sk_all.append(sk)
#
# self.compute_metric(sk_all, 'train', model_name, model_suffix, self.X_train, self.y_train, com_mat, repeat)
# self.compute_metric(sk_all, 'val', model_name, model_suffix, self.X_val, self.y_val, com_mat, repeat)
else:
if self.use_community_filter:
cm = BioVNNmodel(model_name + model_suffix, params, self.result_path,
self.community_hierarchy_dicts_all, community_filter,
class_weight_neg=self.class_weight_neg,
class_weight_pos=self.class_weight_pos,
sample_class_weight_neg=self.sample_class_weight_neg,
sample_class_weight_pos=self.sample_class_weight_pos,
group_level_dict=self.community_level_dict,
level_group_dict=self.level_community_dict)
else:
cm = BioVNNmodel(model_name + model_suffix, params, self.result_path,
self.community_hierarchy_dicts_all,
class_weight_neg=self.class_weight_neg,
class_weight_pos=self.class_weight_pos,
sample_class_weight_neg=self.sample_class_weight_neg,
sample_class_weight_pos=self.sample_class_weight_pos,
group_level_dict=self.community_level_dict,
level_group_dict=self.level_community_dict)
y_val_index = self.idx['val']
y_col = self.y.columns
output_prefix = model_name + model_suffix
cm.train(self.X_train, com_mat, self.y_train, None, self.X_val, self.y_val, y_train2=self.y_train2,
y_val2=self.y_val2, output_prefix=output_prefix, y_val_index=y_val_index, y_col=y_col,
mask_fully=com_mat_fully)
self._clear_gpu(model_name, model_suffix)
cm.train(self.X_train, com_mat, self.y_train)
self.compute_metric(cm, 'val', model_name, model_suffix, self.X_val, self.y_val, com_mat, repeat,
self.y_val2)
self._clear_gpu(model_name, model_suffix)
if not self.save_model_ckpt:
cm._rm_ckpt()
self.output_metric()
model_suffix = str(params['seed']) + 'repeat' + str(repeat) + '_' + grid_name
self.compute_metric_all_test('val', model_name, model_suffix, self.X_test, self.y_test, repeat)
self.output_metric()
metric_output = {}
for x in self.metric_output:
if self.metric_output[x].shape[0] > 0:
df = self.metric_output[x].copy()
df = df.loc[['fold' not in y for y in df.index]]
if df.shape[0] > 0:
grid_df = self.grid_df.copy().T
grid_df.index = df.index
metric_output[x] = pd.concat([df, grid_df], axis=1)
self.output_metric(metric_output, '_all')
def perform_cv(self, model_name, params):
grid = ParameterGrid(params['grid_search'])
params_backbone = params.copy()
self.grid_df = pd.DataFrame()
logging.info("{} points are searching in grid".format(len(grid)))
for i, param_grid in enumerate(grid):
self.grid_df = pd.concat([self.grid_df, pd.Series(param_grid, name=i)], axis=1)
self.grid_df.to_csv(os.path.join(self.result_path, 'grid_cv.tsv'), sep="\t")
grid_name = 'grid_{}'.format(i)
logging.info("Running cross-validation for \n{}".format(self.grid_df[i]))
params.update(param_grid)
self.__dict__.update(param_grid)
self.prepare_data()
for repeat in range(self.repeat_n):
params['seed'] = params['seeds'][repeat]
# self.community_matrix_random = lil_matrix(self.community_matrix.shape)
np.random.seed(params['seed'])
if 'clh_v' in self.model_v:
mask = self.child_map_all
mask_random = self.child_map_all_random
mask_ones = self.child_map_all_ones
else:
mask = self.community_hierarchy
mask_random = self.community_hierarchy_random
mask_ones = self.community_hierarchy_ones
self.split_data()
for cv_fold in range(self.fold_n):
model_suffix = str(params['seed']) + 'repeat' + str(
repeat) + '_' + grid_name + '_' + 'cv_fold' + str(cv_fold)
self.get_split_data(repeat, cv_fold)
self.calculate_weights()
self.normalize_data()
if 'ref' in self.run_mode:
self.run_exp_cv(model_name, model_suffix,
params, mask, repeat, cv_fold, self.community_filter_map,
grid_name=grid_name)
elif 'random_forest' in self.run_mode:
self.run_exp_cv('random_forest', model_suffix,
params, mask_random, repeat, cv_fold, None,
grid_name=grid_name)
elif 'random_predictor' in self.run_mode:
self.run_exp_cv('random_predictor', model_suffix,
params, mask_random, repeat, cv_fold, self.community_filter_map_random,
grid_name=grid_name)
elif 'random' in self.run_mode:
self.run_exp_cv('random_control', model_suffix,
params, mask_random, repeat, cv_fold, self.community_filter_map_random,
grid_name=grid_name)
elif 'full' in self.run_mode:
self.run_exp_cv('gene_control', model_suffix,
params, mask, repeat, cv_fold, None, mask_ones, grid_name=grid_name)
if self.cv_fold_only_run == (cv_fold + 1):
break
def _clear_gpu(self, model_name, model_suffix):
if self.use_cuda:
logging.debug("Clearing session {}".format(model_name + model_suffix))
msg = 'before clean'
output = subprocess.run('nvidia-smi', stdout=subprocess.PIPE)
logging.debug("{}\n{}".format(msg, str(output).replace('\\n', '\n')))
torch.cuda.empty_cache()
msg = 'after clean'
output = subprocess.run('nvidia-smi', stdout=subprocess.PIPE)
logging.debug("{}\n{}".format(msg, str(output).replace('\\n', '\n')))
logging.debug("Cleared session {}".format(model_name + model_suffix))
def compute_metric(self, cm, data_type, model_name, model_suffix, X, y_true, com_mat, repeat, y_true2=None,
pred=None):
output_prefix = model_name + model_suffix + '_' + data_type
y_index = self.idx[data_type]
y_col = self.y.columns
pred2 = None
if pred is None:
if 'random_predictor' in model_name:
pred = []
prng = np.random.RandomState()
for i in range(y_true.shape[1]):
pred.append(prng.rand(y_true.shape[0], 1))
pred = np.concatenate(pred, axis=1).flatten()
elif 'mean_control' in model_name:
pred = np.tile(X.mean(axis=0), y_true.shape[0])
elif 'expression_control' in model_name:
x_df = pd.DataFrame(X, columns=self.rna.columns, index=y_index)
y_df = pd.DataFrame(y_true, columns=self.dependency.columns, index=y_index)
genes_without_expression = sorted(list(set(y_df.columns) - set(x_df.columns)))
logging.info(f'{len(genes_without_expression)} genes do not have expression for prediction')
genes_with_expression = sorted(list(set(y_df.columns) & set(x_df.columns)))
logging.info(f'{len(genes_with_expression)} genes having expression were used to predict dependency')
pred = x_df[genes_with_expression]
y_true = y_df[genes_with_expression]
elif 'LogisticRegression' in model_name or 'xgb' in model_name or 'random_forest' in model_name:
if isinstance(cm, list):
pred = []
for cm_i in cm:
pred.append(cm_i.predict(X))
pred = np.array(pred).T.flatten()
else:
pred = cm.predict(X)
else:
pred = cm.predict(X, y_true, 10000, output_prefix, y_index=y_index, y_col=y_col)
self.compute_overall_cor(pred, y_true, repeat, data_type, model_name, model_suffix, output_prefix,
pred2=None, y_true2=None)
def compute_overall_cor(self, pred, y_true, repeat, data_type, model_name, model_suffix, output_prefix,
pred2=None, y_true2=None):
if isinstance(pred, pd.DataFrame):
y_true_flatten = y_true.values.flatten()
pred_flatten = pred.values.flatten()
else:
y_true_flatten = y_true.flatten()
pred_flatten = pred
pearson_r = np.corrcoef(pred_flatten, y_true_flatten)[0, 1]
spearman_rho = scipy.stats.spearmanr(pred_flatten, y_true_flatten)[0]
y_true_flatten_binary = (y_true_flatten >= 0.5) + 0
if np.sum(y_true_flatten_binary) == 0 or np.sum(y_true_flatten_binary) == len(y_true_flatten_binary):
auroc, auprc, f1, f1_weighted = np.nan, np.nan, np.nan, np.nan
else:
auroc = roc_auc_score(y_true_flatten_binary, pred_flatten)
auprc = average_precision_score(y_true_flatten_binary, pred_flatten)
pred_binary = (pred_flatten >= 0.5) + 0
f1 = f1_score(y_true_flatten_binary, pred_binary)
f1_weighted = f1_score(y_true_flatten_binary, pred_binary, average='weighted')
fpr, tpr, _thresholds = roc_curve(y_true_flatten_binary, pred_flatten)
prediction_n = len(y_true_flatten_binary)
gs_positive_n = np.sum(y_true_flatten_binary)
plot_ROC(fpr, tpr, prediction_n, gs_positive_n, auroc, self.result_path, output_prefix)
self.pearson_r[repeat][data_type][model_name].append(pearson_r)
self.spearman_rho[repeat][data_type][model_name].append(spearman_rho)
if pred2 is not None:
pearson_r2 = np.corrcoef(pred2, y_true2.flatten())[0, 1]
self.pearson_r2[repeat][data_type][model_name].append(pearson_r2)
else:
pearson_r2 = None
if pearson_r2 is not None:
metric_df = pd.DataFrame([pearson_r, spearman_rho, pearson_r2]).T
metric_df.columns = ['Pearson_r', 'Spearman_rho', 'Pearson_r2']
logging.info(
"{} {} Pearson r {}; Spearman rho {}; Pearson r 2 {};".format(model_name + model_suffix, data_type,
pearson_r, spearman_rho, pearson_r2))
else:
metric_df = pd.DataFrame([pearson_r, spearman_rho, auroc, auprc, f1, f1_weighted]).T
metric_df.columns = ['Pearson_r', 'Spearman_rho', 'AUROC', 'AUPRC', 'F1', 'F1_weighted']
logging.info(
"{} {} Pearson r {}; Spearman rho {}; AUROC {}; AUPRC {}; F1 {}; F1_weighted {}".format(
model_name + model_suffix, data_type, pearson_r,
spearman_rho, auroc, auprc, f1, f1_weighted))
metric_df.index = [model_name + model_suffix]
self.metric_output[data_type] = pd.concat([self.metric_output[data_type], metric_df])
if isinstance(pred, pd.DataFrame):
self.plot_pred_true(pred.values.flatten(), y_true.values, output_prefix)
else:
self.plot_pred_true(pred, y_true, output_prefix)
if not isinstance(pred, pd.DataFrame):  # DataFrame predictions already carry index/columns; only ndarray output is reshaped
    pred = pred.reshape(y_true.shape)
    pred = pd.DataFrame(pred, index=self.idx[data_type], columns=self.y.columns)
self.output_pred(pred, output_prefix)
self.output_pred(pred[list(set(self.output_pred_small) & set(pred.columns))], 'small_' + output_prefix)
if model_name != 'expression_control' and 'random_forest' not in model_name:
if 'Timestamped' in self.__class__.__name__:
self.compute_gene_level_cor(pred, output_prefix, y_true)
self.compute_gene_level_auc(pred, output_prefix, y_true)
else:
self.compute_gene_level_cor(pred, output_prefix)
self.compute_gene_level_auc(pred, output_prefix)
if pred2 is not None:
output_prefix = model_name + model_suffix + '_' + data_type + '_2'
self.plot_pred_true2(pred2, y_true2, output_prefix)
pred2 = pred2.reshape(y_true.shape)
pred2 = pd.DataFrame(pred2, index=self.idx[data_type], columns=self.y.columns)
self.output_pred(pred2, output_prefix)
self.output_pred(pred2[list(set(self.output_pred_small) & set(pred2.columns))], 'small_' + output_prefix)
def compute_gene_level_cor(self, pred, output_prefix, y=None):
if y is None:
if 'Timestamped' in self.__class__.__name__ or 'Postanalysis_ts' in self.__class__.__name__:
y = self.y_target
else:
y = self.y
df_gene_cor_var = gene_level_cor(pred, y)
logging.info(df_gene_cor_var.loc[self.select_genes_in_label])
self.output_pred(df_gene_cor_var, 'by_gene_' + output_prefix)
plot_pred_true_r_by_gene_MAD(df_gene_cor_var, self.result_path, output_prefix, 'r')
plot_pred_true_r_by_gene_mean(df_gene_cor_var, self.result_path, output_prefix, 'r')
plot_hist_cor(df_gene_cor_var['Pearson_r'], self.result_path, output_prefix)
def compute_gene_level_auc(self, pred, output_prefix, labels_all=None):
if labels_all is None:
labels_all_binary = self.y_binary.loc[pred.index]
labels_all = self.y.loc[pred.index]
else:
labels_all_binary = ((labels_all >= 0.5) + 0).astype(int)
df_gene_auc, df_gene_bootstrap = individual_auc(pred, labels_all_binary, labels_all)
logging.info(output_prefix)
plot_top_ROC(df_gene_auc, labels_all_binary, pred, self.result_path, 'by_gene_' + output_prefix)
self.output_pred(df_gene_auc, 'by_gene_auc_' + output_prefix)
for x, y in zip(df_gene_bootstrap, ['AUROCs', 'AUPRCs', 'pAUROCs', 'pAUPRCs']):
self.output_pred(x, 'by_gene_{}_{}'.format(y, output_prefix))
plot_pred_true_r_by_gene_MAD(df_gene_auc, self.result_path, output_prefix, mode='auc')
plot_pred_true_r_by_gene_mean(df_gene_auc, self.result_path, output_prefix, mode='auc')
plot_hist_auc(df_gene_auc['AUROC'], self.result_path, 'by_gene_' + output_prefix)
plot_hist_auc(df_gene_auc['AUPRC'], self.result_path, 'by_gene_' + output_prefix, mode='AUPRC')
if pred.shape[1] >= 10:
df_cell_auc, df_cell_bootstrap = individual_auc(pred.T, labels_all_binary.T, labels_all.T)
plot_top_ROC(df_cell_auc, labels_all_binary.T, pred.T, self.result_path, 'by_cell_' + output_prefix)
self.output_pred(df_cell_auc, 'by_cell_auc_' + output_prefix)
for x, y in zip(df_cell_bootstrap, ['AUROCs', 'AUPRCs', 'pAUROCs', 'pAUPRCs']):
self.output_pred(x, 'by_cell_{}_{}'.format(y, output_prefix))
plot_hist_auc(df_cell_auc['AUROC'], self.result_path, 'by_cell_' + output_prefix)
plot_hist_auc(df_cell_auc['AUPRC'], self.result_path, 'by_cell_' + output_prefix, mode='AUPRC')
def compute_metric_all_test(self, data_type, model_name, model_suffix, X, y_true, repeat, y_true2=None):
pred = self.load_prediction_all(data_type, model_name, model_suffix)
y_true = self.y.loc[pred.index, pred.columns]
output_prefix = model_name + model_suffix + '_' + data_type
self.compute_overall_cor(pred, y_true, repeat, data_type, model_name, model_suffix, output_prefix,
pred2=None, y_true2=None)
gene = self.genes_in_label[0]
output_prefix = output_prefix + '_' + gene
self.plot_pred_true_scatter(pred[gene], self.y.loc[pred.index, gene], output_prefix)
df_pred = pd.DataFrame(index=pred.index)
df_pred['Predicted'] = pred[gene]
df_pred['Measured'] = self.y.loc[pred.index, gene]
self.output_pred(df_pred, '' + output_prefix)
def load_prediction_all(self, data_type, model_name, model_suffix):
if hasattr(self, 'load_result_dir'):
load_dir = self.load_result_dir
else:
load_dir = self.result_path
pred_files = glob('{}/pred_{}*fold*{}.tsv'.format(load_dir, model_name + model_suffix, data_type))
pred =
|
pd.read_csv(pred_files[0], sep='\t', index_col=0)
|
pandas.read_csv
|
#!/usr/bin/env python
# coding: utf-8
"""Utilities for processing the overall event.
The module contains useful functions for handling data at the event level. More fine-grained utilities are
reserved for `detector_utils` and `cell_utils`.
"""
# Todo: Pull module IDs out into a csv file for readability
# System
import os
import logging
import itertools
# Externals
import scipy as sp
import numpy as np
import pandas as pd
import trackml.dataset
import torch
from torch_geometric.data import Data
# Device
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# Locals
# from .cell_utils import get_one_event
# def get_cell_information(data, cell_features, detector_orig, detector_proc, endcaps, noise):
# event_file = data.event_file
# evtid = event_file[-4:]
# angles = get_one_event(event_file, detector_orig, detector_proc)
# logging.info("Angles: {}".format(angles))
# hid = pd.DataFrame(data.hid.numpy(), columns=["hit_id"])
# cell_data = torch.from_numpy(
# (hid.merge(angles, on="hit_id")[cell_features]).to_numpy()
# ).float()
# logging.info("DF merged")
# data.cell_data = cell_data
# return data
def get_layerwise_edges(hits):
"""Build Layerwise True Edges i.e. the True Graph. Here `hits` represent complete event."""
# ADAK: Sort by increasing distance from production (IP)
hits = hits.assign(
R=np.sqrt(
(hits.x - hits.vx) ** 2 + (hits.y - hits.vy) ** 2 + (hits.z - hits.vz) ** 2
)
)
hits = hits.sort_values("R").reset_index(drop=True).reset_index(drop=False)
hits.loc[hits["particle_id"] == 0, "particle_id"] = np.nan
hit_list = (
hits.groupby(["particle_id", "layer_id"], sort=False)["index"] # ADAK: layer >> layer_id
.agg(lambda x: list(x))
.groupby(level=0)
.agg(lambda x: list(x))
)
true_edges = []
for row in hit_list.values:
for i, j in zip(row[0:-1], row[1:]):
true_edges.extend(list(itertools.product(i, j)))
true_edges = np.array(true_edges).T
return true_edges, hits
"""
def get_layerwise_edges(hits):
# Calculate and assign parameter R = sqrt(x**2 + y**2 + z**2),
hits = hits.assign(
R=np.sqrt(
(hits.x - hits.vx) ** 2 + (hits.y - hits.vy) ** 2 + (hits.z - hits.vz) ** 2
)
)
# Sort the hits according to R. First, reset_index and drop. Second, reset_index
# again to get the 'index' column which we can use later on.
hits = hits.sort_values("R").reset_index(drop=True).reset_index(drop=False)
# Find hits for particle_id == 0 and assign to -nan value
hits.loc[hits["particle_id"] == 0, "particle_id"] = np.nan
# Get hit_list by particle_id, layer_id. It will return the indices of hits
# from each particle_id in all layers. The .agg() calls just format
# the output for better handling, as we will see later.
hit_list = (
hits.groupby(["particle_id", "layer_id"], sort=False)["index"]
.agg(lambda x: list(x))
.groupby(level=0)
.agg(lambda x: list(x))
)
# Build true edges. First, take one row and pair consecutive entries (row[0:-1]: elements 0 to n-1,
# row[1:]: elements 1 to n). itertools.product(a, b) creates the cartesian
# product, i.e. the set of ordered pairs (x, y) with x in row[0:-1] and y in row[1:].
# itertools.product() returns an iterable, so use list.extend()
# to append to the true_edges list. Note: list.append() adds only one element to the end of a list.
true_edges = []
for row in hit_list.values:
for i, j in zip(row[0:-1], row[1:]):
true_edges.extend(list(itertools.product(i, j)))  # extend(): appends every pair from the iterable to the existing list
# Convert to ndarray and transpose it.
true_edges = np.array(true_edges).T
# As hits are modified due R param so return it as well.
return true_edges, hits
"""
def get_modulewise_edges(hits):
"""Get modulewise (layerless) true edge list. Here hits represent complete event."""
signal = hits[
((~hits.particle_id.isna()) & (hits.particle_id != 0)) & (~hits.vx.isna())
]
signal = signal.drop_duplicates(
subset=["particle_id", "volume_id", "layer_id", "module_id"]
)
# Sort by increasing distance from production
signal = signal.assign(
R=np.sqrt(
(signal.x - signal.vx) ** 2
+ (signal.y - signal.vy) ** 2
+ (signal.z - signal.vz) ** 2
)
)
signal = signal.sort_values("R").reset_index(drop=False)
# Handle re-indexing
signal = signal.rename(columns={"index": "unsorted_index"}).reset_index(drop=False)
signal.loc[signal["particle_id"] == 0, "particle_id"] = np.nan
# Group by particle ID
signal_list = signal.groupby(["particle_id"], sort=False)["index"].agg(
lambda x: list(x)
)
true_edges = []
for row in signal_list.values:
for i, j in zip(row[:-1], row[1:]):
true_edges.append([i, j])
true_edges = np.array(true_edges).T
true_edges = signal.unsorted_index.values[true_edges]
return true_edges
def select_edges(hits1, hits2, filtering=True):
"""Select edges using a particular phi range or sectors. Currently, I am selecting edges
only in the neighboring sectors i.e. hit1 is paired with hit2 in immediate sectors only."""
# Start with all possible pairs of hits, sector_id is for sectorwise selection
keys = ['event_id', 'r', 'phi', 'isochrone', 'sector_id']
hit_pairs = hits1[keys].reset_index().merge(hits2[keys].reset_index(), on='event_id', suffixes=('_1', '_2'))
if filtering:
dSector = (hit_pairs['sector_id_1'] - hit_pairs['sector_id_2'])
sector_mask = ((dSector.abs() < 2) | (dSector.abs() == 5))
edges = hit_pairs[['index_1', 'index_2']][sector_mask]
else:
edges = hit_pairs[['index_1', 'index_2']]
return edges
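# Small standalone check of the sector mask above, assuming six azimuthal sectors
# (ids 0-5), so that |sector_id_1 - sector_id_2| == 5 is the 0/5 wrap-around pair.
def _demo_sector_mask():
    pairs = pd.DataFrame({'sector_id_1': [0, 0, 0, 2], 'sector_id_2': [1, 3, 5, 2]})
    dSector = pairs['sector_id_1'] - pairs['sector_id_2']
    return ((dSector.abs() < 2) | (dSector.abs() == 5)).tolist()
# _demo_sector_mask() -> [True, False, True, True]: 0-3 is rejected, while 0-1, 0-5 and 2-2 are kept.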
def construct_edges(hits, layer_pairs, filtering=True):
"""Construct edges between hit pairs in adjacent layers"""
# Loop over layer pairs and construct edges
layer_groups = hits.groupby('layer')
edges = []
for (layer1, layer2) in layer_pairs:
# Find and join all hit pairs
try:
hits1 = layer_groups.get_group(layer1)
hits2 = layer_groups.get_group(layer2)
# If an event has no hits on a layer, we get a KeyError.
# In that case we just skip to the next layer pair
except KeyError as e:
logging.info('skipping empty layer: %s' % e)
continue
# Construct the edges
edges.append(select_edges(hits1, hits2, filtering))
# Combine edges from all layer pairs
edges =
|
pd.concat(edges)
|
pandas.concat
|
import Functions
import pandas as pd
import matplotlib.pyplot as plt
def group_sentiment(dfSentiment):
dfSentiment['datetime'] = pd.to_datetime(dfSentiment['created_utc'], unit='s')
dfSentiment['date'] = pd.DatetimeIndex(dfSentiment['datetime']).date
dfSentiment = dfSentiment[
['created_utc', 'negative_comment', 'neutral_comment', 'positive_comment', 'datetime', 'date']]
dfSentiment = dfSentiment.groupby(by=['date']).sum(numeric_only=True)  # the datetime column cannot be summed
return dfSentiment
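# Hypothetical usage of group_sentiment (column names assumed as above): per-comment
# sentiment flags are summed into one row per calendar date.
def _demo_group_sentiment():
    dfToy = pd.DataFrame({'created_utc': [1451606400, 1451610000, 1451692800],
                          'negative_comment': [1, 0, 0],
                          'neutral_comment': [0, 1, 0],
                          'positive_comment': [0, 0, 1]})
    return group_sentiment(dfToy)  # two rows: 2016-01-01 (two comments) and 2016-01-02 (one)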
def cleaning(df):
# Importing Bot user names
bots = pd.read_csv(r'Data\Bots.csv', index_col=0, sep=';')
# Removing bots from the data
df = df[~df.author.isin(bots.bot_names)]
# Removing any NA's
df = df.dropna()  # dropna() returns a new DataFrame, so assign the result back
# Cleaning the text data. (Translated from Danish: the messy part below tried each pattern separately before they were combined; delete it if you are not interested.)
keeplist = "?.!,'_-"
import re
Adj_comment = pd.DataFrame(
[re.sub(r'[\S]+\.(net|com|org|info|edu|gov|uk|de|ca|jp|fr|au|us|ru|ch|it|nel|se|no|es|mil)'
r'[\S]*\s?|(/u/|u/)\S+|(/r/|r/)\S+|[\x00-\x1f\x7f-\xff]|[0-9]+|(&g|&l)\S+'
r'|[^\s\w' + keeplist + ']', "", elem) for elem in df['body']], columns=['body'])
df['body'] = Adj_comment['body']
return df
period = ['2014', '2015_01', '2015_02', '2015_03', '2015_04', '2015_05', '2015_06', '2015_07', '2015_08', '2015_09',
'2015_10', '2015_11', '2015_12', '2016_01', '2016_02', '2016_03', '2016_04', '2016_05', '2016_06', '2016_07',
'2016_08', '2016_09', '2016_10',
'2016_11', '2016_12', '2017_01', '2017_02', '2017_03', '2017_04', '2017_05', '2017_06', '2017_07', '2017_08',
'2017_09',
'2017_10', '2017_11', '2017_12', '2018_01', '2018_02', '2018_03', '2018_04', '2018_05', '2018_06', '2018_07',
'2018_08',
'2018_09', '2018_10', '2018_11', '2018_12', '2019_01', '2019_02', '2019_03', '2019_04', '2019_05', '2019_06',
'2019_07',
'2019_08', '2019_09']
dfAllData = pd.DataFrame()
for sPeriod in period:
query = r"""
#standardSQL
SELECT author, subreddit, created_utc, score, controversiality, body
FROM `fh-bigquery.reddit_comments.{}`
WHERE REGEXP_CONTAINS(body, r'(?i)\b Dash\b')
""".format(sPeriod)
dfData = Functions.collect_big_query(sQuery=query)
print(sPeriod + ' Collected')
print(sPeriod + ' cleaned')
dfAllData = pd.concat([dfAllData, dfData])  # DataFrame.append was removed in pandas 2.0; use pd.concat
del dfData
dfAllData.to_csv('Dash_sentiment.csv')
coin_list = ['BCH', 'Cardona', 'dogecoin', 'EOS', 'ETH', 'LTC', 'XRP', 'Monero', 'BNB', 'IOTA', 'TEZOS']
dfSubRed = pd.DataFrame()
for scoin in coin_list:
dfTemp = pd.read_csv(scoin + '_sentiment.csv', index_col=0)
dfTemp = dfTemp.dropna()
dfSubRed = pd.concat([dfSubRed, pd.DataFrame(dfTemp.subreddit.value_counts()[:10].index),
pd.DataFrame(dfTemp.subreddit.value_counts()[:10].values)], axis=1)
# Removing disturbing subreddits:
# EOS:
EOS_list = ['ffxiv', 'photography', 'masseffect', 'whowouldwin', 'astrophotography', 'elementaryos']
dfTemp = pd.read_csv('EOS_sentiment.csv', index_col=0)
dfTemp = dfTemp[~dfTemp['subreddit'].isin(EOS_list)]
dfTemp.to_csv('EOS_R_Sentiment.csv')
# Ripple: indianapolis
XRP_list = ['indianapolis']
dfTemp = pd.read_csv('XRP_sentiment.csv', index_col=0) # 510558
dfTemp = dfTemp[~dfTemp['subreddit'].isin(XRP_list)]
dfTemp.to_csv('XRP_R_Sentiment.csv')
# BNB: SquaredCircle, dragonballfighterz, StreetFighter, step1, AirBnB
BNB_list = ['SquaredCircle', 'dragonballfighterz', 'StreetFighter', 'step1', 'AirBnB']
dfTemp = pd.read_csv('BNB_sentiment.csv', index_col=0)  # 109630; read the raw file, as for EOS and XRP above
dfTemp = dfTemp[~dfTemp['subreddit'].isin(BNB_list)]
dfTemp.to_csv('BNB_R_Sentiment.csv')
# New coin list
coin_list_R = ['BCH', 'Cardona', 'dogecoin', 'EOS_R', 'ETH', 'LTC', 'XRP_R', 'Monero', 'BNB_R', 'IOTA', 'TEZOS']
# Removing NA's
for scoin in coin_list_R:
dfTemp = pd.read_csv(scoin + '_sentiment.csv', index_col=0)
dfTemp = dfTemp.dropna()
dfTemp.to_csv(scoin + 'NA_Sentiment.csv')
coin_list_NA = ['BTC', 'BCHNA', 'CardonaNA', 'dogecoinNA', 'EOS_RNA', 'ETHNA', 'LTCNA', 'XRP_RNA', 'MoneroNA',
'BNB_RNA',
'IOTANA', 'TEZOSNA', ]
for scoin in coin_list_NA:
dfTemp = pd.read_csv(scoin + '_Sentiment.csv', index_col=0)
dfTemp = cleaning(dfTemp)
# dfAllData = Functions.language_filter(dfAllData, series='body', language_select='en')
dfTemp = dfTemp.reset_index(drop=True)
dfTemp = Functions.get_sentiment(dfTemp, series='body')
dfTemp = group_sentiment(dfTemp)
dfTemp.to_csv(scoin + '_Actual_Sentiment.csv')
# Run from here at the start to load the required functions and dataframes
import Functions
import pandas as pd
from datetime import datetime
import matplotlib.pyplot as plt
coin_list_NA = ['BTC', 'BCHNA', 'CardonaNA', 'dogecoinNA', 'EOS_RNA', 'ETHNA', 'LTCNA', 'XRP_RNA', 'MoneroNA',
'BNB_RNA',
'IOTANA', 'TEZOSNA', ]
coin_list = ['BTC', 'BCH', 'Cardona', 'dogecoin', 'EOS', 'ETH', 'LTC', 'XRP', 'Monero', 'BNB', 'IOTA', 'TEZOS', ]
dfAllCoins = pd.DataFrame()
dfWMR = pd.read_csv('Data/' + coin_list[0] + '_marketdata.csv', sep=';', thousands=',', decimal='.')
dfWMR['Date'] = pd.to_datetime(dfWMR['Date'], format='%b %d, %Y')
dfWMR['Date'] = pd.DatetimeIndex(dfWMR['Date']).date
dfWMR.index = dfWMR['Date']
dfWMR = dfWMR.sort_index()
dfWMR = dfWMR.drop(columns=dfWMR.columns)  # keep only the Date index
dfReturns = dfWMR
dfMarketCap = dfWMR
dfPositive = dfWMR
dfNeutral = dfWMR
dfNegative = dfWMR
dfMOM3 = dfWMR
dfMOM5 = dfWMR
dfMOM7 = dfWMR
dfMOM14 = dfWMR
for i in range(0, len(coin_list)):
dfMarket = pd.read_csv('Data/' + coin_list[i] + '_marketdata.csv', sep=';', thousands=',', decimal='.')
dfMarket['Date'] = pd.to_datetime(dfMarket['Date'], format='%b %d, %Y')
dfMarket['Date'] = pd.DatetimeIndex(dfMarket['Date']).date
dfMarket.index = dfMarket['Date']
dfMarket = dfMarket.sort_index()
dfMarket['Return'] = dfMarket['Close**'].pct_change()
dfMarket = dfMarket[1:]
dfMarket['Mom3'] = dfMarket.Return.rolling(3).sum()
dfMarket['Mom5'] = dfMarket.Return.rolling(5).sum()
dfMarket['Mom7'] = dfMarket.Return.rolling(7).sum()
dfMarket['Mom14'] = dfMarket.Return.rolling(14).sum()
dfTemp = pd.DataFrame()
dfTemp[coin_list[i]] = dfMarket['Return']
dfReturns = dfReturns.merge(dfTemp, how='left', left_index=True, right_index=True)
dfTemp = pd.DataFrame()
dfTemp[coin_list[i]] = dfMarket['Mom3']
dfMOM3 = dfMOM3.merge(dfTemp, how='left', left_index=True, right_index=True)
dfTemp = pd.DataFrame()
dfTemp[coin_list[i]] = dfMarket['Mom5']
dfMOM5 = dfMOM5.merge(dfTemp, how='left', left_index=True, right_index=True)
dfTemp = pd.DataFrame()
dfTemp[coin_list[i]] = dfMarket['Mom7']
dfMOM7 = dfMOM7.merge(dfTemp, how='left', left_index=True, right_index=True)
dfTemp = pd.DataFrame()  # api: pandas.DataFrame
"""
Classes for analyzing RSMTool predictions, metrics, etc.
:author: <NAME> (<EMAIL>)
:author: <NAME> (<EMAIL>)
:author: <NAME> (<EMAIL>)
:organization: ETS
"""
import warnings
from functools import partial
import numpy as np
import pandas as pd
from scipy.stats import kurtosis, pearsonr
from sklearn.decomposition import PCA
from sklearn.metrics import confusion_matrix, mean_squared_error, r2_score
from skll.metrics import kappa
from .container import DataContainer
from .utils.metrics import (agreement,
difference_of_standardized_means,
partial_correlations,
quadratic_weighted_kappa,
standardized_mean_difference)
from .utils.prmse import get_true_score_evaluations
class Analyzer:
"""Class to perform analysis on all metrics, predictions, etc."""
@staticmethod
def check_frame_names(data_container, dataframe_names):
"""
Check that all specified dataframes are available.
This method checks to make sure all specified DataFrames
are in the given data container object.
Parameters
----------
data_container : container.DataContainer
A DataContainer object
dataframe_names : list of str
The names of the DataFrames expected in the
DataContainer object.
Raises
------
KeyError
If a given dataframe_name is not in the DataContainer object.
"""
for dataframe_name in dataframe_names:
if dataframe_name not in data_container:
raise KeyError('The DataFrame `{}` does not exist in the '
'DataContainer object.'.format(dataframe_name))
@staticmethod
def check_param_names(configuration_obj, parameter_names):
"""
Check that all specified parameters are available.
This method checks to make sure all specified parameters
are in the given configuration object.
Parameters
----------
configuration_obj : configuration_parser.Configuration
A configuration object
parameter_names : list of str
The names of the parameters (keys) expected in the
Configuration object.
Raises
------
KeyError
If a given parameter_name is not in the Configuration object.
"""
for parameter_name in parameter_names:
if parameter_name not in configuration_obj:
raise KeyError('The parameter `{}` does not exist in the '
'Configuration object.'.format(parameter_name))
@staticmethod
def analyze_excluded_responses(df,
features,
header,
exclude_zero_scores=True,
exclude_listwise=False):
"""
Compute statistics for responses excluded from analyses.
This method computes various statistics for the responses that
were excluded from analyses, either in the training set or in
the test set.
Parameters
----------
df : pandas DataFrame
Data frame containing the excluded responses
features : list of str
List of column names containing the features
to which we want to restrict the analyses.
header : str
String to be used as the table header for the
output data frame.
exclude_zero_scores : bool, optional
Whether or not the zero-score responses
should be counted in the exclusion statistics.
Defaults to ``True``.
exclude_listwise : bool, optional
Whether or not the candidates were excluded
based on minimal number of responses.
Defaults to ``False``.
Returns
-------
df_full_crosstab : pandas DataFrame
Two-dimensional data frame containing the
exclusion statistics.
"""
# create an empty output data frame
df_full_crosstab = pd.DataFrame({'all features numeric': [0, 0, 0],
'non-numeric feature values': [0, 0, 0]},
index=['numeric non-zero human score',
'zero human score',
'non-numeric human score'])
if not df.empty:
# re-code human scores into numeric, missing or zero
df['score_category'] = 'numeric non-zero human score'
df.loc[df['sc1'].isnull(), 'score_category'] = 'non-numeric human score'
df.loc[df['sc1'].astype(float) == 0, 'score_category'] = 'zero human score'
# recode feature values: a response with at least one
# missing feature is assigned 'non-numeric feature values'
df_features_only = df[features + ['spkitemid']]
null_feature_rows = df_features_only.isnull().any(axis=1)
df_null_features = df_features_only[null_feature_rows]
df['feat_category'] = 'all features numeric'
df.loc[df['spkitemid'].isin(df_null_features['spkitemid']),
'feat_category'] = 'non-numeric feature values'
# crosstabulate
df_crosstab = pd.crosstab(df['score_category'],
df['feat_category'])
df_full_crosstab.update(df_crosstab)
# convert back to integers as these are all counts
df_full_crosstab = df_full_crosstab.astype(int)
df_full_crosstab.insert(0, header, df_full_crosstab.index)
if not exclude_listwise:
# if we are not excluding listwise, rename the first cell so
# that it is not set to zero
assert(df_full_crosstab.loc['numeric non-zero human score',
'all features numeric'] == 0)
df_full_crosstab.loc['numeric non-zero human score',
'all features numeric'] = '-'
# if we are not excluding the zeros, rename the corresponding cells
# so that they are not set to zero. We do not do this for listwise exclusion
if not exclude_zero_scores:
assert(df_full_crosstab.loc['zero human score',
'all features numeric'] == 0)
df_full_crosstab.loc['zero human score',
'all features numeric'] = '-'
return df_full_crosstab
@staticmethod
def analyze_used_responses(df_train, df_test, subgroups, candidate_column):
"""
Compute statistics for responses used in analyses.
This method computes various statistics on the responses that
were used in analyses, either in the training set or in the
test set.
Parameters
----------
df_train : pandas DataFrame
Data frame containing the response information
for the training set.
df_test : pandas DataFrame
Data frame containing the response information
for the test set.
subgroups : list of str
List of column names that contain grouping
information.
candidate_column : str
Column name that contains candidate
identification information.
Returns
-------
df_analysis : pandas DataFrame
Data frame containing information about the used
responses.
"""
# create a basic data frame for responses only
train_responses = set(df_train['spkitemid'])
test_responses = set(df_test['spkitemid'])
rows = [{'partition': 'Training', 'responses': len(train_responses)},
{'partition': 'Evaluation', 'responses': len(test_responses)},
{'partition': 'Overlapping', 'responses': len(train_responses & test_responses)},
{'partition': 'Total', 'responses': len(train_responses | test_responses)}]
df_analysis = pd.DataFrame.from_dict(rows)
columns = ['partition', 'responses'] + subgroups
if candidate_column:
train_candidates = set(df_train['candidate'])
test_candidates = set(df_test['candidate'])
df_analysis['candidates'] = [len(train_candidates), len(test_candidates),
len(train_candidates & test_candidates),
len(train_candidates | test_candidates)]
columns = ['partition', 'responses', 'candidates'] + subgroups
for group in subgroups:
train_group = set(df_train[group])
test_group = set(df_test[group])
df_analysis[group] = [len(train_group), len(test_group),
len(train_group & test_group),
len(train_group | test_group)]
df_analysis = df_analysis[columns]
return df_analysis
@staticmethod
def analyze_used_predictions(df_test, subgroups, candidate_column):
"""
Compute various statistics for predictions used in analyses.
Parameters
----------
df_test : pandas DataFrame
Data frame containing the test set predictions.
subgroups : list of str
List of column names that contain grouping
information.
candidate_column : str
Column name that contains candidate
identification information.
Returns
-------
df_analysis : pandas DataFrame
Data frame containing information about the used
predictions.
"""
rows = [{'partition': 'Evaluation', 'responses': df_test['spkitemid'].size}]
df_analysis = pd.DataFrame.from_dict(rows)  # api: pandas.DataFrame.from_dict
# -*- coding: utf-8 -*-
'''
Copyright 2018, University of Freiburg.
Chair of Algorithms and Data Structures.
<NAME> <<EMAIL>>
'''
'''
Simple script for looping through many csv Files and concatenating their
columns.
'''
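# Rough sketch of the intended combination (an assumption based on the docstring, not the script's verified logic):
#   frames = [pd.read_csv(f) for f in csv_files]
#   pd.concat(frames, axis=1).to_csv(outname, index=False)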
import os
import sys
import glob
import argparse
import pandas as pd
def combine_csvs(input_dir, outname):
'''
Concatenate the columns of all csv files found in ``input_dir``; ``outname`` names the combined output.
'''
csv_files = glob.glob(os.path.join(input_dir, '*.csv'))
dff = pd.DataFrame()  # api: pandas.DataFrame
"""
Utilities for examining ABS NOM unit record
"""
import pickle
from pathlib import Path
import pandas as pd
import numpy as np
import matplotlib as mpl
from matplotlib import pyplot as plt
from IPython.display import display_html, display
from matplotlib.patches import Patch
from chris_utilities import adjust_chart
import file_paths
# the data storage
base_data_folder = file_paths.base_data_folder
abs_data_folder = file_paths.abs_data_folder
unit_record_folder = file_paths.unit_record_folder
individual_movements_folder = file_paths.individual_movements_folder
abs_nom_propensity = file_paths.abs_nom_propensity
abs_traveller_characteristics_folder = file_paths.abs_traveller_characteristics
grant_data_folder = file_paths.grant_data_folder
dict_data_folder = file_paths.dict_data_folder
program_data_folder = file_paths.program_data_folder
# local to current forecasting period folder
forecasting_data_folder = Path("data/forecasting")
forecasting_input_folder = forecasting_data_folder / "input"
### Utilities to read in raw ABS data:
def process_original_ABS_data(abs_original_data_folder, analysis_folder):
"""Process the SAS data, include removing previous preliminary parquet
and replace with final parquet, and add new preliminary parquet for latest quarter
Parameters
----------
abs_original_data_folder : Path object
SAS data directory
analysis_folder : Path object
ABS Traveller characteristics folder path
Returns
-------
None
Raises
------
ValueError
If an ABS NOM filename does not commence with "p" (preliminary) or "f" (final);
the error advises the user that the expected file-naming convention is not in place.
"""
# TODO: read from the zip file rather than unzipped data
# variables to convert to ints or strings
ints_preliminary = [
"person_id",
"sex",
"country_of_birth",
"country_of_citizenship",
"country_of_stay",
"initial_erp_flag",
"final_erp_flag",
"duration_movement_sort_key",
"nom_direction",
"duration_in_australia_category",
"count_of_movements",
"initial_category_of_travel",
"age",
"status_flag",
"reason_for_journey",
"odb_time_code",
]
## For preliminary leave as floats: 'rky_val'
ints_final = [
"person_id",
"sex",
"country_of_birth",
"country_of_citizenship",
"country_of_stay",
"initial_erp_flag",
"final_erp_flag",
"duration_movement_sort_key",
"nom_direction",
"duration_in_australia_category",
"count_of_movements",
"initial_category_of_travel",
"age",
"status_flag",
"reason_for_journey",
"odb_time_code",
"net_erp_effect",
"nom_propensity",
]
# string vars are the same across preliminary and final
string_vars = [
"visa_group",
"visa_subclass",
"visa_applicant_type",
"visa_stream_code",
"stream_code_out",
"state",
"direction",
]
date_times = ["Duration_movement_date"]
### For unzipped SAS data files
### Requires both options - older folders may not have the zipped version
# for abs_filepath in sorted(abs_original_data_folder.glob("*.sas7bdat")):
# print(abs_filepath.stem)
# df = pd.read_sas(abs_filepath, encoding="latin-1", format="sas7bdat").rename(
# columns=str.lower
# )
for abs_filepath in sorted(abs_original_data_folder.glob("*.sas7bdat")):
print(abs_filepath.stem)
df = pd.read_sas(abs_filepath, encoding="latin-1", format="sas7bdat").rename(
columns=str.lower
)
# for zip_filename in sorted(abs_original_data_folder.glob("*.zip")):
# zipped_file = zipfile.ZipFile(zip_filename, 'r')
# # There's only expected to be one file in each zip
# if len(zipped_file.namelist()) != 1:
# raise ValueError("Chris: zipped file has more than one file...recode!")
# sasfile = zipfile.open(zipped_file.namelist()[0])
# print(sasfile.stem)
# df = pd.read_sas(sasfile, encoding="latin-1", format="sas7bdat").rename(
# columns=str.lower
# )
### need to fix all abs_filepath below
# adjust datatypes and write out:
# string vars are the same across preliminary and final
for col in string_vars:
df[col] = df[col].astype("category")
# integer variables differ across final and preliminary data
if abs_filepath.stem[0] == "p": # preliminary NOM
for col in ints_preliminary:
df[col] = df[col].astype(int)
elif abs_filepath.stem[0] == "f": # final NOM
for col in ints_final:
df[col] = df[col].astype(int)
else:
raise ValueError(
"Chris - ABS NOM files must commence with p or f: {abs_filepath.stem} does not!"
)
write_outfile(df, abs_filepath, abs_original_data_folder, analysis_folder)
return None
def write_outfile(df, abs_filepath, abs_original_data_folder, analysis_folder):
"""
write out the processed ABS data to the ABS data folder and the analysis folder
Parameters
----------
df: pandas dataframe to write out
abs_filepath: Path object of original ABS file
abs_original_data_folder: Path object of path to ABS data folder
analysis_folder: Path to folder containing all NOM unit record parquet files
Returns
-------
None
"""
# ABS NOM filenames are of the type xxxx2018q1.sas...
# Want to extract the date component: 2018q1
date_start = abs_filepath.stem.find("2")
if date_start != -1: # if a '2' is found
filename_date = abs_filepath.stem[date_start:]
## append '_p' if it's a preliminary file
if abs_filepath.stem[0] == "p":
filename_date = filename_date + "_p"
else:
raise ValueError(
f"Chris - filename {abs_filepath.stem} does not appear to have a 20XXqY date in it"
)
filename = "traveller_characteristics" + filename_date + ".parquet"
# Write to original ABS folder:
# to keep as history for comparison with updated preliminary/final files
df.to_parquet(abs_original_data_folder / filename)
# Write to folder for analysis
df.to_parquet(analysis_folder / filename)
# if a final file replaces a preliminary file - delete it from the analysis file
if abs_filepath.stem[0] == "f":
preliminary_filename = (
"traveller_characteristics" + filename_date + "_p" + ".parquet"
)
preliminary_path = analysis_folder / preliminary_filename
if preliminary_path.exists():
preliminary_path.unlink()
return None
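# Worked example of the naming convention above (hypothetical input file):
#   abs_filepath.stem == "p2019q3"  ->  filename_date == "2019q3_p"
#   ->  filename == "traveller_characteristics2019q3_p.parquet"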
def get_visa_code_descriptions(vsc_list):
"""
get visa code descriptions
parameters
----------
vsc_list: list
visa subclass codes as strings
returns
-------
a dictionary matching visa subcode to description
"""
with open(dict_data_folder / "dict_visa_code_descriptions.pickle", "rb") as pickle_file:
dict_visa_code_descriptions = pickle.load(pickle_file)
for vsc in vsc_list:
print(dict_visa_code_descriptions[vsc])
return dict_visa_code_descriptions
def get_monthly(
df, net_erp_effect, group_by=("Duration_movement_date", "Visa_subclass")
):
"""
Aggregate unit record NOM data to monthly by visa subclass
"""
summary = (
df[df.net_erp_effect == net_erp_effect]
.groupby(group_by)
.net_erp_effect.sum()
.unstack()
)
return summary.resample("M").sum()
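# Hedged usage sketch (assumes `df` is a NOM unit-record frame loaded from the parquet files above):
#   monthly_arrivals = get_monthly(df, net_erp_effect=1)     # arrivals by visa subclass
#   monthly_departures = get_monthly(df, net_erp_effect=-1)  # departures by visa subclass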
def read_single_NOM_file(data_folder, file_name, field_list=None):
if field_list is None:
df = pd.read_parquet(data_folder / file_name)
else:
df = pd.read_parquet(data_folder / file_name, columns=field_list)
return df
def get_NOM_monthly_old(net_erp_effect, data_folder=Path("parquet")):
"""
A generator for returning NOM data selected for arrivals or departures
Parameters
----------
net_erp_effect: contribution to NOM: 1 = arrivals, -1 = departure
data_folder: a Path object to the folder containing ABS NOM unit record data
Yields:
-------
NOM_effect: a dataframe selected on net_erp_effect
"""
assert (net_erp_effect == 1) | (net_erp_effect == -1)
for p in sorted(data_folder.glob("*.parq")):
print(p.stem)
df = pd.read_parquet(p)  # api: pandas.read_parquet
from Fase1.analizer.abstract.expression import Expression, TYPE
from Fase1.analizer.abstract import expression
from Fase1.analizer.reports import Nodo
from Fase1.analizer.statement.expressions.primitive import Primitive
import pandas as pd
class AggregateFunction(Expression):
"""
This class represents the aggregate functions used in the GROUP BY clause
"""
def __init__(self, func, colData, row, column) -> None:
super().__init__(row, column)
self.func = func.lower()
self.colData = colData
if colData == "*":
self.temp = func + "(*)"
else:
self.temp = func + "(" + colData.temp + ")"
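# Illustrative: func="SUM" with a column whose temp is "price" gives self.temp == "SUM(price)";
# with colData == "*" it gives self.temp == "SUM(*)".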
def execute(self, environment):
countGr = environment.groupCols
if countGr == 0:
if self.colData != "*":
c = self.colData.execute(environment).value
if self.func == "sum":
newDf = c.sum()
elif self.func == "count":
newDf = c.count()
elif self.func == "prom":
newDf = c.mean()
else:
newDf = None
expression.list_errors.append(
"Error: 42725: Error en la funcion "
+ str(self.func)
+ "\n En la linea: "
+ str(self.row)
)
else:
c = environment.dataFrame.iloc[:, -1:]
if self.func == "count":
newDf = len(c)
else:
newDf = None
expression.list_errors.append(
"Error: 42725: Error en la funcion "
+ str(self.func)
+ "\n En la linea: "
+ str(self.row)
)
return Primitive(TYPE.NUMBER, newDf, self.temp, self.row, self.column)
if self.colData != "*":
# Get the most recently added columns (the GROUP BY columns)
df = environment.dataFrame.iloc[:, -countGr:]
c = self.colData.execute(environment)
x = c.value
x = pd.DataFrame(x)  # api: pandas.DataFrame
# coding: utf-8
import glob
import os
import pandas as pd
import numpy as np
import shutil
from pathlib import Path
sddir = "/media/x/cosmicsense/data/sponheim_rhinluch/sd"
remotedir = "/media/x/cosmicsense/data/sponheim_rhinluch/remote"
trgdir = "/media/x/cosmicsense/data/sponheim_rhinluch/merged"
tmpfile = "tmpfile.txt"
tmpfile2 = "tmpfile2.txt"
ids = ["spo", "rhi"]
crns = {
"spo": {"remotepattern": "sponheim_Data_*.spo_*.txt",
"sdpattern": "na",
"colnames": ["rec_id", "datetime", "press4", "press1", "temp1", "relhum1", "volt", "counts1", "nsecs1", "temp2", "relhum2", "sdiAdr", "sdi1_1", "sdi1_2", "sdi1_3", "sdi1_4", "sdi1_5", "sdi1_6"],
},
"rhi": {"remotepattern": "rhinluch_Data*.rhi_*.txt",
"sdpattern": "na",
"colnames": ["rec_id", "datetime", "press1", "temp1", "relhum1", "volt", "counts1", "nsecs1", "sdiAdr", "sdi1_1", "sdi1_2", "sdi1_3", "sdi1_4"],
}
}
for i, id in enumerate(ids):
print("-------------")
print("Processing %s" % id)
try:
os.remove(tmpfile)
os.remove(tmpfile2)
except OSError:
pass  # the temp files may not exist yet
# REMOTE FILES
print("Remote: ", end="")
searchdir = os.path.join(remotedir,"%s" % id, crns[id]["remotepattern"])
remotefiles = glob.glob(searchdir, recursive=True)
print("found %d files" % len(remotefiles))
for name in remotefiles:
print(".", end="")
fin = open(name, "r")
body = fin.read()
# replace comment character
body = body.replace("//", "#")
# replace zombie line endings
body = body.replace(",\r\n", "\r\n")
# comment out these lines
body = body.replace("CRS#1:", "#CRS#1")
body = body.replace("CRS#2:", "#CRS#2")
myfile = open(tmpfile, 'a')
myfile.write(body)
myfile.close()
print("")
# SD
print("SD: ", end="")
searchdir = os.path.join(sddir, "%s" % id)
sdfiles = [filename for filename in Path(searchdir).glob("**/"+crns[id]["sdpattern"])]
print("found %d files" % len(sdfiles))
for name in sdfiles:
print(".", end="")
fin = open(name, "r")
body = fin.read()
# replace comment character
body = body.replace("//", "#")
# replace zombie line endings
body = body.replace(",\r\n", "\r\n")
body = body.replace(",\n", "\n")
# comment out these lines
body = body.replace("CRS#1:", "#CRS#1")
body = body.replace("CRS#2:", "#CRS#2")
myfile = open(tmpfile, 'a')
myfile.write(body)
myfile.close()
print("")
if "colnames2" in crns[id].keys():
# Read all lines. potentially varying no of columns
myfile = open(tmpfile, 'r')
lines = myfile.readlines()
myfile.close()
# Write to separate files
myfile = open(tmpfile, 'w')
myfile2 = open(tmpfile2, 'w')
for line in lines:
split = line.split(",")
if len(split)==len(crns[id]["colnames"]):
myfile.write(line+"\n")
if len(split)==len(crns[id]["colnames2"]):
myfile2.write(line+"\n")
myfile.close()
myfile2.close()
# MERGE
df = pd.read_csv(tmpfile, sep=",", comment="#", header=None, error_bad_lines=False, warn_bad_lines=True)  # api: pandas.read_csv
from collections import deque
import pandas as pd
import datetime
import os
import numpy as np
from torch_rl.utils import Parameters, prRed, Callback, timestamp
import glob
import shutil
class TrainingStatsCallback(Callback):
"""
Keeps training statistics, writes them to a file and loads them.
"""
def __init__(self, episode_window=10, step_window=10,
sample_rate_episodes=1, sample_rate_steps=None, save_rate=10,
save_destination=None, hyperparameters=None, stepwise=False, episodewise=True):
super(TrainingStatsCallback, self).__init__(episodewise=episodewise, stepwise=stepwise)
self.episode_window = episode_window
self.episode_reward_buffer = deque(maxlen=episode_window)
self.step_rewards = []
self.episode_rewards = []
self.sample_rate_episodes=sample_rate_episodes
self.sample_rate_steps = sample_rate_steps
self.rewards = []
self.moving_average_rewards = []
self.save_rate = save_rate
self.hyperparameters = hyperparameters
if save_destination is None:
self.save_destination = 'training_stats_' + timestamp()
else:
self.save_destination = os.path.join(save_destination, "training_stats")
if os.path.isdir(self.save_destination):
prRed(self.save_destination + " is a directory already, delete for new training data? y/n")
res = input()
res = res.lower()
if res == 'y':
shutil.rmtree(self.save_destination)
else:
raise Exception("Start training with another save destination name.")
os.makedirs(self.save_destination)
# Save hyperparameters to a file
self.save_hyperparameters()
# Pandas data frames
self.episode_data = None
self.step_data = None
def save_hyperparameters(self):
if self.hyperparameters:
if not isinstance(self.hyperparameters, Parameters):
raise Exception("User Parameters from torch_rl.utils.Parameters to store parameters")
df = pd.DataFrame.from_records(self.hyperparameters.__dict__)
df.to_pickle(os.path.join(self.save_destination, "parameters.cfg"))
def _step(self, episode, step, reward,**kwargs):
kwargs["reward"] = reward
kwargs['episode'] = episode
kwargs['step'] = step
df = pd.DataFrame.from_records([kwargs], index=['step'])
if self.step_data is not None:
self.step_data = pd.concat([self.step_data, df])
else:
self.step_data = df
if episode % self.save_rate == 0:
self.save()
def _episode_step(self, **kwargs):
self.episode_reward_buffer.append(kwargs['episode_reward'])
episode = kwargs['episode']
kwargs["mvavg_reward"] = np.mean(self.episode_reward_buffer)
df = pd.DataFrame.from_records([kwargs], index=['episode'])
if self.episode_data is not None:
self.episode_data = pd.concat([self.episode_data, df])  # api: pandas.concat
#!/usr/bin/env python
# coding: utf-8
# # Coding Exercises (Part 1)
# ## Full Data Workflow A-Z: Merging, Joining, Concatenating
# ### Exercise 12: Merging, joining, aligning and concatenating Data
# Now, you will have the opportunity to analyze your own dataset. <br>
# __Follow the instructions__ and insert your code! You are either requested to
# - Complete the Code and __Fill in the gaps__. Gaps are marked with "__---__" and are __placeholders__ for your code fragment.
# - Write Code completely __on your own__
# In some exercises, you will find questions that can only be answered if your code is correct and returns the right output! The correct answer is provided below your coding cell. There you can check whether your code is correct.
# If you need a hint, check the __Hints Section__ at the end of this Notebook. Exercises and Hints are numerated accordingly.
# If you need some further help or if you want to check your code, you can also check the __solutions notebook__.
# ### Have Fun!
# --------------------------------------------------------------------------------------------------------------
# ## Option 1: Self_guided
# ### Concatenating DataFrames vertically
# __Import__ the cars dataset (with cars from usa and europe) from the csv-file __cars_clean.csv__. <br>
# Also __import__ the csv-file __cars_jap.csv__ (with cars from japan) and __concatenate__ both DataFrames __vertically__! <br>
# __Save__ the __concatenated DataFrame__ in the variable __cars_all__! <br>
# Finally, __sort__ cars_all by the model_year from __low to high__!
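# A minimal sketch for this concatenation task (assuming both csv files sit in the working directory; the guided Option 2 below builds it up step by step):
#     cars_all = pd.concat([pd.read_csv("cars_clean.csv"), pd.read_csv("cars_jap.csv")], ignore_index=True)
#     cars_all = cars_all.sort_values("model_year")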
# ### Left Join
# __Import__ the csv-files __summer.csv__ (as summer) and __dictionary.csv__ (as dic) which contains the __full country name__ for the olympic country codes as well as __population__ and __gdp__ statistics for some countries.<br>
#
# __"Copy and paste"__ the __full country name__, __population__ and __gdp__ from the dic DataFrame __into the summer DataFrame__ with a __Left Join__!<br>
# __Save__ the new merged DataFrame in the variable __summer_new__!<br>
#
# __Inspect__ summer_new and determine the __olympic country codes__ for which the dic DataFrame does __not provide__ any information!
# ### Arithmetic operations between DataFrames / Alignment
# __Import__ the csv-files __ath_2008.csv__ and __ath_2012.csv__ with all medals winners in the Sport __Athletics__ in the Editions __2008__ and __2012__.
# For __all Athletes__ in the two DataFrames, __aggregate/add__ the total number of __Gold__, __Silver__ and __Bronze__ Medals over both editions! __Save__ the aggregated DataFrame in the variable __add__. (Hint: add should contain an index with the Athlete names and three columns, Gold, Silver, Bronze)
# __Sort__ add by Gold, Silver, Bronze from __high to low__! Change datatype to __integer__, if necessary! The first Athlete in your DataFrame should be ... no surprise ... Usain Bolt with 6 Gold and 0 Silver and Bronze Medals.
# -------------------------------------
# ## Option 2: Guided and Instructed
# # STOP HERE, IF YOU WANT TO DO THE EXERCISE ON YOUR OWN!
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# In[ ]:
#run the cell
import pandas as pd
# ### Concatenating DataFrames vertically
# In[ ]:
#run the cell
cars = pd.read_csv("cars_clean.csv")
# __Inspect__ the __cars__ DataFrame!
# In[ ]:
#run the cell
cars.head()
# In[ ]:
#run the cell
cars.tail()
# In[ ]:
#run the cell
cars.info()
# __Inspect__ the cars_jap DataFrame!
# In[ ]:
#run the cell
cars_jap = pd.read_csv("cars_jap.csv")  # api: pandas.read_csv
import os
import glob
import json
import argparse
import datetime
import numpy as np
import pandas as pd
def get_args():
parser = argparse.ArgumentParser(description="spotify")
parser.add_argument("--path", type=str, default="upload", help="input files")
parser.add_argument("--timeZone", type=str, default="UTC", help="time zone")
parser.add_argument("--files", nargs="+")
return parser.parse_args()
def ms_to_hour(ms, hour=False):
seconds = (ms / 1000) % 60
minutes = (ms / (1000 * 60)) % 60
hours = (ms / (1000 * 60 * 60)) % 24
if hour:
return "%02d:%02d:%02d" % (hours, minutes, seconds)
return "%02d:%02d" % (minutes, seconds)
def top3(spotify_df):
top_monthly_df = pd.DataFrame(spotify_df)  # api: pandas.DataFrame
# Based on <NAME>'s deep learning model:
# https://www.kaggle.com/alexanderkireev/experiments-with-imbalance-nn-architecture?scriptVersionId=3419429/code
import argparse
import numpy as np
import pandas as pd
import gc
import logging
from keras.layers import Input, Embedding, Dense, Flatten, Dropout, concatenate
from keras.layers import BatchNormalization, SpatialDropout1D, Conv1D
from keras.callbacks import Callback, ModelCheckpoint, EarlyStopping, ReduceLROnPlateau
from keras.models import Model
from keras.optimizers import Adam
from sklearn.metrics import accuracy_score
'''import matplotlib
import matplotlib.pyplot as plt
from IPython.display import clear_output'''
import trainer.preprocessing as pp
from tensorflow.python.lib.io import file_io
def make_args_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
'--train-file', help='Path to training data', required=False)
parser.add_argument(
'--valid-file', help='Path to validation data', required=False)
parser.add_argument(
'--test-file', help='Path to test data', required=False)
parser.add_argument(
'--sub-file', help='Path to write the submission in a file', required=False)
parser.add_argument(
'--job-dir',
help='Directory where to store checkpoints and exported models.',
default='.')
'''parser.add_argument(
'--log', help='Logging level', default=logging.DEBUG,
action=StoreLoggingLevel)'''
return parser
'''class PlotLosses(Callback):
"""
Plot the loss and the accuracy of the training set and the validation set
"""
def on_train_begin(self, logs={}):
self.i = 0
self.x = []
self.losses = []
self.val_losses = []
self.fig = plt.figure()
self.logs = []
def on_epoch_end(self, epoch, logs={}):
self.logs.append(logs)
self.x.append(self.i)
self.losses.append(logs.get('loss'))
self.val_losses.append(logs.get('val_loss'))
self.i += 1
clear_output(wait=True)
plt.plot(self.x, self.losses, label="loss")
plt.plot(self.x, self.val_losses, label="val_loss")
plt.grid()
plt.legend()
plt.show();'''
def get_values(df):
"""
Return a list of the column's name
"""
return df.columns.values.tolist()
def get_keras_data(dataset):
"""
Split the data according to the column into an numpy array
"""
variables = get_values(dataset)
X = dict([(var, np.array(dataset[var])) for var in variables])
return X
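# Illustrative (hypothetical two-column frame): get_keras_data(df[["app", "os"]]) returns
# {"app": np.array(...), "os": np.array(...)}, one entry per column, matching the named Input layers in NN() below.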
def NN(train_df, val_df, test_df, sub_path):
"""
Main function of the Neural Network
"""
logging.info('Neural Network preprocessing')
if train_df is not None:
y_train = train_df['is_attributed'].values
train_df = train_df.drop('is_attributed', axis = 1)
train_df = train_df.drop('attributed_time', axis = 1)
#train_df = train_df.drop('click_time', axis = 1) #only if no preprocessing
gc.collect()
if val_df is not None:
y_val = val_df['is_attributed'].values
val_df = val_df.drop(['is_attributed'], axis = 1)
val_df = get_keras_data(val_df)
list_variables = get_values(train_df)
print(list_variables)
logging.info('Model is creating...')
max_var = []
if test_df is not None:
for i, var in enumerate(list_variables):
max_var.append(np.max([train_df[var].max(), test_df[var].max()])+1)
train_df = get_keras_data(train_df)
else:
for i, var in enumerate(list_variables):
max_var.append(train_df[var].max()+1)
train_df = get_keras_data(train_df)
emb_n = 50
dense_n = 1000
in_var = []
emb_var = []
for i, var in enumerate(list_variables):
in_var.append(Input(shape=[1], name = var))
emb_var.append(Embedding(max_var[i], emb_n)(in_var[i]))
fe = concatenate([emb for emb in emb_var])
s_dout = SpatialDropout1D(0.2)(fe)
fl1 = Flatten()(s_dout)
#conv = Conv1D(100, kernel_size=4, strides=1, padding='same')(s_dout)
dl = Dense(100)(s_dout)
fl2 = Flatten()(dl)
concat = concatenate([(fl1), (fl2)])
x = Dropout(0.2)(Dense(dense_n,activation='relu')(concat))
x = Dropout(0.2)(Dense(dense_n,activation='relu')(x))
outp = Dense(1,activation='sigmoid')(x)
model = Model(inputs=[var for var in in_var], outputs=outp)
logging.info('Model is compiling...')
batch_size = 50000
epochs = 2 #12 for sample_train
exp_decay = lambda init, fin, steps: (init/fin)**(1/(steps-1)) - 1
steps = int(len(list(train_df)[0]) / batch_size) * epochs
lr_init, lr_fin = 0.002, 0.0002
lr_decay = exp_decay(lr_init, lr_fin, steps)
optimizer_adam = Adam(lr=lr_init, decay=lr_decay)
model.compile(loss='binary_crossentropy',optimizer=optimizer_adam,metrics=['accuracy'])
model.summary()
logging.info('Model is training...')
model.fit(train_df, y_train, batch_size=batch_size, epochs=epochs, shuffle=True, verbose=2, validation_split=0.1)
del train_df, y_train; gc.collect()
if val_df is not None:
logging.info('Prediction on validation set')
predictions_NN_prob = model.predict(val_df, batch_size=batch_size, verbose=2)
del val_df; gc.collect()
predictions_NN_prob = predictions_NN_prob[:,0]
predictions_NN = np.where(predictions_NN_prob > 0.5, 1, 0)
acc_NN = accuracy_score(y_val, predictions_NN)
print('Overall accuracy of Neural Network model:', acc_NN)
if test_df is not None:
logging.info('Prediction on test set')
sub = pd.DataFrame()  # api: pandas.DataFrame
import numpy as np
import cvxpy as cp
from tqdm import tqdm
import random
import time
import torch
import torch.nn as nn
import torch.autograd as autograd
import torch.optim as optim
import torch.nn.functional as F
import matplotlib.pyplot as plt
from numpy import linalg
from itertools import accumulate
import pandas as pd
from utils import solve, model
import argparse
def train(layer, true, iters=1000, choice=1, random_seed=1, show=False):
torch.manual_seed(random_seed)
np.random.seed(random_seed)
pn_t = torch.tensor([0.05]).double().requires_grad_(True)
a1_t = torch.tensor([0.5]).double().requires_grad_(True)
a3_t = torch.tensor([0.5]).double().requires_grad_(True)
max_theta_t = torch.tensor([18.5]).double().requires_grad_(True)
min_theta_t = torch.tensor([18]).double().requires_grad_(True)
max_power_t = torch.tensor([1.0]).double().requires_grad_(True)
variables = [pn_t,a1_t,a3_t,max_theta_t,min_theta_t,max_power_t]
results = []
record_variables = []
optimizer = torch.optim.Adam(variables, lr=0.15)
for i in range(iters):
pred = layer(*variables)
if choice==1:
loss = nn.MSELoss()(true[0], pred[0]) + nn.MSELoss()(true[1], pred[1])
else:
loss = nn.MSELoss()(true[0], pred[0])
optimizer.zero_grad()
loss.backward()
optimizer.step()
with torch.no_grad():
pn_t.data = torch.clamp(pn_t.data, min=0.01, max=0.1)
a1_t.data = torch.clamp(a1_t.data, min=0.01, max=1)
a3_t.data = torch.clamp(a3_t.data, min=0.01, max=1)
max_power_t.data = torch.clamp(max_power_t.data, min=0.1, max=10)
results.append(loss.item())
if i % 100==0: print("(iter %d) loss: %g " % (i, results[-1]))
if i == 50:
optimizer.param_groups[0]["lr"] = 0.1
if i == 200:
optimizer.param_groups[0]["lr"] = 0.05
if i == 800:
optimizer.param_groups[0]["lr"] = 0.01
if show:
im = plt.plot(results,color='gray')
anno = plt.annotate(f'step:{i}\n loss={loss}', xy=(0.85, 0.9), xycoords='axes fraction',color='black')
plt.axis("equal")
plt.pause(0.001)
anno.remove()
record_variables.append([v.detach().numpy().copy() for v in variables])
return [v.detach().numpy().copy() for v in variables], record_variables
def experiment(layer,seed1,theta_0, price, amb, choice, seed2, show, T=24*5):
np.random.seed(seed1)
price = price_data[:T]
amb = amb_data[:T]
C_th = 10 * np.random.uniform(0.9,1.1)
R_th = 2 * np.random.uniform(0.9,1.1)
P_n = 5 * np.random.uniform(0.9,1.1)
eta = 2.5 * np.random.uniform(0.9,1.1)
theta_r = 20 * np.random.uniform(0.9,1.1)
Delta = np.random.uniform(0.9,1.1)
pn_value = 0.02 # you can change it as you like
a1_value = round(1 - 1/(R_th*C_th),4)
a2_value = eta*R_th
a3_value = round((1-a1_value)*a2_value,6)
max_theta = round(theta_r + Delta,3)
min_theta = round(theta_r - Delta,3)
max_power = round(P_n,3)
params = {'pn':pn_value, 'a1':a1_value, 'a2':a2_value, 'a3': a3_value,
'max_theta':max_theta, 'min_theta':min_theta, 'max_power':max_power}
print(params)
true = solve(price, amb, T, pn_value, a1_value, a3_value, max_theta, min_theta, max_power, theta_0, tensor=True)
variables, record = train(layer, true, 600, choice, seed2, show)
pn_ = ((variables[0][0] - pn_value)**2)**0.5
a1_ = ((variables[1][0] - a1_value)**2)**0.5
a3_ = ((variables[2][0] - a3_value)**2)**0.5
max_theta_ = ((variables[3][0] - max_theta)**2)**0.5
min_theta_ = ((variables[4][0] - min_theta)**2)**0.5
max_power_ = ((variables[5][0] - max_power)**2)**0.5
print(pn_,a1_,a3_,max_theta_,min_theta_,max_power_)
return [v[0] for v in variables], [pn_value,a1_value,a3_value,max_theta,min_theta,max_power], [pn_,a1_,a3_,max_theta_,min_theta_,max_power_]
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--num", type=int, default=10, help="the number of experiments")
parser.add_argument("--save", type=bool, default=False, help="whether to save the result")
parser.add_argument("--show", type=bool, default=False, help="whether to show the real-time training loss")
parser.add_argument("--T", type=int, default=120, help="the length of the training data")
parser.add_argument("--seed", type=int, default=1, help="the training random seed")
parser.add_argument("--choice", type=int, default=1, help="1 for OptNet1 and 2 or OptNet2, indicated in the paper")
opts = parser.parse_args()
amb_data = np.array(pd.read_excel('dataset/input_data_pool.xlsx',sheet_name='theta_amb')['theta_amb'])
price_data = np.array(pd.read_excel('dataset/input_data_pool.xlsx',sheet_name='price')['price'])
#theta_0 = 21.64671372 # according to one history sample, you can change it as you like
theta_0 = 35.00
layer1 = model(price_data, amb_data, theta_0, opts.T)
record = []
record_variable = []
record_true = []
for i in range(opts.num):
try:
r1, r2, r3 = experiment(layer1, i, theta_0, price_data, amb_data, opts.choice, opts.seed, opts.show, opts.T)
except Exception as e:
continue
record_variable.append(r1.copy())
record_true.append(r2.copy())
record.append(r3.copy())
estimated_p = pd.DataFrame(data=record_variable, columns=['pn','a1','a3','max_t','min_t','max_p'])  # api: pandas.DataFrame
import pickle
import numpy as np
import pandas as pd
from torch import Tensor
from torch.utils.data import TensorDataset, DataLoader
from scipy.stats import jarque_bera
from statsmodels.tsa.stattools import adfuller
def save_file(data, filename):
with open(filename, 'wb') as f:
pickle.dump(data, f, pickle.HIGHEST_PROTOCOL)
print("{} saved".format(filename))
def load_file(filename):
with open(filename, "rb") as f:
return pickle.load(f)
def mean_absolute_percentage_error(y_true, y_pred):
zeros = np.where(y_true==0)
y_truebis = np.delete(y_true, zeros)
y_predbis = np.delete(y_pred, zeros)
return( 100 * np.mean(np.abs((y_truebis - y_predbis)/y_truebis)) )
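# Worked example (illustrative): y_true=np.array([1, 2, 0, 4]), y_pred=np.array([1.1, 1.8, 5.0, 4.0])
# -> the zero entry is dropped; relative errors are 10%, 10%, 0%, so MAPE ~= 6.67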
def theilU(y_true, y_pred):
return np.sqrt(np.mean((y_pred - y_true)**2)) / (np.sqrt(np.mean(y_pred**2)) + np.sqrt(np.mean(y_true**2)))
# Convert Dataframe to DataLoader
def DataFrame2DataLoader(df, features_col, target_col, batch_size=4, normalize=False, mu=None, sigma=None, shuffle=False):
tmpdf = df.copy()
try:
del tmpdf["Date"]
except:
pass
if normalize:
tmpdf = (tmpdf - mu)/sigma
target = tmpdf[target_col]
features = tmpdf[features_col]
dataset = TensorDataset(Tensor(np.array(features)), Tensor(np.array(target)))
dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=shuffle)
return dataloader
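# Hedged usage sketch (hypothetical all-numeric frame `df` with a "Close" target and feature columns):
#   train_loader = DataFrame2DataLoader(df, features_col=["ret_1", "ret_5"], target_col="Close",
#                                       batch_size=32, normalize=True, mu=df.mean(), sigma=df.std())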
# Jarque-Bera result, null hypothesis: the serie follows a normal distribution
def jarque_bera_t_stat(x):
return (list(jarque_bera(x))[0]) # t stat
def jarque_bera_p_value(x):
return (list(jarque_bera(x))[1]) # p value
# Augmented Dickey-Fuller, null hypothesis: the serie is stationnary
def adf_t_stat(x):
return (list(adfuller(x))[0]) # t stat
def adf_p_value(x):
return (list(adfuller(x))[1]) # p value
# Pesaran-Timmermann test: null hypothesis: the model under study has no power on forecasting the relevant ETF return series
def PT_test(y_true, y_pred):
n = len(y_true)
dy = y_true.copy()
dy[dy < 0] = 0
dy[dy > 0] = 1
py = np.mean(dy)
qy = (py*(1-py))/n
dz = y_pred.copy()
dz[dz < 0] = 0
dz[dz > 0] = 1  # indicator of positive predictions, mirroring dy above
pz = np.mean(dz)
qz = (pz*(1-pz))/n
p = py*pz + (1-py)*(1-pz)
v = (p*(1-p))/n
w = ((2*py-1)**2)*qz + ((2*pz-1)**2)*qy + 4*qy*qz
dyz = y_true*y_pred.copy()
dyz[dyz < 0] = 0
dyz[dyz > 0] = 1
pyz = np.mean(dyz)
PT = (pyz - p)/(v-w)**0.5
return(PT)
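# Under the null of no directional predictability the PT statistic is asymptotically standard normal,
# so (as a rule of thumb) PT > 1.645 rejects the null at the 5% level in a one-sided test.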
# Diebold-Mariano test: null hypothesis equal predictive accuracy between two forecasts
# copied from <NAME> https://github.com/johntwk/Diebold-Mariano-Test/blob/master/dm_test.py
def dm_test(actual_lst, pred1_lst, pred2_lst, h = 1, crit="MSE", power = 2):
# Routine for checking errors
def error_check():
rt = 0
msg = ""
# Check if h is an integer
if (not isinstance(h, int)):
rt = -1
msg = "The type of the number of steps ahead (h) is not an integer."
return (rt,msg)
# Check the range of h
if (h < 1):
rt = -1
msg = "The number of steps ahead (h) is not large enough."
return (rt,msg)
len_act = len(actual_lst)
len_p1 = len(pred1_lst)
len_p2 = len(pred2_lst)
# Check if lengths of actual values and predicted values are equal
if (len_act != len_p1 or len_p1 != len_p2 or len_act != len_p2):
rt = -1
msg = "Lengths of actual_lst, pred1_lst and pred2_lst do not match."
return (rt,msg)
# Check range of h
if (h >= len_act):
rt = -1
msg = "The number of steps ahead is too large."
return (rt,msg)
# Check if criterion supported
if (crit != "MSE" and crit != "MAPE" and crit != "MAD" and crit != "poly"):
rt = -1
msg = "The criterion is not supported."
return (rt,msg)
# Check if every value of the input lists are numerical values
# from re import compile as re_compile
# comp = re_compile("^\d+?\.\d+?$")
# def compiled_regex(s):
# """ Returns True is string is a number. """
# if comp.match(s) is None:
# return s.isdigit()
# return True
# for actual, pred1, pred2 in zip(actual_lst, pred1_lst, pred2_lst):
# is_actual_ok = compiled_regex(str(abs(actual)))
# is_pred1_ok = compiled_regex(str(abs(pred1)))
# is_pred2_ok = compiled_regex(str(abs(pred2)))
# if (not (is_actual_ok and is_pred1_ok and is_pred2_ok)):
# msg = "An element in the actual_lst, pred1_lst or pred2_lst is not numeric."
# rt = -1
# return (rt,msg)
return (rt,msg)
# Error check
error_code = error_check()
# Raise error if cannot pass error check
if (error_code[0] == -1):
raise SyntaxError(error_code[1])
return
# Import libraries
from scipy.stats import t
import collections
import pandas as pd
import numpy as np
# Initialise lists
e1_lst = []
e2_lst = []
d_lst = []
# convert every value of the lists into real values
actual_lst = pd.Series(actual_lst).apply(lambda x: float(x)).tolist()
pred1_lst = pd.Series(pred1_lst).apply(lambda x: float(x)).tolist()
pred2_lst = pd.Series(pred2_lst).apply(lambda x: float(x)).tolist()
# Length of lists (as real numbers)
T = float(len(actual_lst))
# construct d according to crit
if (crit == "MSE"):
for actual,p1,p2 in zip(actual_lst,pred1_lst,pred2_lst):
e1_lst.append((actual - p1)**2)
e2_lst.append((actual - p2)**2)
for e1, e2 in zip(e1_lst, e2_lst):
d_lst.append(e1 - e2)
elif (crit == "MAD"):
for actual,p1,p2 in zip(actual_lst,pred1_lst,pred2_lst):
e1_lst.append(abs(actual - p1))
e2_lst.append(abs(actual - p2))
for e1, e2 in zip(e1_lst, e2_lst):
d_lst.append(e1 - e2)
elif (crit == "MAPE"):
for actual,p1,p2 in zip(actual_lst,pred1_lst,pred2_lst):
e1_lst.append(abs((actual - p1)/actual))
e2_lst.append(abs((actual - p2)/actual))
for e1, e2 in zip(e1_lst, e2_lst):
d_lst.append(e1 - e2)
elif (crit == "poly"):
for actual,p1,p2 in zip(actual_lst,pred1_lst,pred2_lst):
e1_lst.append(((actual - p1))**(power))
e2_lst.append(((actual - p2))**(power))
for e1, e2 in zip(e1_lst, e2_lst):
d_lst.append(e1 - e2)
# Mean of d
mean_d = pd.Series(d_lst)  # api: pandas.Series
"""
Dataframe-like class to hold general energy-related timeseries; either volume ([MW] or
[MWh]), price ([Eur/MWh]) or both; in all cases there is a single timeseries for each.
"""
from __future__ import annotations
from . import single_helper
from .base import PfLine
from .. import changefreq
from typing import Dict, Iterable, Union
import pandas as pd
import numpy as np
class SinglePfLine(PfLine):
"""Portfolio line without children. Has a single dataframe; .children is the empty
dictionary.
Parameters
----------
data: Any
Generally: object with one or more attributes or items ``w``, ``q``, ``r``, ``p``;
all timeseries. Most commonly a ``pandas.DataFrame`` or a dictionary of
``pandas.Series``, but may also be e.g. another PfLine object.
Returns
-------
SinglePfLine
Notes
-----
* If the timeseries or values in ``data`` do not have a ``pint`` data type, the
standard units are assumed (MW, MWh, Eur, Eur/MWh).
* If the timeseries or values in ``data`` do have a ``pint`` data type, they are
converted into the standard units.
"""
def __new__(cls, data):
# Catch case where data is already a valid class instance.
if isinstance(data, SinglePfLine):
return data # TODO: return copy
# Otherwise, do normal thing.
return super().__new__(cls, data)
def __init__(self, data: Union[PfLine, Dict, pd.DataFrame, pd.Series]):
self._df = single_helper.make_dataframe(data)
# Implementation of ABC methods.
@property
def children(self) -> Dict:
return {}
@property
def index(self) -> pd.DatetimeIndex:
return self._df.index
@property
def w(self) -> pd.Series:
if self.kind == "p":
return pd.Series(np.nan, self.index, name="w", dtype="pint[MW]")
else:
return pd.Series(self.q / self.index.duration, name="w").pint.to("MW")
@property
def q(self) -> pd.Series:
if self.kind == "p":
return pd.Series(np.nan, self.index, name="q", dtype="pint[MWh]")
else:
return self._df["q"]
@property
def p(self) -> pd.Series:
if self.kind == "q":
return pd.Series(np.nan, self.index, name="p", dtype="pint[Eur/MWh]")
elif self.kind == "all":
return pd.Series(self.r / self.q, name="p").pint.to("Eur/MWh")
else: # self.kind == 'p'
return self._df["p"]
@property
def r(self) -> pd.Series:
if self.kind != "all":
return pd.Series(np.nan, self.index, name="r", dtype="pint[Eur]")  # api: pandas.Series
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# QuantStats: Portfolio analytics for quants
# https://github.com/ranaroussi/quantstats
#
# Copyright 2019 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as _pd
import numpy as _np
from math import sqrt as _sqrt, ceil as _ceil
from datetime import (
datetime as _dt, timedelta as _td
)
from base64 import b64encode as _b64encode
import re as _regex
from tabulate import tabulate as _tabulate
from . import (
__version__, stats as _stats,
utils as _utils, plots as _plots
)
try:
from IPython.core.display import (
display as iDisplay, HTML as iHTML
)
except ImportError:
pass
def _get_trading_periods(trading_year_days=252):
half_year = _ceil(trading_year_days/2)
return trading_year_days, half_year
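# e.g. _get_trading_periods(252) -> (252, 126): a full trading year and a (rounded-up) half year.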
def html(returns, benchmark=None, rf=0., grayscale=False,
title='Strategy Tearsheet', output=None, compounded=True,
trading_year_days=252, download_filename='quantstats-tearsheet.html',
figfmt='svg', template_path=None):
win_year, win_half_year = _get_trading_periods(trading_year_days)
if output is None and not _utils._in_notebook():
raise ValueError("`file` must be specified")
tpl = ""
with open(template_path or __file__[:-4] + '.html') as f:
tpl = f.read()
f.close()
date_range = returns.index.strftime('%e %b, %Y')
tpl = tpl.replace('{{date_range}}', date_range[0] + ' - ' + date_range[-1])
tpl = tpl.replace('{{title}}', title)
tpl = tpl.replace('{{v}}', __version__)
mtrx = metrics(returns=returns, benchmark=benchmark,
rf=rf, display=False, mode='full',
sep=True, internal="True",
compounded=compounded,
trading_year_days=trading_year_days)[2:]
mtrx.index.name = 'Metric'
tpl = tpl.replace('{{metrics}}', _html_table(mtrx))
tpl = tpl.replace('<tr><td></td><td></td><td></td></tr>',
'<tr><td colspan="3"><hr></td></tr>')
tpl = tpl.replace('<tr><td></td><td></td></tr>',
'<tr><td colspan="2"><hr></td></tr>')
if benchmark is not None:
yoy = _stats.compare(returns, benchmark, "A", compounded=compounded)
yoy.columns = ['Benchmark', 'Strategy', 'Multiplier', 'Won']
yoy.index.name = 'Year'
tpl = tpl.replace('{{eoy_title}}', '<h3>EOY Returns vs Benchmark</h3>')
tpl = tpl.replace('{{eoy_table}}', _html_table(yoy))
else:
# pct multiplier
yoy = _pd.DataFrame(
_utils.group_returns(returns, returns.index.year) * 100)
yoy.columns = ['Return']
yoy['Cumulative'] = _utils.group_returns(
returns, returns.index.year, True)
yoy['Return'] = yoy['Return'].round(2).astype(str) + '%'
yoy['Cumulative'] = (yoy['Cumulative'] *
100).round(2).astype(str) + '%'
yoy.index.name = 'Year'
tpl = tpl.replace('{{eoy_title}}', '<h3>EOY Returns</h3>')
tpl = tpl.replace('{{eoy_table}}', _html_table(yoy))
dd = _stats.to_drawdown_series(returns)
dd_info = _stats.drawdown_details(dd).sort_values(
by='max drawdown', ascending=True)[:10]
dd_info = dd_info[['start', 'end', 'max drawdown', 'days']]
dd_info.columns = ['Started', 'Recovered', 'Drawdown', 'Days']
tpl = tpl.replace('{{dd_info}}', _html_table(dd_info, False))
# plots
figfile = _utils._file_stream()
_plots.returns(returns, benchmark, grayscale=grayscale,
figsize=(8, 5), subtitle=False,
savefig={'fname': figfile, 'format': figfmt},
show=False, ylabel=False, cumulative=compounded)
tpl = tpl.replace('{{returns}}', _embed_figure(figfile, figfmt))
figfile = _utils._file_stream()
_plots.log_returns(returns, benchmark, grayscale=grayscale,
figsize=(8, 4), subtitle=False,
savefig={'fname': figfile, 'format': figfmt},
show=False, ylabel=False, cumulative=compounded)
tpl = tpl.replace('{{log_returns}}', _embed_figure(figfile, figfmt))
if benchmark is not None:
figfile = _utils._file_stream()
_plots.returns(returns, benchmark, match_volatility=True,
grayscale=grayscale, figsize=(8, 4), subtitle=False,
savefig={'fname': figfile, 'format': figfmt},
show=False, ylabel=False, cumulative=compounded)
tpl = tpl.replace('{{vol_returns}}', _embed_figure(figfile, figfmt))
figfile = _utils._file_stream()
_plots.yearly_returns(returns, benchmark, grayscale=grayscale,
figsize=(8, 4), subtitle=False,
savefig={'fname': figfile, 'format': figfmt},
show=False, ylabel=False, compounded=compounded)
tpl = tpl.replace('{{eoy_returns}}', _embed_figure(figfile, figfmt))
figfile = _utils._file_stream()
_plots.histogram(returns, grayscale=grayscale,
figsize=(8, 4), subtitle=False,
savefig={'fname': figfile, 'format': figfmt},
show=False, ylabel=False, compounded=compounded)
tpl = tpl.replace('{{monthly_dist}}', _embed_figure(figfile, figfmt))
figfile = _utils._file_stream()
_plots.daily_returns(returns, grayscale=grayscale,
figsize=(8, 3), subtitle=False,
savefig={'fname': figfile, 'format': figfmt},
show=False, ylabel=False)
tpl = tpl.replace('{{daily_returns}}', _embed_figure(figfile, figfmt))
if benchmark is not None:
figfile = _utils._file_stream()
_plots.rolling_beta(returns, benchmark, grayscale=grayscale,
figsize=(8, 3), subtitle=False,
window1=win_half_year, window2=win_year,
savefig={'fname': figfile, 'format': figfmt},
show=False, ylabel=False)
tpl = tpl.replace('{{rolling_beta}}', _embed_figure(figfile, figfmt))
figfile = _utils._file_stream()
_plots.rolling_volatility(returns, benchmark, grayscale=grayscale,
figsize=(8, 3), subtitle=False,
savefig={'fname': figfile, 'format': figfmt},
show=False, ylabel=False, period=win_half_year,
trading_year_days=win_year)
tpl = tpl.replace('{{rolling_vol}}', _embed_figure(figfile, figfmt))
figfile = _utils._file_stream()
_plots.rolling_sharpe(returns, grayscale=grayscale,
figsize=(8, 3), subtitle=False,
savefig={'fname': figfile, 'format': figfmt},
show=False, ylabel=False, period=win_half_year,
trading_year_days=win_year)
tpl = tpl.replace('{{rolling_sharpe}}', _embed_figure(figfile, figfmt))
figfile = _utils._file_stream()
_plots.rolling_sortino(returns, grayscale=grayscale,
figsize=(8, 3), subtitle=False,
savefig={'fname': figfile, 'format': figfmt},
show=False, ylabel=False, period=win_half_year,
trading_year_days=win_year)
tpl = tpl.replace('{{rolling_sortino}}', _embed_figure(figfile, figfmt))
figfile = _utils._file_stream()
_plots.drawdowns_periods(returns, grayscale=grayscale,
figsize=(8, 4), subtitle=False,
savefig={'fname': figfile, 'format': figfmt},
show=False, ylabel=False, compounded=compounded)
tpl = tpl.replace('{{dd_periods}}', _embed_figure(figfile, figfmt))
figfile = _utils._file_stream()
_plots.drawdown(returns, grayscale=grayscale,
figsize=(8, 3), subtitle=False,
savefig={'fname': figfile, 'format': figfmt},
show=False, ylabel=False)
tpl = tpl.replace('{{dd_plot}}', _embed_figure(figfile, figfmt))
figfile = _utils._file_stream()
_plots.monthly_heatmap(returns, grayscale=grayscale,
figsize=(8, 4), cbar=False,
savefig={'fname': figfile, 'format': figfmt},
show=False, ylabel=False, compounded=compounded)
tpl = tpl.replace('{{monthly_heatmap}}', _embed_figure(figfile, figfmt))
figfile = _utils._file_stream()
_plots.distribution(returns, grayscale=grayscale,
figsize=(8, 4), subtitle=False,
savefig={'fname': figfile, 'format': figfmt},
show=False, ylabel=False, compounded=compounded)
tpl = tpl.replace('{{returns_dist}}', _embed_figure(figfile, figfmt))
tpl = _regex.sub(r'\{\{(.*?)\}\}', '', tpl)
tpl = tpl.replace('white-space:pre;', '')
if output is None:
# _open_html(tpl)
_download_html(tpl, download_filename)
return
with open(output, 'w', encoding='utf-8') as f:
f.write(tpl)
def full(returns, benchmark=None, rf=0., grayscale=False,
figsize=(8, 5), display=True, compounded=True,
trading_year_days=252):
dd = _stats.to_drawdown_series(returns)
dd_info = _stats.drawdown_details(dd).sort_values(
by='max drawdown', ascending=True)[:5]
if not dd_info.empty:
dd_info.index = range(1, min(6, len(dd_info)+1))
dd_info.columns = map(lambda x: str(x).title(), dd_info.columns)
if _utils._in_notebook():
iDisplay(iHTML('<h4>Performance Metrics</h4>'))
iDisplay(metrics(returns=returns, benchmark=benchmark,
rf=rf, display=display, mode='full',
compounded=compounded,
trading_year_days=trading_year_days))
iDisplay(iHTML('<h4>5 Worst Drawdowns</h4>'))
if dd_info.empty:
iDisplay(iHTML("<p>(no drawdowns)</p>"))
else:
iDisplay(dd_info)
iDisplay(iHTML('<h4>Strategy Visualization</h4>'))
else:
print('[Performance Metrics]\n')
metrics(returns=returns, benchmark=benchmark,
rf=rf, display=display, mode='full',
compounded=compounded,
trading_year_days=trading_year_days)
print('\n\n')
print('[5 Worst Drawdowns]\n')
if dd_info.empty:
print("(no drawdowns)")
else:
print(_tabulate(dd_info, headers="keys",
tablefmt='simple', floatfmt=".2f"))
print('\n\n')
print('[Strategy Visualization]\nvia Matplotlib')
plots(returns=returns, benchmark=benchmark,
grayscale=grayscale, figsize=figsize, mode='full',
trading_year_days=trading_year_days)
def basic(returns, benchmark=None, rf=0., grayscale=False,
figsize=(8, 5), display=True, compounded=True,
trading_year_days=252):
if _utils._in_notebook():
iDisplay(iHTML('<h4>Performance Metrics</h4>'))
metrics(returns=returns, benchmark=benchmark,
rf=rf, display=display, mode='basic',
compounded=compounded,
trading_year_days=trading_year_days)
iDisplay(iHTML('<h4>Strategy Visualization</h4>'))
else:
print('[Performance Metrics]\n')
metrics(returns=returns, benchmark=benchmark,
rf=rf, display=display, mode='basic',
compounded=compounded,
trading_year_days=trading_year_days)
print('\n\n')
print('[Strategy Visualization]\nvia Matplotlib')
plots(returns=returns, benchmark=benchmark,
grayscale=grayscale, figsize=figsize, mode='basic',
trading_year_days=trading_year_days)
def metrics(returns, benchmark=None, rf=0., display=True,
mode='basic', sep=False, compounded=True,
trading_year_days=252, **kwargs):
win_year, _ = _get_trading_periods(trading_year_days)
if isinstance(returns, _pd.DataFrame) and len(returns.columns) > 1:
raise ValueError("`returns` must be a pandas Series, "
"but a multi-column DataFrame was passed")
if benchmark is not None:
        if isinstance(benchmark, _pd.DataFrame) and len(benchmark.columns) > 1:
raise ValueError("`benchmark` must be a pandas Series, "
"but a multi-column DataFrame was passed")
blank = ['']
if isinstance(returns, _pd.DataFrame):
if len(returns.columns) > 1:
raise ValueError("`returns` needs to be a Pandas Series. DataFrame was passed")
returns = returns[returns.columns[0]]
df = _pd.DataFrame({"returns": _utils._prepare_returns(returns, rf)})
if benchmark is not None:
blank = ['', '']
df["benchmark"] = _utils._prepare_benchmark(
benchmark, returns.index, rf)
df = df.fillna(0)
# pct multiplier
pct = 100 if display or "internal" in kwargs else 1
# return df
dd = _calc_dd(df, display=(display or "internal" in kwargs))
metrics = _pd.DataFrame()
s_start = {'returns': df['returns'].index.strftime('%Y-%m-%d')[0]}
s_end = {'returns': df['returns'].index.strftime('%Y-%m-%d')[-1]}
s_rf = {'returns': rf}
if "benchmark" in df:
s_start['benchmark'] = df['benchmark'].index.strftime('%Y-%m-%d')[0]
s_end['benchmark'] = df['benchmark'].index.strftime('%Y-%m-%d')[-1]
s_rf['benchmark'] = rf
metrics['Start Period'] = _pd.Series(s_start)
metrics['End Period'] = _pd.Series(s_end)
metrics['Risk-Free Rate %'] = _pd.Series(s_rf)
metrics['Time in Market %'] = _stats.exposure(df) * pct
metrics['~'] = blank
if compounded:
metrics['Cumulative Return %'] = (
_stats.comp(df) * pct).map('{:,.2f}'.format)
else:
metrics['Total Return %'] = (df.sum() * pct).map('{:,.2f}'.format)
metrics['CAGR%%'] = _stats.cagr(df, rf, compounded) * pct
metrics['~~~~~~~~~~~~~~'] = blank
metrics['Sharpe'] = _stats.sharpe(df, rf, win_year, True, win_year)
metrics['Sortino'] = _stats.sortino(df, rf, win_year, True, win_year)
metrics['Sortino/√2'] = metrics['Sortino'] / _sqrt(2)
metrics['~~~~~~~~'] = blank
metrics['Max Drawdown %'] = blank
metrics['Longest DD Days'] = blank
if mode.lower() == 'full':
ret_vol = _stats.volatility(df['returns'], win_year, True, win_year) * pct
if "benchmark" in df:
bench_vol = _stats.volatility(df['benchmark'], win_year, True, win_year) * pct
metrics['Volatility (ann.) %'] = [ret_vol, bench_vol]
metrics['R^2'] = _stats.r_squared(df['returns'], df['benchmark'])
else:
metrics['Volatility (ann.) %'] = [ret_vol]
metrics['Calmar'] = _stats.calmar(df)
metrics['Skew'] = _stats.skew(df)
metrics['Kurtosis'] = _stats.kurtosis(df)
metrics['~~~~~~~~~~'] = blank
metrics['Expected Daily %%'] = _stats.expected_return(df) * pct
metrics['Expected Monthly %%'] = _stats.expected_return(
df, aggregate='M') * pct
metrics['Expected Yearly %%'] = _stats.expected_return(
df, aggregate='A') * pct
metrics['Kelly Criterion %'] = _stats.kelly_criterion(df) * pct
metrics['Risk of Ruin %'] = _stats.risk_of_ruin(df)
metrics['Daily Value-at-Risk %'] = -abs(_stats.var(df) * pct)
metrics['Expected Shortfall (cVaR) %'] = -abs(_stats.cvar(df) * pct)
metrics['~~~~~~'] = blank
metrics['Gain/Pain Ratio'] = _stats.gain_to_pain_ratio(df, rf)
metrics['Gain/Pain (1M)'] = _stats.gain_to_pain_ratio(df, rf, "M")
# if mode.lower() == 'full':
# metrics['GPR (3M)'] = _stats.gain_to_pain_ratio(df, rf, "Q")
# metrics['GPR (6M)'] = _stats.gain_to_pain_ratio(df, rf, "2Q")
# metrics['GPR (1Y)'] = _stats.gain_to_pain_ratio(df, rf, "A")
metrics['~~~~~~~'] = blank
metrics['Payoff Ratio'] = _stats.payoff_ratio(df)
metrics['Profit Factor'] = _stats.profit_factor(df)
metrics['Common Sense Ratio'] = _stats.common_sense_ratio(df)
metrics['CPC Index'] = _stats.cpc_index(df)
metrics['Tail Ratio'] = _stats.tail_ratio(df)
metrics['Outlier Win Ratio'] = _stats.outlier_win_ratio(df)
metrics['Outlier Loss Ratio'] = _stats.outlier_loss_ratio(df)
# returns
metrics['~~'] = blank
comp_func = _stats.comp if compounded else _np.sum
today = df.index[-1] # _dt.today()
metrics['MTD %'] = comp_func(
df[df.index >= _dt(today.year, today.month, 1)]) * pct
d = today - _td(3*365/12)
metrics['3M %'] = comp_func(
df[df.index >= _dt(d.year, d.month, d.day)]) * pct
d = today - _td(6*365/12)
metrics['6M %'] = comp_func(
df[df.index >= _dt(d.year, d.month, d.day)]) * pct
metrics['YTD %'] = comp_func(df[df.index >= _dt(today.year, 1, 1)]) * pct
d = today - _td(12*365/12)
metrics['1Y %'] = comp_func(
df[df.index >= _dt(d.year, d.month, d.day)]) * pct
d = today - _td(3*365)
metrics['3Y (ann.) %'] = _stats.cagr(
df[df.index >= _dt(d.year, d.month, d.day)
], 0., compounded) * pct
d = today - _td(5*365)
metrics['5Y (ann.) %'] = _stats.cagr(
df[df.index >= _dt(d.year, d.month, d.day)
], 0., compounded) * pct
d = today - _td(10*365)
metrics['10Y (ann.) %'] = _stats.cagr(
df[df.index >= _dt(d.year, d.month, d.day)
], 0., compounded) * pct
metrics['All-time (ann.) %'] = _stats.cagr(df, 0., compounded) * pct
# best/worst
if mode.lower() == 'full':
metrics['~~~'] = blank
metrics['Best Day %'] = _stats.best(df) * pct
metrics['Worst Day %'] = _stats.worst(df) * pct
metrics['Best Month %'] = _stats.best(df, aggregate='M') * pct
metrics['Worst Month %'] = _stats.worst(df, aggregate='M') * pct
metrics['Best Year %'] = _stats.best(df, aggregate='A') * pct
metrics['Worst Year %'] = _stats.worst(df, aggregate='A') * pct
# dd
metrics['~~~~'] = blank
for ix, row in dd.iterrows():
metrics[ix] = row
metrics['Recovery Factor'] = _stats.recovery_factor(df)
metrics['Ulcer Index'] = _stats.ulcer_index(df, rf)
metrics['Serenity Index'] = _stats.serenity_index(df, rf)
# win rate
if mode.lower() == 'full':
metrics['~~~~~'] = blank
metrics['Avg. Up Month %'] = _stats.avg_win(df, aggregate='M') * pct
metrics['Avg. Down Month %'] = _stats.avg_loss(df, aggregate='M') * pct
metrics['Win Days %%'] = _stats.win_rate(df) * pct
metrics['Win Month %%'] = _stats.win_rate(df, aggregate='M') * pct
metrics['Win Quarter %%'] = _stats.win_rate(df, aggregate='Q') * pct
metrics['Win Year %%'] = _stats.win_rate(df, aggregate='A') * pct
if "benchmark" in df:
metrics['~~~~~~~'] = blank
greeks = _stats.greeks(df['returns'], df['benchmark'], win_year)
metrics['Beta'] = [str(round(greeks['beta'], 2)), '-']
metrics['Alpha'] = [str(round(greeks['alpha'], 2)), '-']
# prepare for display
for col in metrics.columns:
try:
metrics[col] = metrics[col].astype(float).round(2)
if display or "internal" in kwargs:
metrics[col] = metrics[col].astype(str)
except Exception:
pass
if (display or "internal" in kwargs) and "%" in col:
metrics[col] = metrics[col] + '%'
try:
metrics['Longest DD Days'] = _pd.to_numeric(
metrics['Longest DD Days']).astype('int')
metrics['Avg. Drawdown Days'] = _pd.to_numeric(
metrics['Avg. Drawdown Days']).astype('int')
if display or "internal" in kwargs:
metrics['Longest DD Days'] = metrics['Longest DD Days'].astype(str)
            metrics['Avg. Drawdown Days'] = metrics['Avg. Drawdown Days'].astype(str)
except Exception:
metrics['Longest DD Days'] = '-'
metrics['Avg. Drawdown Days'] = '-'
if display or "internal" in kwargs:
metrics['Longest DD Days'] = '-'
metrics['Avg. Drawdown Days'] = '-'
metrics.columns = [
col if '~' not in col else '' for col in metrics.columns]
metrics.columns = [
col[:-1] if '%' in col else col for col in metrics.columns]
metrics = metrics.T
if "benchmark" in df:
metrics.columns = ['Strategy', 'Benchmark']
else:
metrics.columns = ['Strategy']
if display:
print(_tabulate(metrics, headers="keys", tablefmt='simple'))
return None
if not sep:
metrics = metrics[metrics.index != '']
return metrics
def plots(returns, benchmark=None, grayscale=False,
figsize=(8, 5), mode='basic', compounded=True,
trading_year_days=252):
win_year, win_half_year = _get_trading_periods(trading_year_days)
if mode.lower() != 'full':
_plots.snapshot(returns, grayscale=grayscale,
figsize=(figsize[0], figsize[0]),
show=True, mode=("comp" if compounded else "sum"))
_plots.monthly_heatmap(returns, grayscale=grayscale,
figsize=(figsize[0], figsize[0]*.5),
show=True, ylabel=False,
compounded=compounded)
return
_plots.returns(returns, benchmark, grayscale=grayscale,
figsize=(figsize[0], figsize[0]*.6),
show=True, ylabel=False)
_plots.log_returns(returns, benchmark, grayscale=grayscale,
figsize=(figsize[0], figsize[0]*.5),
show=True, ylabel=False)
if benchmark is not None:
_plots.returns(returns, benchmark, match_volatility=True,
grayscale=grayscale,
figsize=(figsize[0], figsize[0]*.5),
show=True, ylabel=False)
_plots.yearly_returns(returns, benchmark,
grayscale=grayscale,
figsize=(figsize[0], figsize[0]*.5),
show=True, ylabel=False)
_plots.histogram(returns, grayscale=grayscale,
figsize=(figsize[0], figsize[0]*.5),
show=True, ylabel=False)
_plots.daily_returns(returns, grayscale=grayscale,
figsize=(figsize[0], figsize[0]*.3),
show=True, ylabel=False)
if benchmark is not None:
_plots.rolling_beta(returns, benchmark, grayscale=grayscale,
window1=win_half_year, window2=win_year,
figsize=(figsize[0], figsize[0]*.3),
show=True, ylabel=False)
_plots.rolling_volatility(
returns, benchmark, grayscale=grayscale,
figsize=(figsize[0], figsize[0]*.3), show=True, ylabel=False,
period=win_half_year)
_plots.rolling_sharpe(returns, grayscale=grayscale,
figsize=(figsize[0], figsize[0]*.3),
show=True, ylabel=False, period=win_half_year)
_plots.rolling_sortino(returns, grayscale=grayscale,
figsize=(figsize[0], figsize[0]*.3),
show=True, ylabel=False, period=win_half_year)
_plots.drawdowns_periods(returns, grayscale=grayscale,
figsize=(figsize[0], figsize[0]*.5),
show=True, ylabel=False)
_plots.drawdown(returns, grayscale=grayscale,
figsize=(figsize[0], figsize[0]*.4),
show=True, ylabel=False)
_plots.monthly_heatmap(returns, grayscale=grayscale,
figsize=(figsize[0], figsize[0]*.5),
show=True, ylabel=False)
_plots.distribution(returns, grayscale=grayscale,
figsize=(figsize[0], figsize[0]*.5),
show=True, ylabel=False)
def _calc_dd(df, display=True):
dd = _stats.to_drawdown_series(df)
dd_info = _stats.drawdown_details(dd)
if dd_info.empty:
return _pd.DataFrame()
if "returns" in dd_info:
ret_dd = dd_info['returns']
else:
ret_dd = dd_info
# pct multiplier
pct = 1 if display else 100
dd_stats = {
'returns': {
'Max Drawdown %': ret_dd.sort_values(
by='max drawdown', ascending=True
)['max drawdown'].values[0] / pct,
'Longest DD Days': str(_np.round(ret_dd.sort_values(
by='days', ascending=False)['days'].values[0])),
'Avg. Drawdown %': ret_dd['max drawdown'].mean() / pct,
'Avg. Drawdown Days': str(_np.round(ret_dd['days'].mean()))
}
}
if "benchmark" in df and (dd_info.columns, _pd.MultiIndex):
bench_dd = dd_info['benchmark'].sort_values(by='max drawdown')
dd_stats['benchmark'] = {
'Max Drawdown %': bench_dd.sort_values(
by='max drawdown', ascending=True
)['max drawdown'].values[0] / pct,
'Longest DD Days': str(_np.round(bench_dd.sort_values(
by='days', ascending=False)['days'].values[0])),
'Avg. Drawdown %': bench_dd['max drawdown'].mean() / pct,
'Avg. Drawdown Days': str(_np.round(bench_dd['days'].mean()))
}
dd_stats =
|
_pd.DataFrame(dd_stats)
|
pandas.DataFrame
|
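A minimal, self-contained sketch of the construct highlighted above: pandas.DataFrame turns a nested dict of per-column statistics (like the dd_stats built in _calc_dd) into a frame whose outer keys become columns, which metrics() later transposes for display. All numbers below are invented for illustration.

import pandas as pd

# invented per-column drawdown statistics, mirroring the shape of dd_stats above
dd_stats = {
    'returns': {'Max Drawdown %': -23.4, 'Longest DD Days': 181,
                'Avg. Drawdown %': -4.2, 'Avg. Drawdown Days': 17},
    'benchmark': {'Max Drawdown %': -19.8, 'Longest DD Days': 142,
                  'Avg. Drawdown %': -3.6, 'Avg. Drawdown Days': 14},
}

summary = pd.DataFrame(dd_stats)  # outer keys become columns, inner keys become the index
print(summary)
print(summary.T)  # transposed view: one row per series, one column per statistic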
# -*- coding:utf-8 -*-
"""
Historical market data adapter (synchronized time axis)
Project: alphahunter
Author: HJQuant
Description: Asynchronous driven quantitative trading framework
"""
import asyncio
import numpy as np
import pandas as pd
from threading import Thread
from quant.config import config
from quant.tasks import SingleTask
from quant.utils import tools, logger
from quant.utils.decorator import async_method_locker
from quant.gateway import ExchangeGateway
from quant.state import State
from quant.infra_api import InfraAPI
from quant.const import MARKET_TYPE_KLINE
from quant.market import Kline, Orderbook, Trade
# make printed output display in full
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.set_option('display.width', 50000)
pd.set_option('max_colwidth', 1000)
class HistoryAdapter:
""" 历史行情适配器(同步时间轴)
"""
INTERVAL = 1*60*60*1000 #按每小时做为时间间隔读取数据库
gw_list = []
current_timestamp = None #回测环境中的"当前时间"
bind_strategy = None
def __init__(self, **kwargs):
self.gw_list.append(self)
@classmethod
def current_milli_timestamp(cls):
""" 获取回测环境中的"当前时间"
"""
return cls.current_timestamp
@classmethod
def new_loop_thread(cls, loop):
        # run the event loop, with loop passed in as the argument
asyncio.set_event_loop(loop)
loop.run_forever()
@classmethod
def initialize(cls, bind_strategy):
        if config.backtest:  # backtest mode
            cls._start_time = config.backtest["start_time"]  # start time
            cls._period_day = config.backtest["period_day"]  # backtest period in days
            cls._drive_type = config.backtest["drive_type"]  # data drive type: kline-driven, tick-by-tick trade-driven, or orderbook-driven
        elif config.datamatrix:  # datamatrix mode
            cls._start_time = config.datamatrix["start_time"]
            cls._period_day = config.datamatrix["period_day"]
            cls._drive_type = config.datamatrix["drive_type"]
        #----------------------------------------------------
        ts = tools.datetime_str_to_ts(cls._start_time, fmt='%Y-%m-%d')  # convert to a timestamp
        ts *= 1000  # convert to a millisecond timestamp
cls.current_timestamp = ts
cls.bind_strategy = bind_strategy
@classmethod
async def start(cls):
""" 开始喂历史数据
"""
if config.backtest and int(cls._period_day) < 3: #回测时间不能少于三天
logger.error("error:", "回测时间不能少于三天", caller=cls)
return
thread_loop = asyncio.new_event_loop() #创建新的事件循环
run_loop_thread = Thread(target=cls.new_loop_thread, args=(thread_loop,), name="_work_thread_") #新起线程运行事件循环, 防止阻塞主线程
run_loop_thread.start() #运行线程,即运行协程事件循环
#在主线程中运行
for gw in cls.gw_list:
await gw.launch() #模拟交易接口连接初始化成功
#1.算出begin_time和end_time
#2.然后按1小时为一个单位调用 按drive_type 依次调用gw_list里面每个对象的gw.load_data(drive_type, begin_time, end_time)
#3.将上一步读取到的所有pandas合并成一个大的pandas, 然后按dt进行排序
#4.循环遍历这个大的pandas,依据记录里面的self对象,把数据逐条推送给相应BacktestTrader
#5.BacktestTrader里面将记录按drive_type转换为相应的结构 然后调用相应on_xxxx
#6.重复第二步
#备注:每次把时间dt记录下来 作为回测环境的当前时间
begin_time = tools.datetime_str_to_ts(cls._start_time, fmt='%Y-%m-%d') #转换为时间戳
begin_time *= 1000 #转换为毫秒时间戳
end_time = begin_time + int(cls._period_day)*24*60*60*1000 #回测结束毫秒时间戳
bt = begin_time
et = begin_time + cls.INTERVAL
while et <= end_time: #每次从数据库中读取一段时间的数据
pd_list = []
for gw in cls.gw_list:
for t in cls._drive_type:
df = await gw.load_data(t, bt, et)
if not df.empty:
pd_list.append(df)
#-------------------------------------
            # advance to the next time slice
            bt = et
            et = bt + cls.INTERVAL
            #-------------------------------------
            # The function below must hold a lock: if the strategy being backtested awaits somewhere,
            # a new task would get scheduled and the backtest data time axis would be scrambled, hence the synchronization lock.
            @async_method_locker("HistoryAdapter.start.task")  # acquire the lock
async def task(pd_list):
                if pd_list:
                    # combine into one large table
                    df = pd.concat(pd_list, sort=False)
                    # Sort by collection time. This step is crucial: it arranges all types of historical data
                    # on a single time axis in collection order, consistent with the order of live market data.
                    sorted_df = df.sort_values(by='dt', kind='mergesort')
                    for _, row in sorted_df.iterrows():
                        cls.current_timestamp = int(row['dt'])  # the "current time" of the backtest environment
                        gw = row['gw']
                        await gw.feed(row)  # feed each row to its corresponding virtual adapter interface
                elif pd_list is None:
                    # everything has been processed; wrap up
                    # notify the virtual adapters
                    for gw in cls.gw_list:
                        await gw.done()
                    # notify the strategy
                    await cls.bind_strategy.done()
                    # stop the event-loop thread created earlier
thread_loop.stop()
#----------------------------------------------------------------
            # In practice, the strategy backtest and the MongoDB reads (I/O) cannot run concurrently in the same
            # thread's event loop, so the backtest task is moved to a new thread to achieve concurrency. Note that
            # because the strategy then runs in a new event loop in a new thread, it cannot access the database
            # directly (the MongoDB connection is bound to the main thread's event loop), so ModelApi must be
            # adapted: in backtest mode, database operations have to be posted to the main thread's event loop.
            # run in the main thread
            #SingleTask.run(task, pd_list)
            # run in the worker thread
asyncio.run_coroutine_threadsafe(task(pd_list), thread_loop)
#end while
        # notify completion
asyncio.run_coroutine_threadsafe(task(None), thread_loop)
class VirtualTrader(HistoryAdapter, ExchangeGateway):
""" VirtualTrader module. You can initialize trader object with some attributes in kwargs.
"""
def __init__(self, **kwargs):
"""Initialize."""
self.cb = kwargs["cb"]
self._platform = kwargs.get("databind")
self._symbols = kwargs.get("symbols")
self._strategy = kwargs.get("strategy")
self._account = kwargs.get("account")
state = None
if not self._platform:
state = State(self._platform, self._account, "param platform miss")
elif not self._symbols:
state = State(self._platform, self._account, "param symbols miss")
elif not self._strategy:
state = State(self._platform, self._account, "param strategy miss")
if state:
logger.error(state, caller=self)
return
super(VirtualTrader, self).__init__(**kwargs)
async def load_data(self, drive_type, begin_time, end_time):
""" 从数据库中读取历史数据
"""
try:
if drive_type == "kline":
pd_list = []
for symbol in self._symbols:
r = await InfraAPI.get_klines_between(self._platform, symbol, begin_time, end_time)
if r:
                        # 1. Convert r to a DataFrame.
                        # 2. Add three columns: drive_type, symbol and this object (self), and copy the begin_dt column
                        #    into a new column named dt so everything can be sorted uniformly later.
                        # 3. Append the DataFrame to pd_list.
df = pd.DataFrame(r)
df["drive_type"] = drive_type
df["symbol"] = symbol
df["gw"] = self
df["dt"] = df["begin_dt"]
del df["_id"]
pd_list.append(df)
                # Concatenate all DataFrames in pd_list row-wise into one large DataFrame
                # and return it
if pd_list:
return pd.concat(pd_list)
else:
return pd.DataFrame()
elif drive_type == "trade":
pd_list = []
for symbol in self._symbols:
r = await InfraAPI.get_trades_between(self._platform, symbol, begin_time, end_time)
if r:
                        # 1. Convert r to a DataFrame.
                        # 2. Add three columns: drive_type, symbol and this object (self).
                        # 3. Append the DataFrame to pd_list.
df =
|
pd.DataFrame(r)
|
pandas.DataFrame
|
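The synchronization trick described in HistoryAdapter.start above, reduced to a stand-alone sketch: concatenate per-source history frames and stable-sort them on the shared dt column so every record lands on one time axis. The data below is made up.

import pandas as pd

# hypothetical history slices from two sources; 'dt' is a millisecond timestamp
klines = pd.DataFrame({'dt': [1000, 3000, 5000], 'kind': 'kline', 'close': [10.0, 10.2, 10.1]})
trades = pd.DataFrame({'dt': [1000, 2000, 5000], 'kind': 'trade', 'price': [10.0, 10.1, 10.1]})

merged = pd.concat([klines, trades], sort=False)
# mergesort is stable, so records sharing a timestamp keep their original relative order
timeline = merged.sort_values(by='dt', kind='mergesort')
for _, row in timeline.iterrows():
    print(int(row['dt']), row['kind'])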
#!/usr/bin/env python
# coding: utf-8
#get_ipython().magic('load_ext autoreload')
#get_ipython().magic('reload_ext autoreload')
import requests
import lxml.html as lh
from xml.etree import ElementTree
import pandas as pd
import numpy as np
from bs4 import BeautifulSoup
from queue import Queue, Empty
from urllib.parse import urljoin, urlparse
import urllib.robotparser
import string
import json
import pickle
import re
import unicodedata
from unidecode import unidecode
from itertools import chain
from collections import Counter
from urllib.parse import unquote
import operator
from matplotlib import pyplot as plt
import math
import statistics
import WOSutilities as wosutil
from nameparser import HumanName
import name_tools
import enchant
d=enchant.Dict('en_US')
import imp
import load_data
get_ipython().run_cell_magic('bash', '', 'jupyter nbconvert Nobel_prize_crawl.ipynb --to script')
def strip_accents(text):
"""
Strip accents from input String.
:param text: The input string.
:type text: String.
:returns: The processed String.
:rtype: String.
"""
try:
text = unicode(text, 'utf-8')
except (TypeError, NameError): # unicode is a default on python 3
pass
text = unicodedata.normalize('NFD', text)
text = text.encode('ascii', 'ignore')
text = text.decode("utf-8")
return str(text)
def copytofile(raw_html,filename):
with open(filename, 'wb') as outfile:
outfile.write(raw_html)
#retrieve nobel prize laureates from main WIKI page
nobel_prize_page='https://en.wikipedia.org/wiki/List_of_Nobel_laureates_in_Physics'
page = requests.get(nobel_prize_page)
doc = lh.fromstring(page.content)
list_of_nobel_prize_winners=[]
tr_elements=doc.xpath('//*[@id="mw-content-text"]/div/table[1]/tbody/tr')
prev=None
#print(tr_elements)
for each_tr_element in tr_elements:
winner_href=None
winner_title=None
year=None
#print(each_tr_element)
td_elements=each_tr_element.xpath('.//td')
if td_elements:
if td_elements[0].xpath('boolean(.//a[contains(@class,"image")])') is False:
year=td_elements[0].text
year=year.strip("\n")
            # for shared prizes in a year
if year == '' or year == '–':
year=prev
prev=year
else:
year=prev
th_elements=each_tr_element.xpath('.//th')
if th_elements:
winner_href=th_elements[0].xpath('./a/@href')
winner_title=th_elements[0].xpath('./a/@title')
if winner_href and winner_title:
list_of_nobel_prize_winners.append([int(year),re.sub(r"\(chemist\)|\(physicist\)",'',clean_data.strip_accents(winner_title[0])),winner_href[0],parse_web.urlCanonicalization(winner_href[0], base_url=nobel_prize_page)])
#creating dataframe with winners,year they were awarded and url of the winner page
nobel_prize_winners=pd.DataFrame(list_of_nobel_prize_winners,columns=['Year','Name','Url','Cannonicalized_Url'])
#to retrieve all relevant information available on the winner's WIKI page
def update_winner_information(prize_type,prize_winners_dataframe,path_to_store_crawled_info):
winner_wiki_information={}
doc_num=0
count=0
visited_seed=set()
for index,row in prize_winners_dataframe.iterrows():
count=count+1
url=row['Cannonicalized_Url']
if url in visited_seed or not parse_web.ispolite(url):
continue
print(row['Name'])
visited_seed.add(url)
page = requests.get(url)
doc_num=doc_num+1
raw_html=page.content
doc = lh.fromstring(page.content)
path=path_to_store_crawled_info+'/'+prize_type+'-document-{0}'
copytofile(raw_html,path.format(doc_num))
winner_wiki_information.update(parse_web.get_wiki_information(prize_type,doc_num,doc))
return winner_wiki_information
nobel_winner_wiki_information=update_winner_information('nobel',nobel_prize_winners,'/home/apoorva_kasoju2712/nobel_crawled_data')
#store nobel_winner_wiki_information as pickled file
with open('/home/apoorva_kasoju2712/wos_samplecode/nobel_winner_wiki_p.pickle', 'wb') as handle:
pickle.dump(nobel_winner_wiki_information, handle, protocol=pickle.HIGHEST_PROTOCOL)
#retrieve stored nobel_winner_wiki_information
with open('/home/apoorva_kasoju2712/wos_samplecode/nobel_winner_wiki_p.pickle', 'rb') as handle:
nobel_winner_wiki_information = pickle.load(handle)
path2rawdata='/home/apoorva_kasoju2712/WOS_data'
#loading article_df
article_df=load_data.load_article_data(path2rawdata)
#store article_df as hdf5
article_df.to_hdf('/home/apoorva_kasoju2712/wos_samplecode/article_df_data.h5','article_df',mode='w',format='table',complevel=9,complib ='blosc')
#or use loaded article_df
#article_df=pd.read_hdf('/home/apoorva_kasoju2712/wos_samplecode/article_df_data.h5','article_df')
#loading author df
author_df=load_data.load_author_data(path2rawdata)
#store author_df as hdf5
author_df.to_hdf('/home/apoorva_kasoju2712/wos_samplecode/author_df_data_full.h5','author_df_full',mode='w',format='table',complevel=9,complib ='blosc')
#or use loaded author_df
author_df=pd.read_hdf('/home/apoorva_kasoju2712/wos_samplecode/author_df_data_full.h5','author_df_full')
#loading address df
address_df=load_data.load_address_data(path2rawdata)
#converting address df to hdf5 and store
address_df.to_hdf('/home/apoorva_kasoju2712/wos_samplecode/address_df_data.h5','address_df',mode='w',format='table',complevel=9,complib ='blosc')
#or use loaded address_df
#address_df=pd.read_hdf('/home/apoorva_kasoju2712/wos_samplecode/address_df_data.h5','address_df',mode='w',format='table',complevel=9,complib ='blosc')
#loading paper_address df
paper_address_df=load_data.load_paper_address_data(path2rawdata)
#converting paper_address df to hdf5 and store
paper_address_df.to_hdf('/home/apoorva_kasoju2712/wos_samplecode/paper_address_df_data.h5','paper_address_df',mode='w',format='table',complevel=9,complib ='blosc')
#or use loaded paper_address_df
paper_address_df=pd.read_hdf('/home/apoorva_kasoju2712/wos_samplecode/paper_address_df_data.h5','paper_address_df')
#merge paper_address and address_df
address_df_merged=pd.merge(paper_address_df[['ArticleID','AddressOrder','Organization','SubOrganization']], address_df[['ArticleID','AuthorOrder','AddressOrder']], how='inner', on=['ArticleID','AddressOrder'])
address_df_merged["AddressOrder"]=address_df_merged["AddressOrder"].astype('int64')
address_df_merged["AuthorOrder"]=address_df_merged["AuthorOrder"].astype('int64')
address_df_merged.sort_values(by = ['AuthorOrder','AddressOrder'], inplace = True)
address_df_merged.dropna(subset=['AuthorOrder','ArticleID'], inplace=True)
#prepare author_address
author_address=
|
pd.merge(author_df[['ArticleID','FullName', 'LastName', 'FirstName','AuthorDAIS','AuthorOrder']],address_df_merged[['ArticleID','AuthorOrder','Organization']],on=['ArticleID','AuthorOrder'], how='inner')
|
pandas.merge
|
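The author/address join above relies on pandas.merge with a composite key. A small sketch with invented ArticleID/AuthorOrder values showing how the inner join keeps only pairs present in both frames:

import pandas as pd

authors = pd.DataFrame({
    'ArticleID': [1, 1, 2],
    'AuthorOrder': [1, 2, 1],
    'FullName': ['A. Smith', 'B. Jones', 'C. Wu'],
})
addresses = pd.DataFrame({
    'ArticleID': [1, 2, 3],
    'AuthorOrder': [1, 1, 1],
    'Organization': ['Univ X', 'Lab Z', 'Univ Y'],
})

# inner join on the composite key; rows without a match on both keys are dropped
author_address = pd.merge(authors, addresses, on=['ArticleID', 'AuthorOrder'], how='inner')
print(author_address)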
"""
Script to run FAST (Fourier amplitude sensitivity testing).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
#from pysqlite3 import dbapi2 as sqlite3
import os
import subprocess
from distutils.dir_util import remove_tree
import shutil
import time
import spotpy
import pandas as pd
from collections import OrderedDict
import numpy as np
import sys
import matplotlib.pyplot as plt
import logging
logging.basicConfig(filename='fast.log', level=logging.INFO, filemode="w")
plt.switch_backend('agg')
class fast_run_setup(object):
def __init__(self, parallel='seq'):
self.params = [spotpy.parameter.Uniform('exponential_decrease_62',low=0.5, high=3, optguess=1.5),
spotpy.parameter.Uniform('lateral_conductivity_62',low=0.0002, high=0.0015, optguess=0.0008),
spotpy.parameter.Uniform('exponent',low=-.20, high=-.15, optguess=-.17)
]
self.evals1 = pd.read_csv(VALIDATION1_CSV)['value'].values
self.evals2 = pd.read_csv(VALIDATION2_CSV)['value'].values
self.parallel = parallel
def parameters(self):
return spotpy.parameter.generate(self.params)
#setting up simulation for location:12189500 with predefined params and writing to config file
def simulation(self, x):
pid = str(os.getpid())
logging.info("Initiating Copy for Process %d", format(pid))
child_dir = "./" + DIR_PREFIX + pid
shutil.copytree(".", child_dir, ignore=shutil.ignore_patterns(DIR_PREFIX + "*", DB_NAME + "*"))
logging.info("Copy for Process completed %d", format(pid))
logging.info("Forking into %s", child_dir)
os.chdir(child_dir)
#write DREAM parameter input to config file.
#soil parameters
#loam and roccky colluvium a function of sandy loam based on values in Rawls et al., 1982
#sandy loam, soil type 62, sat K and exponential decrease determined by script
change_setting(CONFIG_FILE, "Exponential Decrease 62", str(round(x[0],5)))
change_setting(CONFIG_FILE, "Lateral Conductivity 62", str(round(x[1],5)))
change_setting(CONFIG_FILE, "Maximum Infiltration 62", str(round(x[1]*2,5))) #assume equalt to 2*saturated hydraulic conductivity
# change_setting(CONFIG_FILE, "Porosity 62"," ".join([str(round(x[2],5)),str(round(x[2]-.05,5)),str(round(x[2]-.1,5))]))
change_setting(CONFIG_FILE, "Vertical Conductivity 62"," ".join([str(round(x[1],5)),str(round(x[1],5)),str(round(x[1],5))]))
#loam - sat K and exponential decrease 5 times less than sandy loam, porosity equal to sandy loam layer 1 (to account for high depth)
change_setting(CONFIG_FILE, "Exponential Decrease 61", str(round(x[0]/5,5)))
change_setting(CONFIG_FILE, "Lateral Conductivity 61", str(round(x[1]/5,5)))
change_setting(CONFIG_FILE, "Maximum Infiltration 61", str(round(x[1]/5*2,5)))
# change_setting(CONFIG_FILE, "Porosity 61"," ".join([str(round(x[2],5)),str(round(x[2],5)),str(round(x[2],5))]))
change_setting(CONFIG_FILE, "Vertical Conductivity 61"," ".join([str(round(x[1]/5,5)),str(round(x[1]/5,5)),str(round(x[1]/5,5))]))
#rocky colluvium -treat as coarse sand - sat K and exponential decrease are 2 to 3 times greater than sandy loam, porosity .1 to .15 less than sandy loam
change_setting(CONFIG_FILE, "Exponential Decrease 65", str(round(x[0]*2,5)))
change_setting(CONFIG_FILE, "Lateral Conductivity 65", str(round(x[1]*3,5)))
change_setting(CONFIG_FILE, "Maximum Infiltration 65", str(round(x[1]*3,5)))
# change_setting(CONFIG_FILE, "Porosity 65"," ".join([str(round(x[2],5)),str(round(x[2]-.1,5)),str(round(x[2]-.15,5))]))
change_setting(CONFIG_FILE, "Vertical Conductivity 65"," ".join([str(round(x[1]*3,5)),str(round(x[1]*3,5)),str(round(x[1]*3,5))]))
write_streamclass(stream_geometry,x[2],stream_class_file)
#run DHSVM with modified parameters in config file
subprocess.call(DHSVM_CMD, shell=True, stdout=False, stderr=False)
simulations=[]
#read streamflow data from DHSVM output file
with open(STREAMFLOW_ONLY, 'r') as file_output:
header_name = file_output.readlines()[0].split(' ')
with open(STREAMFLOW_ONLY) as inf:
next(inf)
date_q = []
q_12189500 = []
q_12186000 = []
for line in inf:
parts = line.split()
if len(parts) > 1:
date_q.append(parts[0])
q_12189500.append(float(parts[10])/(3600*1)) #12189500 is Sauk at Sauk
                    q_12186000.append(float(parts[374])/(3600*1)) #12186000 is Sauk above Whitechuck
os.chdir("..")
logging.info("Removing copied directory %s", str(child_dir))
remove_tree(child_dir)
logging.info("Removed directory %s", str(child_dir))
simulation_streamflow =
|
pd.DataFrame({'x[0]':date_q, 'x[180]':q_12189500,'x[5676]':q_12186000})
|
pandas.DataFrame
|
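After a DHSVM run, the script above packs dates and simulated flows into a DataFrame; the usual next step is scoring the simulation against observations. A hedged sketch with invented values (spotpy ships its own objective functions; this only shows a plain RMSE):

import numpy as np
import pandas as pd

date_q = ['2010-10-01', '2010-10-02', '2010-10-03']
q_sim = [12.3, 14.1, 13.8]   # simulated discharge, m3/s (invented)
q_obs = [11.9, 14.5, 13.2]   # observed discharge, m3/s (invented)

sim = pd.DataFrame({'date': pd.to_datetime(date_q), 'q_sim': q_sim, 'q_obs': q_obs})
rmse = float(np.sqrt(((sim['q_sim'] - sim['q_obs']) ** 2).mean()))
print(sim)
print('RMSE =', round(rmse, 3))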
import pandas as pd
import numpy as np
import torch
import os.path
from glob import glob
from datetime import datetime
from base.torchvision_dataset import TorchvisionDataset
from torch.utils.data import TensorDataset
class HR_Dataset(TorchvisionDataset):
def __init__(self, root:str, normal_class):
super().__init__(root)
self.normal_class = normal_class
# x_array = [[[0 for k in range(3)] for j in range(11932)]]
# load lists of participant ids
# id_fb, id_nfb = load_id('/workspace/HR_WearablesData/')
# id_fb = np.load("/workspace/fitbit_id.npy")
# id_nfb = np.load("/workspace/nonfitbit_id.npy")
# id_anomalies = load_labels('/workspace/datasets/Health New Labeling.xlsx')
# df = load_fitbit_data(id_fb[0])
# x_array = cut_to_same_length(df, x_array)
# y_array = np.zeros(x_array.shape[0])
# index_array = np.arange(x_array.shape[0])
print("start")
dim1_train = pd.read_csv("/workspace/dim1_train.txt").to_numpy()
dim2_train = pd.read_csv("/workspace/dim2_train.txt").to_numpy()
dim3_train = pd.read_csv("/workspace/dim3_train.txt").to_numpy()
dim1_test = pd.read_csv("/workspace/dim1_test.txt").to_numpy()
dim2_test = pd.read_csv("/workspace/dim2_test.txt").to_numpy()
dim3_test = pd.read_csv("/workspace/dim3_test.txt").to_numpy()
labels_train = pd.read_csv("/workspace/labels_train.csv").to_numpy()
labels_test =
|
pd.read_csv("/workspace/labels_test.csv")
|
pandas.read_csv
|
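The dataset class above reads each heart-rate channel from a separate file and converts it with to_numpy(); the channels are then typically stacked into a single feature array. A sketch using in-memory CSV text instead of the /workspace paths:

import io
import numpy as np
import pandas as pd

# stand-ins for dim1_train.txt, dim2_train.txt and dim3_train.txt
csv1 = io.StringIO("hr\n61\n63\n60\n")
csv2 = io.StringIO("steps\n0\n12\n3\n")
csv3 = io.StringIO("sleep\n1\n0\n0\n")

dims = [pd.read_csv(f).to_numpy() for f in (csv1, csv2, csv3)]
x = np.concatenate(dims, axis=1)  # shape (n_samples, 3): one column per channel
print(x.shape)
print(x)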
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from subprocess import check_output
import plotly.offline as py
import plotly.graph_objs as go
import plotly.tools as tls
# distance package is used for finding longest common subsequence between two strings
import os, gc, re, distance
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from bs4 import BeautifulSoup
from fuzzywuzzy import fuzz
from sklearn.manifold import TSNE
# Import the Required lib packages for WORD-Cloud generation
# https://stackoverflow.com/questions/45625434/how-to-install-wordcloud-in-python3-6
from wordcloud import WordCloud, STOPWORDS
from os import path
from PIL import Image
from sklearn.preprocessing import MinMaxScaler
#https://stackoverflow.com/questions/12468179/unicodedecodeerror-utf8-codec-cant-decode-byte-0x9c
if os.path.isfile('dataset\df_fe_without_preprocessing_train.csv'):
df =
|
pd.read_csv("dataset\df_fe_without_preprocessing_train.csv",encoding='latin-1')
|
pandas.read_csv
|
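The os.path.isfile guard above is a caching pattern: compute the feature table once, write it to CSV, and reload it on later runs. A minimal sketch with a hypothetical cache path and a toy feature:

import os
import pandas as pd

CACHE = 'df_fe_cache.csv'  # hypothetical cache file

if os.path.isfile(CACHE):
    df = pd.read_csv(CACHE, encoding='latin-1')
else:
    df = pd.DataFrame({'question1': ['how to learn python', 'what is ai'],
                       'question2': ['how to learn python fast', 'what is ai']})
    # toy feature standing in for the real preprocessing
    df['q1_len'] = df['question1'].str.len()
    df.to_csv(CACHE, index=False)
print(df.head())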
# grid search holt winter's exponential smoothing
from math import sqrt
from multiprocessing import cpu_count
from joblib import Parallel
from joblib import delayed
from warnings import catch_warnings
from warnings import filterwarnings
from statsmodels.tsa.holtwinters import ExponentialSmoothing
from sklearn.metrics import mean_squared_error
from numpy import array
from pandas import read_csv
from pandas import to_datetime
# split a univariate dataset into train/test sets using a test-size ratio
def train_test_split_all(data, n_testratio):
n_test = int(len(data) * n_testratio)
return data[:-n_test], data[-n_test:]
# one-step Holt Winters Exponential Smoothing forecast
def exp_smoothing_forecast(history, config):
t,d,s,p,b,r = config
# define model
history = array(history)
model = ExponentialSmoothing(history, trend=t, damped=d, seasonal=s, seasonal_periods=p)
# fit model
model_fit = model.fit(optimized=True, use_boxcox=b, remove_bias=r)
# make one step forecast
yhat = model_fit.predict(len(history), len(history))
return yhat[0]
# root mean squared error or rmse
def measure_rmse(actual, predicted):
return sqrt(mean_squared_error(actual, predicted))
# split a univariate dataset into train/test sets
def train_test_split(data, n_test):
return data[:-n_test], data[-n_test:]
# walk-forward validation for univariate data
def walk_forward_validation(data, n_test, cfg):
predictions = list()
# split dataset
train, test = train_test_split(data, n_test)
# seed history with training dataset
history = [x for x in train]
# step over each time-step in the test set
for i in range(len(test)):
# fit model and make forecast for history
yhat = exp_smoothing_forecast(history, cfg)
# store forecast in list of predictions
predictions.append(yhat)
# add actual observation to history for the next loop
history.append(test[i])
# estimate prediction error
error = measure_rmse(test, predictions)
return error
# score a model, return None on failure
def score_model(data, n_test, cfg, debug=False):
result = None
# convert config to a key
key = str(cfg)
# show all warnings and fail on exception if debugging
if debug:
result = walk_forward_validation(data, n_test, cfg)
else:
# one failure during model validation suggests an unstable config
try:
# never show warnings when grid searching, too noisy
with catch_warnings():
filterwarnings("ignore")
result = walk_forward_validation(data, n_test, cfg)
except:
error = None
# check for an interesting result
if result is not None:
print(' > Model[%s] %.3f' % (key, result))
return (key, result)
# grid search configs
def grid_search(data, cfg_list, n_test, parallel=True):
scores = None
if parallel:
# execute configs in parallel
executor = Parallel(n_jobs=cpu_count(), backend='multiprocessing')
tasks = (delayed(score_model)(data, n_test, cfg) for cfg in cfg_list)
scores = executor(tasks)
else:
scores = [score_model(data, n_test, cfg) for cfg in cfg_list]
# remove empty results
scores = [r for r in scores if r[1] != None]
# sort configs by error, asc
scores.sort(key=lambda tup: tup[1])
return scores
# create a set of exponential smoothing configs to try
def exp_smoothing_configs(seasonal=[None]):
models = list()
# define config lists
t_params = ['add', 'mul', None]
d_params = [True, False]
s_params = ['add', 'mul', None]
p_params = seasonal
b_params = [True, False]
r_params = [True, False]
# create config instances
for t in t_params:
for d in d_params:
for s in s_params:
for p in p_params:
for b in b_params:
for r in r_params:
cfg = [t,d,s,p,b,r]
models.append(cfg)
return models
if __name__ == '__main__':
# Load data
DIR = '../Logsn/ind_and_selBcol/v140/'
FILE = 'JPmth023.csv'
filename = DIR + FILE
# Use labels
names = ['BatteryStateOfCharge_Percent','BatteryVoltage_V','A_mean','min',
'Wh_sum','DSOC','DV','fD_all','fD_sel','cyc','TemperatureEnvironment_C','SOH']
# Below if swapping to file from one month interval to X interval data.
raw_data =
|
read_csv(filename, header=0)
|
pandas.read_csv
|
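A sketch of how the pieces above are typically wired together once the data is loaded. It assumes the functions defined above (exp_smoothing_configs, grid_search) are in scope; the synthetic series and the hold-out size are illustrative, not part of the original script.

from numpy import sin, arange
# assumes exp_smoothing_configs() and grid_search() from above are in scope
data = 10 + 0.1 * arange(120) + sin(arange(120) / 6.0)   # synthetic trending series
n_test = 24                                              # hold out the last 24 points
cfg_list = exp_smoothing_configs(seasonal=[None, 12])
scores = grid_search(data, cfg_list, n_test, parallel=False)
for cfg, error in scores[:3]:                            # three best configurations
    print(cfg, error)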
# Builtins
import datetime as dt
from typing import Any, Dict, List, Tuple
import os.path
from pathlib import Path
# External libraries
import pandas as pd
import numpy as np
from tqdm import tqdm
import pytz
# Submodule imports
from harvest.storage import PickleStorage
import harvest.trader.trader as trader
from harvest.api.yahoo import YahooStreamer
from harvest.api.paper import PaperBroker
from harvest.storage import BaseLogger
from harvest.utils import *
class BackTester(trader.PaperTrader):
"""
This class replaces several key functions to allow backtesting
on historical data.
"""
def __init__(self, streamer=None, debug=False, config={}):
"""Initializes the TestTrader."""
self.streamer = YahooStreamer() if streamer is None else streamer
self.broker = PaperBroker()
self.storage = PickleStorage(limit_size=False) # local cache of historic price
self._init_attributes()
self._setup_debugger(debug)
def start(
self,
interval: str = "5MIN",
aggregations: List[Any] = [],
source: str = "PICKLE",
path: str = "./data",
start=None,
end=None,
period=None,
):
"""Runs backtesting.
The interface is very similar to the Trader class, with some additional parameters for specifying
backtesting configurations.
:param str? interval: The interval to run the algorithm on. defaults to '5MIN'.
:param List[str]? aggregations: The aggregations to run. defaults to [].
:param str? source: The source of backtesting data.
'FETCH' will pull the latest data using the broker (if specified).
'CSV' will read data from a locally saved CSV file.
'PICKLE' will read data from a locally saved pickle file, generated using the Trader class.
defaults to 'PICKLE'.
:param str? path: The path to the directory which backtesting data is stored.
This parameter must be set accordingly if 'source' is set to 'CSV' or 'PICKLE'. defaults to './data'.
"""
debugger.debug(f"Storing asset data in {path}")
for a in self.algo:
a.config()
self._setup(source, interval, aggregations, path, start, end, period)
self.broker.setup(self.interval, self, self.main)
self.streamer.setup(self.interval, self, self.main)
for a in self.algo:
a.setup()
a.trader = self
self.run_backtest()
def _setup(
self,
source: str,
interval: str,
aggregations: List,
path: str,
start,
end,
period,
):
self._setup_params(interval, aggregations)
self._setup_account()
self.df = {}
self.storage.limit_size = False
start = convert_input_to_datetime(start, self.timezone)
end = convert_input_to_datetime(end, self.timezone)
period = convert_input_to_timedelta(period)
if start is None:
if end is None:
if period is None:
start = "MAX"
else:
start = "PERIOD"
else:
start = end - period
if end is None:
if start == "MAX" or start == "PERIOD" or period is None:
end = "MAX"
else:
end = start + period
if source == "PICKLE":
self.read_pickle_data()
elif source == "CSV":
self.read_csv_data(path)
else:
raise Exception(f"Invalid source {source}. Must be 'PICKLE' or 'CSV'")
common_start = None
common_end = None
for s in self.interval:
for i in [self.interval[s]["interval"]] + self.interval[s]["aggregations"]:
df = self.storage.load(s, i, no_slice=True)
if common_start is None or df.index[0] > common_start:
common_start = df.index[0]
if common_end is None or df.index[-1] < common_end:
common_end = df.index[-1]
if start == "PERIOD":
start = common_end - period
if start != "MAX" and start < common_start:
raise Exception(f"Not enough data is available for a start time of {start}")
if end != "MAX" and end > common_end:
raise Exception(
f"Not enough data is available for an end time of {end}: \nLast datapoint is {common_end}"
)
if start == "MAX":
start = common_start
if end == "MAX":
end = common_end
self.common_start = start
self.common_end = end
print(f"Common start: {start}, common end: {end}")
for s in self.interval:
for i in [self.interval[s]["interval"]] + self.interval[s]["aggregations"]:
df = self.storage.load(s, i, no_slice=True).copy()
df = df.loc[start:end]
self.storage.reset(s, i)
self.storage.store(s, i, df)
conv = {
Interval.MIN_1: 1,
Interval.MIN_5: 5,
Interval.MIN_15: 15,
Interval.MIN_30: 30,
Interval.HR_1: 60,
Interval.DAY_1: 1440,
}
# Generate the "simulated aggregation" data
for sym in self.interval:
interval = self.interval[sym]["interval"]
interval_txt = interval_enum_to_string(interval)
df = self.storage.load(sym, interval)
df_len = len(df.index)
debugger.debug(f"Formatting {sym} data...")
for agg in self.interval[sym]["aggregations"]:
agg_txt = interval_enum_to_string(agg)
# tmp_path = f"{path}/{sym}-{interval_txt}+{agg_txt}.pickle"
tmp_path = f"{path}/{sym}@{int(agg)-16}.pickle"
file = Path(tmp_path)
if file.is_file():
data = self.storage.open(sym, int(agg)-16)
self.storage.store(sym, int(agg)-16, data, save_pickle=False)
continue
# TODO: check if file is updated with latest data
debugger.debug(
f"Formatting aggregation from {interval_txt} to {agg_txt}..."
)
points = int(conv[agg] / conv[interval])
for i in tqdm(range(df_len)):
df_tmp = df.iloc[0 : i + 1]
df_tmp = df_tmp.iloc[
-points:
] # Only get recent data, since aggregating the entire df will take too long
agg_df = aggregate_df(df_tmp, agg)
self.storage.store(
sym, int(agg) - 16, agg_df.iloc[[-1]], remove_duplicate=False
)
debugger.debug("Formatting complete")
# # Save the current state of the queue
# for s in self.watch:
# self.load.append_entry(s, self.interval, self.storage.load(s, self.interval))
# for i in self.aggregations:
# self.load.append_entry(s, '-'+i, self.storage.load(s, '-'+i), False, True)
# self.load.append_entry(s, i, self.storage.load(s, i))
# Move all data to a cached dataframe
for sym in self.interval:
self.df[sym] = {}
inter = self.interval[sym]["interval"]
interval_txt = interval_enum_to_string(inter)
df = self.storage.load(sym, inter, no_slice=True)
self.df[sym][inter] = df.copy()
for agg in self.interval[sym]["aggregations"]:
# agg_txt = interval_enum_to_string(agg)
# agg_txt = f"{interval_txt}+{agg_txt}"
df = self.storage.load(sym, int(agg) - 16, no_slice=True)
self.df[sym][int(agg) - 16] = df.copy()
# Trim data so start and end dates match between assets and intervals
# data_start = pytz.utc.localize(dt.datetime(1970, 1, 1))
# data_end = pytz.utc.localize(dt.datetime.utcnow().replace(microsecond=0, second=0))
# for i in [self.interval] + self.aggregations:
# for s in self.watch:
# start = self.df[i][s].index[0]
# end = self.df[i][s].index[-1]
# if start > data_start:
# data_start = start
# if end < data_end:
# data_end = end
# for i in [self.interval] + self.aggregations:
# for s in self.watch:
# self.df[i][s] = self.df[i][s].loc[data_start:data_end]
self.load_watch = True
def read_pickle_data(self):
"""Function to read backtesting data from a local file.
:interval: The interval of the data
:path: Path to the local data file
:date_format: The format of the data's timestamps
"""
for s in self.interval:
for i in [self.interval[s]["interval"]] + self.interval[s]["aggregations"]:
df = self.storage.open(s, i).dropna()
if df.empty or now() - df.index[-1] > dt.timedelta(days=1):
df = self.streamer.fetch_price_history(s, i).dropna()
self.storage.store(s, i, df)
def read_csv_data(self, path: str, date_format: str = "%Y-%m-%d %H:%M:%S"):
"""Function to read backtesting data from a local CSV file.
:interval: The interval of the data
:path: Path to the local data file
:date_format: The format of the data's timestamps
"""
for s in self.interval:
for i in [self.interval[s]["interval"]] + self.interval[s]["aggregations"]:
i_txt = interval_enum_to_string(i)
df = self.read_csv(f"{path}/{s}-{i_txt}.csv").dropna()
if df.empty:
df = self.streamer.fetch_price_history(s, i).dropna()
self.storage.store(s, i, df)
def read_csv(self, path: str) -> pd.DataFrame:
"""Reads a CSV file and returns a Pandas DataFrame.
:path: Path to the CSV file.
"""
if not os.path.isfile(path):
return pd.DataFrame()
df =
|
pd.read_csv(path)
|
pandas.read_csv
|
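The "simulated aggregation" loop above rebuilds coarser candles bar by bar so the backtest never sees future data; outside that loop the same aggregation can be done in one shot with DataFrame.resample. A sketch on synthetic 1-minute bars:

import numpy as np
import pandas as pd

idx = pd.date_range('2021-01-04 09:30', periods=60, freq='1min')
ohlc = pd.DataFrame({
    'open': np.linspace(100.0, 101.0, 60),
    'high': np.linspace(100.1, 101.1, 60),
    'low': np.linspace(99.9, 100.9, 60),
    'close': np.linspace(100.05, 101.05, 60),
    'volume': 1000,
}, index=idx)

candles_30m = ohlc.resample('30min').agg(
    {'open': 'first', 'high': 'max', 'low': 'min', 'close': 'last', 'volume': 'sum'})
print(candles_30m)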
# AUTOGENERATED! DO NOT EDIT! File to edit: utils.ipynb (unless otherwise specified).
__all__ = ['merge_csv_in_dir', 'tree_select_kv', 'deep_path_from_keysequence', 'get_paths', 'show_tabs', 'take_while',
'partition', 'partition_all', 'create_embedding_files_for_visualization', 'pipe', 'dedupe_conseq']
# Cell
from typing import List
from fastcore.basics import typed
from fastcore.test import *
from toolz import thread_first, thread_last
import proseflow.text as txt
from functools import reduce
from typing import Iterable, List, Union
from pydash import get
from typeguard import typechecked
# Cell
import os
import pandas as pd
from itertools import product
import argparse
import sys
def merge_csv_in_dir(dir_path):
for dirpath, _, fnames in os.walk(dir_path):
fpaths = (["/".join(t) for t in [*product([dirpath], fnames)]])
dfs = [
|
pd.read_csv(fpath)
|
pandas.read_csv
|
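merge_csv_in_dir above walks a directory tree and reads every file it finds; a compact variant of the same idea for a flat directory, using glob plus pandas.concat (the directory path in the comment is hypothetical):

from glob import glob
import pandas as pd

def merge_csvs(dir_path):
    """Read every *.csv directly under dir_path and stack them into one frame."""
    frames = [pd.read_csv(fpath) for fpath in sorted(glob(f"{dir_path}/*.csv"))]
    return pd.concat(frames, ignore_index=True) if frames else pd.DataFrame()

# merged = merge_csvs('./reports')  # hypothetical directory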
#
# Copyright 2020 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Testing out the datacompy functionality
"""
import io
import logging
import sys
from datetime import datetime
from decimal import Decimal
from unittest import mock
import numpy as np
import pandas as pd
import pytest
from pandas.util.testing import assert_series_equal
from pytest import raises
import datacompy
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
def test_numeric_columns_equal_abs():
data = """a|b|expected
1|1|True
2|2.1|True
3|4|False
4|NULL|False
NULL|4|False
NULL|NULL|True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(df.a, df.b, abs_tol=0.2)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_numeric_columns_equal_rel():
data = """a|b|expected
1|1|True
2|2.1|True
3|4|False
4|NULL|False
NULL|4|False
NULL|NULL|True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_string_columns_equal():
data = """a|b|expected
Hi|Hi|True
Yo|Yo|True
Hey|Hey |False
résumé|resume|False
résumé|résumé|True
💩|💩|True
💩|🤔|False
| |True
| |False
datacompy|DataComPy|False
something||False
|something|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_string_columns_equal_with_ignore_spaces():
data = """a|b|expected
Hi|Hi|True
Yo|Yo|True
Hey|Hey |True
résumé|resume|False
résumé|résumé|True
💩|💩|True
💩|🤔|False
| |True
| |True
datacompy|DataComPy|False
something||False
|something|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2, ignore_spaces=True)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_string_columns_equal_with_ignore_spaces_and_case():
data = """a|b|expected
Hi|Hi|True
Yo|Yo|True
Hey|Hey |True
résumé|resume|False
résumé|résumé|True
💩|💩|True
💩|🤔|False
| |True
| |True
datacompy|DataComPy|True
something||False
|something|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(
df.a, df.b, rel_tol=0.2, ignore_spaces=True, ignore_case=True
)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_date_columns_equal():
data = """a|b|expected
2017-01-01|2017-01-01|True
2017-01-02|2017-01-02|True
2017-10-01|2017-10-10|False
2017-01-01||False
|2017-01-01|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
# First compare just the strings
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# Then compare converted to datetime objects
df["a"] = pd.to_datetime(df["a"])
df["b"] = pd.to_datetime(df["b"])
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# and reverse
actual_out_rev = datacompy.columns_equal(df.b, df.a, rel_tol=0.2)
assert_series_equal(expect_out, actual_out_rev, check_names=False)
def test_date_columns_equal_with_ignore_spaces():
data = """a|b|expected
2017-01-01|2017-01-01 |True
2017-01-02 |2017-01-02|True
2017-10-01 |2017-10-10 |False
2017-01-01||False
|2017-01-01|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
# First compare just the strings
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2, ignore_spaces=True)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# Then compare converted to datetime objects
df["a"] = pd.to_datetime(df["a"])
df["b"] = pd.to_datetime(df["b"])
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2, ignore_spaces=True)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# and reverse
actual_out_rev = datacompy.columns_equal(df.b, df.a, rel_tol=0.2, ignore_spaces=True)
assert_series_equal(expect_out, actual_out_rev, check_names=False)
def test_date_columns_equal_with_ignore_spaces_and_case():
data = """a|b|expected
2017-01-01|2017-01-01 |True
2017-01-02 |2017-01-02|True
2017-10-01 |2017-10-10 |False
2017-01-01||False
|2017-01-01|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
# First compare just the strings
actual_out = datacompy.columns_equal(
df.a, df.b, rel_tol=0.2, ignore_spaces=True, ignore_case=True
)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# Then compare converted to datetime objects
df["a"] = pd.to_datetime(df["a"])
df["b"] = pd.to_datetime(df["b"])
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2, ignore_spaces=True)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# and reverse
actual_out_rev = datacompy.columns_equal(df.b, df.a, rel_tol=0.2, ignore_spaces=True)
assert_series_equal(expect_out, actual_out_rev, check_names=False)
def test_date_columns_unequal():
"""I want datetime fields to match with dates stored as strings
"""
df = pd.DataFrame([{"a": "2017-01-01", "b": "2017-01-02"}, {"a": "2017-01-01"}])
df["a_dt"] = pd.to_datetime(df["a"])
df["b_dt"] = pd.to_datetime(df["b"])
assert datacompy.columns_equal(df.a, df.a_dt).all()
assert datacompy.columns_equal(df.b, df.b_dt).all()
assert datacompy.columns_equal(df.a_dt, df.a).all()
assert datacompy.columns_equal(df.b_dt, df.b).all()
assert not datacompy.columns_equal(df.b_dt, df.a).any()
assert not datacompy.columns_equal(df.a_dt, df.b).any()
assert not datacompy.columns_equal(df.a, df.b_dt).any()
assert not datacompy.columns_equal(df.b, df.a_dt).any()
def test_bad_date_columns():
"""If strings can't be coerced into dates then it should be false for the
whole column.
"""
df = pd.DataFrame(
[{"a": "2017-01-01", "b": "2017-01-01"}, {"a": "2017-01-01", "b": "217-01-01"}]
)
df["a_dt"] = pd.to_datetime(df["a"])
assert not datacompy.columns_equal(df.a_dt, df.b).any()
def test_rounded_date_columns():
"""If strings can't be coerced into dates then it should be false for the
whole column.
"""
df = pd.DataFrame(
[
{"a": "2017-01-01", "b": "2017-01-01 00:00:00.000000", "exp": True},
{"a": "2017-01-01", "b": "2017-01-01 00:00:00.123456", "exp": False},
{"a": "2017-01-01", "b": "2017-01-01 00:00:01.000000", "exp": False},
{"a": "2017-01-01", "b": "2017-01-01 00:00:00", "exp": True},
]
)
df["a_dt"] = pd.to_datetime(df["a"])
actual = datacompy.columns_equal(df.a_dt, df.b)
expected = df["exp"]
assert_series_equal(actual, expected, check_names=False)
def test_decimal_float_columns_equal():
df = pd.DataFrame(
[
{"a": Decimal("1"), "b": 1, "expected": True},
{"a": Decimal("1.3"), "b": 1.3, "expected": True},
{"a": Decimal("1.000003"), "b": 1.000003, "expected": True},
{"a": Decimal("1.000000004"), "b": 1.000000003, "expected": False},
{"a": Decimal("1.3"), "b": 1.2, "expected": False},
{"a": np.nan, "b": np.nan, "expected": True},
{"a": np.nan, "b": 1, "expected": False},
{"a": Decimal("1"), "b": np.nan, "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_decimal_float_columns_equal_rel():
df = pd.DataFrame(
[
{"a": Decimal("1"), "b": 1, "expected": True},
{"a": Decimal("1.3"), "b": 1.3, "expected": True},
{"a": Decimal("1.000003"), "b": 1.000003, "expected": True},
{"a": Decimal("1.000000004"), "b": 1.000000003, "expected": True},
{"a": Decimal("1.3"), "b": 1.2, "expected": False},
{"a": np.nan, "b": np.nan, "expected": True},
{"a": np.nan, "b": 1, "expected": False},
{"a": Decimal("1"), "b": np.nan, "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b, abs_tol=0.001)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_decimal_columns_equal():
df = pd.DataFrame(
[
{"a": Decimal("1"), "b": Decimal("1"), "expected": True},
{"a": Decimal("1.3"), "b": Decimal("1.3"), "expected": True},
{"a": Decimal("1.000003"), "b": Decimal("1.000003"), "expected": True},
{"a": Decimal("1.000000004"), "b": Decimal("1.000000003"), "expected": False},
{"a": Decimal("1.3"), "b": Decimal("1.2"), "expected": False},
{"a": np.nan, "b": np.nan, "expected": True},
{"a": np.nan, "b": Decimal("1"), "expected": False},
{"a": Decimal("1"), "b": np.nan, "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_decimal_columns_equal_rel():
df = pd.DataFrame(
[
{"a": Decimal("1"), "b": Decimal("1"), "expected": True},
{"a": Decimal("1.3"), "b": Decimal("1.3"), "expected": True},
{"a": Decimal("1.000003"), "b": Decimal("1.000003"), "expected": True},
{"a": Decimal("1.000000004"), "b": Decimal("1.000000003"), "expected": True},
{"a": Decimal("1.3"), "b": Decimal("1.2"), "expected": False},
{"a": np.nan, "b": np.nan, "expected": True},
{"a": np.nan, "b": Decimal("1"), "expected": False},
{"a": Decimal("1"), "b": np.nan, "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b, abs_tol=0.001)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_infinity_and_beyond():
df = pd.DataFrame(
[
{"a": np.inf, "b": np.inf, "expected": True},
{"a": -np.inf, "b": -np.inf, "expected": True},
{"a": -np.inf, "b": np.inf, "expected": False},
{"a": np.inf, "b": -np.inf, "expected": False},
{"a": 1, "b": 1, "expected": True},
{"a": 1, "b": 0, "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_mixed_column():
df = pd.DataFrame(
[
{"a": "hi", "b": "hi", "expected": True},
{"a": 1, "b": 1, "expected": True},
{"a": np.inf, "b": np.inf, "expected": True},
{"a": Decimal("1"), "b": Decimal("1"), "expected": True},
{"a": 1, "b": "1", "expected": False},
{"a": 1, "b": "yo", "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_mixed_column_with_ignore_spaces():
df = pd.DataFrame(
[
{"a": "hi", "b": "hi ", "expected": True},
{"a": 1, "b": 1, "expected": True},
{"a": np.inf, "b": np.inf, "expected": True},
{"a": Decimal("1"), "b": Decimal("1"), "expected": True},
{"a": 1, "b": "1 ", "expected": False},
{"a": 1, "b": "yo ", "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b, ignore_spaces=True)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_mixed_column_with_ignore_spaces_and_case():
df = pd.DataFrame(
[
{"a": "hi", "b": "hi ", "expected": True},
{"a": 1, "b": 1, "expected": True},
{"a": np.inf, "b": np.inf, "expected": True},
{"a": Decimal("1"), "b": Decimal("1"), "expected": True},
{"a": 1, "b": "1 ", "expected": False},
{"a": 1, "b": "yo ", "expected": False},
{"a": "Hi", "b": "hI ", "expected": True},
{"a": "HI", "b": "HI ", "expected": True},
{"a": "hi", "b": "hi ", "expected": True},
]
)
actual_out = datacompy.columns_equal(df.a, df.b, ignore_spaces=True, ignore_case=True)
expect_out = df["expected"]
|
assert_series_equal(expect_out, actual_out, check_names=False)
|
pandas.util.testing.assert_series_equal
|
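The tests above exercise datacompy.columns_equal with absolute and relative tolerances. The core idea can be sketched with plain numpy/pandas (a conceptual illustration, not datacompy's implementation); note that assert_series_equal is also available from pandas.testing in current pandas versions.

import numpy as np
import pandas as pd
from pandas.testing import assert_series_equal

a = pd.Series([1.0, 2.0, np.nan, 4.0])
b = pd.Series([1.0, 2.1, np.nan, 5.0])

# equal when within the absolute tolerance, or when both values are missing
close = pd.Series(np.isclose(a, b, atol=0.2)) | (a.isna() & b.isna())
expected = pd.Series([True, True, True, False])
assert_series_equal(close, expected, check_names=False)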
import argparse
import sys
import torch
import rdkit
import pandas as pd
from tqdm import tqdm
from dglt.contrib.moses.moses.models_storage import ModelsStorage
from dglt.contrib.moses.moses.script_utils import add_sample_args, set_seed
lg = rdkit.RDLogger.logger()
lg.setLevel(rdkit.RDLogger.CRITICAL)
MODELS = ModelsStorage()
def get_parser():
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(
title='Models sampler script', description='available models')
for model in MODELS.get_model_names():
add_sample_args(subparsers.add_parser(model))
return parser
def main(model, config):
set_seed(config.seed)
device = torch.device(config.device)
# For CUDNN to work properly:
if device.type.startswith('cuda'):
torch.cuda.set_device(device.index or 0)
model_config = torch.load(config.config_load)
model_vocab = torch.load(config.vocab_load)
model_state = torch.load(config.model_load)
model = MODELS.get_model_class(model)(model_vocab, model_config)
model.load_state_dict(model_state)
model = model.to(device)
model.eval()
samples = []
n = config.n_samples
total_generated = 0
with tqdm(total=config.n_samples, desc='Generating samples') as T:
while n > 0:
current_samples = model.sample(
min(n, config.n_batch), config.max_len
)
samples.extend(current_samples)
total_generated += min(n, config.n_batch)
n -= len(current_samples)
T.update(len(current_samples))
valid_rate = len(samples) * 1.0 / total_generated
postfix=[f'generated={total_generated:d}',
f'valid={valid_rate:.5f}']
T.set_postfix_str(' '.join(postfix))
samples =
|
pd.DataFrame(samples, columns=['SMILES'])
|
pandas.DataFrame
|
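Once sampling finishes, the collected strings become a one-column frame and are usually written out without the index. A tiny sketch with made-up SMILES and a hypothetical output path:

import pandas as pd

samples = ['CCO', 'c1ccccc1', 'CC(=O)O']       # made-up molecules
df = pd.DataFrame(samples, columns=['SMILES'])
df.to_csv('samples_smiles.csv', index=False)   # hypothetical output path
print(df)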
"""
Microsimulation by a sequence of microsynthesised populations
"""
import numpy as np
import pandas as pd
#from random import randint
import humanleague as hl
import ukpopulation.nppdata as nppdata
import ukpopulation.snppdata as snppdata
import ukpopulation.customsnppdata as customsnppdata
import ukpopulation.myedata as myedata
import microsimulation.utils as utils
import microsimulation.common as common
class SequentialMicrosynthesis(common.Base):
"""
Static microsimulation based on a sequence of microsyntheses
  Performs a sequence of static microsyntheses using census data as a seed population and mid-year estimates as marginal
  constraints. This is the simplest microsimulation model and is intended as a comparison/calibration for Monte-Carlo-based
  microsimulation.
"""
def __init__(self, region, resolution, variant, is_custom=False, cache_dir="./cache", output_dir="./data", fast_mode=False):
common.Base.__init__(self, region, resolution, cache_dir)
self.output_dir = output_dir
self.fast_mode = fast_mode
self.variant = variant
self.is_custom = is_custom
# init the population (projections) modules
self.mye_api = myedata.MYEData(cache_dir)
self.npp_api = nppdata.NPPData(cache_dir)
if self.is_custom:
if variant not in customsnppdata.list_custom_projections(cache_dir):
raise ValueError("Requested custom SNPP %s is not in the cache directory (%s)" % (variant, cache_dir))
print("Using custom SNPP variant %s" % variant)
print("NOTE: assuming custom SNPP variant disables rescaling to national variant")
self.snpp_api = customsnppdata.CustomSNPPData(variant, cache_dir)
else:
self.snpp_api = snppdata.SNPPData(cache_dir)
# validation
if not is_custom and self.variant not in nppdata.NPPData.VARIANTS:
raise ValueError(self.variant + " is not a known projection variant")
if not isinstance(self.fast_mode, bool):
raise ValueError("fast mode should be boolean")
# TODO enable 2001 ref year?
# (down)load the census 2011 tables
self.__get_census_data()
def run(self, ref_year, target_year):
"""
Run the sequence
"""
# TODO enable 2001 ref year?
if ref_year != 2011:
raise ValueError("(census) reference year must be 2011")
if target_year < 2001:
raise ValueError("2001 is the earliest supported target year")
if target_year > self.npp_api.max_year():
raise ValueError(str(self.npp_api.max_year()) + " is the current latest supported end year")
if self.fast_mode:
print("Running in fast mode. Rounded IPF populations may not exactly match the marginals")
print("Starting microsynthesis sequence...")
for year in utils.year_sequence(ref_year, target_year):
out_file = self.output_dir + "/ssm_" + self.region + "_" + self.resolution + "_" + self.variant + "_" + str(year) + ".csv"
# this is inconsistent with the household microsynth (batch script checks whether output exists)
# TODO make them consistent?
# With dynamic update of seed for now just recompute even if file exists
#if not os.path.isfile(out_file):
if year < self.snpp_api.min_year(self.region):
source = " [MYE]"
elif year <= self.snpp_api.max_year(self.region):
source = " [SNPP]"
else:
source = " [XNPP]"
print("Generating ", out_file, source, "... ",
sep="", end="", flush=True)
msynth = self.__microsynthesise(year)
print("OK")
msynth.to_csv(out_file, index_label="PID")
def __microsynthesise(self, year): #LAD=self.region
# Census/seed proportions for geography and ethnicity
oa_prop = self.seed.sum((1, 2, 3)) / self.seed.sum()
eth_prop = self.seed.sum((0, 1, 2)) / self.seed.sum()
if year < self.snpp_api.min_year(self.region):
age_sex = utils.create_age_sex_marginal(utils.adjust_pp_age(self.mye_api.filter(self.region, year)), self.region)
elif year <= self.npp_api.max_year():
# Don't attempt to apply NPP variant if before the start of the NPP data, or it's a custom SNPP
if year < self.npp_api.min_year() or self.is_custom:
age_sex = utils.create_age_sex_marginal(utils.adjust_pp_age(self.snpp_api.filter(self.region, year)), self.region)
else:
age_sex = utils.create_age_sex_marginal(utils.adjust_pp_age(self.snpp_api.create_variant(self.variant, self.npp_api, self.region, year)), self.region)
else:
      raise ValueError("Cannot microsimulate past NPP horizon year ({})".format(self.npp_api.max_year()))
# convert proportions/probabilities to integer frequencies
oa = hl.prob2IntFreq(oa_prop, age_sex.sum())["freq"]
eth = hl.prob2IntFreq(eth_prop, age_sex.sum())["freq"]
# combine the above into a 2d marginal using QIS-I and census 2011 or later data as the seed
oa_eth = hl.qisi(self.seed.sum((1, 2)), [np.array([0]), np.array([1])], [oa, eth])
if not (isinstance(oa_eth, dict) and oa_eth["conv"]):
raise RuntimeError("oa_eth did not converge")
# now the full seeded microsynthesis
if self.fast_mode:
msynth = hl.ipf(self.seed, [np.array([0, 3]), np.array([1, 2])], [oa_eth["result"].astype(float), age_sex.astype(float)])
else:
msynth = hl.qisi(self.seed, [np.array([0, 3]), np.array([1, 2])], [oa_eth["result"], age_sex])
if not msynth["conv"]:
print(msynth)
raise RuntimeError("msynth did not converge")
#print(msynth["pop"])
if self.fast_mode:
print("updating seed to", year, " ", end="")
self.seed = msynth["result"]
msynth["result"] = np.around(msynth["result"]).astype(int)
else:
print("updating seed to", year, " ", end="")
self.seed = msynth["result"].astype(float)
rawtable = hl.flatten(msynth["result"]) #, c("OA", "SEX", "AGE", "ETH"))
# col names and remapped values
table =
|
pd.DataFrame(columns=["Area", "DC1117EW_C_SEX", "DC1117EW_C_AGE", "DC2101EW_C_ETHPUK11"])
|
pandas.DataFrame
|
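A brief sketch of the construction the completion above starts: a DataFrame with the census column names used by the script, filled from a flattened population table (the data below are invented, and hl.flatten() is assumed to return one category-index list per dimension):

import numpy as np
import pandas as pd

cols = ["Area", "DC1117EW_C_SEX", "DC1117EW_C_AGE", "DC2101EW_C_ETHPUK11"]
rawtable = [np.array([0, 1]), np.array([1, 2]), np.array([30, 45]), np.array([2, 2])]
table = pd.DataFrame(dict(zip(cols, rawtable)), columns=cols)
table.to_csv("ssm_example.csv", index_label="PID")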
import pandas as pd
from sklearn import linear_model
import statsmodels.api as sm
import numpy as np
from scipy import stats
df_all = pd.read_csv("/mnt/nadavrap-students/STS/data/imputed_data2.csv")
print(df_all.columns.tolist())
df_all.info()  # info() prints its summary directly; wrapping it in print() only adds a trailing "None"
df_all = df_all.replace({'MtOpD':{False:0, True:1}})
df_all = df_all.replace({'Complics':{False:0, True:1}})
mask_reop = df_all['Reoperation'] == 'Reoperation'
df_reop = df_all[mask_reop]
mask = df_all['surgyear'] == 2010
df_2010 = df_all[mask]
mask = df_all['surgyear'] == 2011
df_2011 = df_all[mask]
mask = df_all['surgyear'] == 2012
df_2012 = df_all[mask]
mask = df_all['surgyear'] == 2013
df_2013 = df_all[mask]
mask = df_all['surgyear'] == 2014
df_2014 = df_all[mask]
mask = df_all['surgyear'] == 2015
df_2015 = df_all[mask]
mask = df_all['surgyear'] == 2016
df_2016 = df_all[mask]
mask = df_all['surgyear'] == 2017
df_2017 = df_all[mask]
mask = df_all['surgyear'] == 2018
df_2018 = df_all[mask]
mask = df_all['surgyear'] == 2019
df_2019 = df_all[mask]
avg_hospid = pd.DataFrame()
def groupby_siteid():
df2010 = df_2010.groupby('HospID')['HospID'].count().reset_index(name='2010_total')
df2011 = df_2011.groupby('HospID')['HospID'].count().reset_index(name='2011_total')
df2012 = df_2012.groupby('HospID')['HospID'].count().reset_index(name='2012_total')
df2013 = df_2013.groupby('HospID')['HospID'].count().reset_index(name='2013_total')
df2014 = df_2014.groupby('HospID')['HospID'].count().reset_index(name='2014_total')
df2015 = df_2015.groupby('HospID')['HospID'].count().reset_index(name='2015_total')
df2016 = df_2016.groupby('HospID')['HospID'].count().reset_index(name='2016_total')
df2017 = df_2017.groupby('HospID')['HospID'].count().reset_index(name='2017_total')
df2018 = df_2018.groupby('HospID')['HospID'].count().reset_index(name='2018_total')
df2019 = df_2019.groupby('HospID')['HospID'].count().reset_index(name='2019_total')
df1 =pd.merge(df2010, df2011, on='HospID', how='outer')
df2 =pd.merge(df1, df2012, on='HospID', how='outer')
df3 =pd.merge(df2, df2013, on='HospID', how='outer')
df4 =pd.merge(df3, df2014, on='HospID', how='outer')
df5 =pd.merge(df4, df2015, on='HospID', how='outer')
df6 =pd.merge(df5, df2016, on='HospID', how='outer')
df7 =pd.merge(df6, df2017, on='HospID', how='outer')
df8 =pd.merge(df7, df2018, on='HospID', how='outer')
df_sum_all_Years =pd.merge(df8, df2019, on='HospID', how='outer')
df_sum_all_Years.fillna(0,inplace=True)
cols = df_sum_all_Years.columns.difference(['HospID'])
df_sum_all_Years['Distinct_years'] = df_sum_all_Years[cols].gt(0).sum(axis=1)
cols_sum = df_sum_all_Years.columns.difference(['HospID','Distinct_years'])
df_sum_all_Years['Year_sum'] =df_sum_all_Years.loc[:,cols_sum].sum(axis=1)
df_sum_all_Years['Year_avg'] = df_sum_all_Years['Year_sum']/df_sum_all_Years['Distinct_years']
df_sum_all_Years.to_csv("/tmp/pycharm_project_723/files/total op sum all years HospID.csv")
# print("details on site id dist:")
# # print("num of all sites: ", len(df_sum_all_Years))
#
# less_8 =df_sum_all_Years[df_sum_all_Years['Distinct_years'] !=10]
# less_8.to_csv("total op less 10 years siteid.csv")
# print("num of sites with less years: ", len(less_8))
#
# x = np.array(less_8['Distinct_years'])
# print(np.unique(x))
avg_hospid['HospID'] = df_sum_all_Years['HospID']
avg_hospid['total_year_sum'] = df_sum_all_Years['Year_sum']
avg_hospid['total_year_avg'] = df_sum_all_Years['Year_avg']
avg_hospid['num_of_years'] = df_sum_all_Years['Distinct_years']
def groupby_siteid_reop():
df2010 = df_2010.groupby('HospID')['Reoperation'].apply(lambda x: (x == 'Reoperation').sum()).reset_index(name='2010_reop')
df2011 = df_2011.groupby('HospID')['Reoperation'].apply(lambda x: (x == 'Reoperation').sum()).reset_index(name='2011_reop')
df2012 = df_2012.groupby('HospID')['Reoperation'].apply(lambda x: (x == 'Reoperation').sum()).reset_index(name='2012_reop')
df2013 = df_2013.groupby('HospID')['Reoperation'].apply(lambda x: (x == 'Reoperation').sum()).reset_index(name='2013_reop')
df2014 = df_2014.groupby('HospID')['Reoperation'].apply(lambda x: (x == 'Reoperation').sum()).reset_index(name='2014_reop')
df2015 = df_2015.groupby('HospID')['Reoperation'].apply(lambda x: (x == 'Reoperation').sum()).reset_index(name='2015_reop')
df2016 = df_2016.groupby('HospID')['Reoperation'].apply(lambda x: (x == 'Reoperation').sum()).reset_index(name='2016_reop')
df2017 = df_2017.groupby('HospID')['Reoperation'].apply(lambda x: (x == 'Reoperation').sum()).reset_index(name='2017_reop')
df2018 = df_2018.groupby('HospID')['Reoperation'].apply(lambda x: (x == 'Reoperation').sum()).reset_index(name='2018_reop')
df2019 = df_2019.groupby('HospID')['Reoperation'].apply(lambda x: (x == 'Reoperation').sum()).reset_index(name='2019_reop')
df1 =pd.merge(df2010, df2011, on='HospID', how='outer')
df2 =pd.merge(df1, df2012, on='HospID', how='outer')
df3 =pd.merge(df2, df2013, on='HospID', how='outer')
df4 =pd.merge(df3, df2014, on='HospID', how='outer')
df5 =pd.merge(df4, df2015, on='HospID', how='outer')
df6 =pd.merge(df5, df2016, on='HospID', how='outer')
df7 =pd.merge(df6, df2017, on='HospID', how='outer')
df8 =pd.merge(df7, df2018, on='HospID', how='outer')
df_sum_all_Years =pd.merge(df8, df2019, on='HospID', how='outer')
df_sum_all_Years.fillna(0,inplace=True)
cols = df_sum_all_Years.columns.difference(['HospID'])
df_sum_all_Years['Distinct_years_reop'] = df_sum_all_Years[cols].gt(0).sum(axis=1)
cols_sum = df_sum_all_Years.columns.difference(['HospID', 'Distinct_years_reop'])
df_sum_all_Years['Year_sum_reop'] = df_sum_all_Years.loc[:, cols_sum].sum(axis=1)
df_sum_all_Years['Year_avg_reop'] = df_sum_all_Years['Year_sum_reop'] / avg_hospid['num_of_years']
df_sum_all_Years.to_csv("/tmp/pycharm_project_723/files/sum all years HospID reop.csv")
# -----------------------first op------------------------------------
df_10 = df_2010.groupby('HospID')['Reoperation'].apply(lambda x: (x == 'First Time').sum()).reset_index(name='2010_FirstOperation')
df_11 = df_2011.groupby('HospID')['Reoperation'].apply(lambda x: (x == 'First Time').sum()).reset_index(name='2011_FirstOperation')
df_12 = df_2012.groupby('HospID')['Reoperation'].apply(lambda x: (x == 'First Time').sum()).reset_index(name='2012_FirstOperation')
df_13 = df_2013.groupby('HospID')['Reoperation'].apply(lambda x: (x == 'First Time').sum()).reset_index(name='2013_FirstOperation')
df_14 = df_2014.groupby('HospID')['Reoperation'].apply(lambda x: (x == 'First Time').sum()).reset_index(name='2014_FirstOperation')
df_15 = df_2015.groupby('HospID')['Reoperation'].apply(lambda x: (x == 'First Time').sum()).reset_index(name='2015_FirstOperation')
df_16 = df_2016.groupby('HospID')['Reoperation'].apply(lambda x: (x == 'First Time').sum()).reset_index(name='2016_FirstOperation')
df_17 = df_2017.groupby('HospID')['Reoperation'].apply(lambda x: (x == 'First Time').sum()).reset_index(name='2017_FirstOperation')
df_18 = df_2018.groupby('HospID')['Reoperation'].apply(lambda x: (x == 'First Time').sum()).reset_index(name='2018_FirstOperation')
df_19 = df_2019.groupby('HospID')['Reoperation'].apply(lambda x: (x == 'First Time').sum()).reset_index(name='2019_FirstOperation')
d1 = pd.merge(df_10, df_11, on='HospID', how='outer')
d2 = pd.merge(d1, df_12, on='HospID', how='outer')
d3 = pd.merge(d2, df_13, on='HospID', how='outer')
d4 =
|
pd.merge(d3, df_14, on='HospID', how='outer')
|
pandas.merge
|
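The per-year filter-and-merge above can usually be expressed more compactly with a single groupby/unstack; a sketch of that equivalent construction on a toy frame (column names mirror the script, but the data are invented):

import pandas as pd

df_all = pd.DataFrame({
    "HospID": [1, 1, 2, 2, 2],
    "surgyear": [2010, 2011, 2010, 2010, 2012],
})
# one row per hospital, one count column per year
counts = df_all.groupby(["HospID", "surgyear"]).size().unstack(fill_value=0)
counts.columns = [f"{year}_total" for year in counts.columns]
counts["Distinct_years"] = counts.gt(0).sum(axis=1)
year_cols = [c for c in counts.columns if c.endswith("_total")]
counts["Year_sum"] = counts[year_cols].sum(axis=1)
counts["Year_avg"] = counts["Year_sum"] / counts["Distinct_years"]
print(counts)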
import scrapy
import pandas as pd
class ImmoAptListScraper(scrapy.Spider):
name = "immolaendle_apt_scraper"
start_urls = ["https://www.laendleimmo.at/mietobjekt/wohnung"]
custom_settings = {
"AUTOTHROTTLE_ENABLED": True,
"FEED_EXPORT_ENCODING": "utf-8-sig",
"DOWNLOADER_MIDDLEWARE": {
"scrapy.downloadermiddlewares.useragent.UserAgentMiddleware": None,
"scrapy_useragents.downloadermiddlewares.useragents.UserAgentsMiddleware": 500,
},
"USER_AGENTS": [
(
"Mozilla/5.0 (X11; Linux x86_64) "
"AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/57.0.2987.110 "
"Safari/537.36"
), # chrome
(
"Mozilla/5.0 (X11; Linux x86_64) "
"AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/61.0.3163.79 "
"Safari/537.36"
), # chrome
(
"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:55.0) "
"Gecko/20100101 "
"Firefox/55.0"
), # firefox
],
}
link_list = {"link": []}
def parse(self, response):
for item in response.css("div.list-content h2.title-block-mobile"):
self.link_list["link"].append(
f"https://www.laendleimmo.at{item.css('a.js-ad-click::attr(href)').get()}"
)
# yield {"link": item.css("a.js-ad-click::attr(href)").get()}
for next_page in response.css("ul li.next a"):
yield response.follow(next_page, self.parse)
df =
|
pd.DataFrame(data=self.link_list)
|
pandas.DataFrame
|
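A minimal sketch of the pattern the spider uses for its result: accumulating scraped values in a dict of lists and converting it to a DataFrame (the URL is illustrative; where the spider persists the frame is not shown in the excerpt):

import pandas as pd

link_list = {"link": []}
link_list["link"].append("https://www.laendleimmo.at/mietobjekt/wohnung/example")
df = pd.DataFrame(data=link_list)
df.to_csv("links.csv", index=False)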
import pandas as pd
from copy_annotations.anchor import Anchor
from copy_annotations.annotation import Annotation
from copy_annotations.selection import Selection
ANCHOR = 'anchor'
VALID = 'valid'
X = 'x'
Y = 'y'
CONTENT = 'content'
class Sheet:
transformed_df = None
transformed_annotations = None
transformed_row_map = None
transformed_column_map = None
annotations = None
def __init__(self, dataframe: pd.DataFrame, annotations: dict = None):
if annotations:
self.annotations = [Annotation(annotation) for annotation in annotations]
self.df = dataframe
self.transform()
def find_anchors(self, target_df):
anchors = {}
candidates = self._get_anchors_candidates()
for y, row in target_df.iterrows():
for x, value in row.items():
if isinstance(value, str):
for candidate in candidates:
if value.lower() == candidate[CONTENT].lower():
if anchors.get(value):
anchors[value][VALID] = False
continue
anchors[value] = {
ANCHOR: Anchor(value, candidate[X], candidate[Y], x + 1, y + 1),
VALID: True
}
return [v[ANCHOR] for k, v in anchors.items() if v[VALID]]
def _get_anchors_candidates(self):
candidates = []
for y, row in self.df.iterrows():
for x, value in row.items():
is_annotated = any([a.source_selection.contains(Selection(x + 1, x + 1, y + 1, y + 1)) for a in self.annotations])
if isinstance(value, str) and not is_annotated:
candidates.append({
CONTENT: value,
X: x + 1,
Y: y + 1,
})
return candidates
def represent_transformed_annotations(self, key):
default_content = 'UNLABELED' if self.annotations else ''
annotation_df =
|
pd.DataFrame(default_content, columns=self.transformed_df.columns, index=self.transformed_df.index)
|
pandas.DataFrame
|
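The completion above builds a DataFrame from a scalar broadcast over a given index and columns; a small sketch of that behaviour with invented labels:

import pandas as pd

index = pd.RangeIndex(3)
columns = ["A", "B"]
# the scalar is broadcast to every cell when both index and columns are supplied
annotation_df = pd.DataFrame("UNLABELED", index=index, columns=columns)
print(annotation_df)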
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Read streamed and locally saved twitter data."""
import pandas as pd
if __name__ == "__main__":
local_csv_fpath = "CSV_FILE_TWEETS_LOCAL.csv"
cols_to_show = [
# "id",
# "user",
"screen_name",
"location",
"created_at"
# "geo",
# "text",
]
dtypes_dict = {
"id": str,
"user": str,
"screen_name": str,
"location": str,
"text": str,
"followers": int,
}
# Read data
df = pd.read_csv(
local_csv_fpath,
dtype=dtypes_dict,
lineterminator="\n",
parse_dates=["created_at"],
)
# Convert datetime col to EST
df["created_at"] = pd.to_datetime(df["created_at"]).dt.tz_convert(
"US/Eastern"
)
# Show subset of columns
with pd.option_context("display.max_columns", 100):
with pd.option_context("display.max_rows", 500):
with
|
pd.option_context("display.width", 1000)
|
pandas.option_context
|
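The nested with-statements above can also be collapsed into one call, since pandas.option_context accepts any number of option/value pairs; a brief sketch:

import pandas as pd

df = pd.DataFrame({"a": range(5)})
with pd.option_context("display.max_columns", 100,
                       "display.max_rows", 500,
                       "display.width", 1000):
    print(df)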
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import re
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.decomposition import TruncatedSVD
from sklearn import preprocessing, model_selection, metrics
import lightgbm as lgb
import gc
train_df = pd.read_csv('../input/train.csv', parse_dates=["activation_date"])
test_df = pd.read_csv('../input/test.csv', parse_dates=["activation_date"])
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import seaborn as sns
import random
import nltk
nltk.data.path.append("/media/sayantan/Personal/nltk_data")
from nltk.stem.snowball import RussianStemmer
from fuzzywuzzy import fuzz
from nltk.corpus import stopwords
from tqdm import tqdm
from scipy.stats import skew, kurtosis
from scipy.spatial.distance import cosine, cityblock, jaccard, canberra, euclidean, minkowski, braycurtis
from nltk import word_tokenize
stopwords = stopwords.words('russian')
def genFeatures(x):
x["activation_weekday"] = x["activation_date"].dt.weekday
x["monthday"] = x["activation_date"].dt.day
x["weekinmonday"] = x["monthday"] // 7
##################Added in set 1 - 0.01 Improvement
    x['price_new'] = np.log1p(x.price)  # log transform improves correlation with deal_probability
x['count_null_in_row'] = x.isnull().sum(axis=1)# works
x['has_description'] = x.description.isnull().astype(int)
x['has_image'] = x.image.isnull().astype(int)
x['has_image_top'] = x.image_top_1.isnull().astype(int)
x['has_param1'] = x.param_1.isnull().astype(int)
x['has_param2'] = x.param_2.isnull().astype(int)
x['has_param3'] = x.param_3.isnull().astype(int)
x['has_price'] = x.price.isnull().astype(int)
#################Added in set 2 - 0.00x Improvement
x["description"].fillna("NA", inplace=True)
x["desc_nwords"] = x["description"].apply(lambda x: len(x.split()))
x['len_description'] = x['description'].apply(lambda x: len(x))
x["title_nwords"] = x["title"].apply(lambda x: len(x.split()))
x['len_title'] = x['title'].apply(lambda x: len(x))
x['params'] = x['param_1'].fillna('') + ' ' + x['param_2'].fillna('') + ' ' + x['param_3'].fillna('')
x['params'] = x['params'].str.strip()
x['len_params'] = x['params'].apply(lambda x: len(x))
x['words_params'] = x['params'].apply(lambda x: len(x.split()))
x['symbol1_count'] = x['description'].str.count('↓')
x['symbol2_count'] = x['description'].str.count('\*')
x['symbol3_count'] = x['description'].str.count('✔')
x['symbol4_count'] = x['description'].str.count('❀')
x['symbol5_count'] = x['description'].str.count('➚')
x['symbol6_count'] = x['description'].str.count('ஜ')
    x['symbol7_count'] = x['description'].str.count(r'\.')  # escape the dot: str.count() interprets the pattern as a regex
x['symbol8_count'] = x['description'].str.count('!')
x['symbol9_count'] = x['description'].str.count('\?')
x['symbol10_count'] = x['description'].str.count(' ')
x['symbol11_count'] = x['description'].str.count('-')
x['symbol12_count'] = x['description'].str.count(',')
####################
return x
train_df = genFeatures(train_df)
test_df = genFeatures(test_df)
test_df['deal_probability']=10.0
############################
english_stemmer = nltk.stem.SnowballStemmer('russian')
def clean_text(text):
#text = re.sub(r'(\d+),(\d+)', r'\1.\2', text)
text = text.replace(u'²', '2')
text = text.lower()
text = re.sub(u'[^a-zа-я0-9]', ' ', text)
text = re.sub('\s+', ' ', text)
return text.strip()
def stem_tokens(tokens, stemmer):
stemmed = []
for token in tokens:
#stemmed.append(stemmer.lemmatize(token))
stemmed.append(stemmer.stem(token))
return stemmed
def preprocess_data(line,
exclude_stopword=True,
encode_digit=False):
## tokenize
line = clean_text(line)
tokens = [x.lower() for x in nltk.word_tokenize(line)]
## stem
tokens_stemmed = stem_tokens(tokens, english_stemmer)#english_stemmer
if exclude_stopword:
tokens_stemmed = [x for x in tokens_stemmed if x not in stopwords]
return ' '.join(tokens_stemmed)
train_test = pd.concat((train_df, test_df), axis = 'rows')
## After cleaning => then find intersection
train_test["title_clean"]= list(train_test[["title"]].apply(lambda x: preprocess_data(x["title"]), axis=1))
train_test["desc_clean"]= list(train_test[["description"]].apply(lambda x: preprocess_data(x["description"]), axis=1))
train_test["params_clean"]= list(train_test[["params"]].apply(lambda x: preprocess_data(x["params"]), axis=1))
train_test['count_common_words_title_desc'] = train_test.apply(lambda x: len(set(str(x['title_clean']).lower().split()).intersection(set(str(x['desc_clean']).lower().split()))), axis=1)
train_test['count_common_words_title_params'] = train_test.apply(lambda x: len(set(str(x['title_clean']).lower().split()).intersection(set(str(x['params_clean']).lower().split()))), axis=1)
train_test['count_common_words_params_desc'] = train_test.apply(lambda x: len(set(str(x['params_clean']).lower().split()).intersection(set(str(x['desc_clean']).lower().split()))), axis=1)
print("Cleaned texts..")
###################
# Count Nouns
import pymorphy2
morph = pymorphy2.MorphAnalyzer(result_type=None)
from fastcache import clru_cache as lru_cache
@lru_cache(maxsize=1000000)
def lemmatize_pos(word):
_, tag, norm_form, _, _ = morph.parse(word)[0]
return norm_form, tag.POS
def getPOS(x, pos1 = 'NOUN'):
lemmatized = []
x = clean_text(x)
#x = re.sub(u'[.]', ' ', x)
for s in x.split():
s, pos = lemmatize_pos(s)
if pos != None:
if pos1 in pos:
lemmatized.append(s)
return ' '.join(lemmatized)
train_test['get_nouns_title'] = list(train_test.apply(lambda x: getPOS(x['title'], 'NOUN'), axis=1))
train_test['get_nouns_desc'] = list(train_test.apply(lambda x: getPOS(x['description'], 'NOUN'), axis=1))
train_test['get_adj_title'] = list(train_test.apply(lambda x: getPOS(x['title'], 'ADJ'), axis=1))
train_test['get_adj_desc'] = list(train_test.apply(lambda x: getPOS(x['description'], 'ADJ'), axis=1))
train_test['get_verb_title'] = list(train_test.apply(lambda x: getPOS(x['title'], 'VERB'), axis=1))
train_test['get_verb_desc'] = list(train_test.apply(lambda x: getPOS(x['description'], 'VERB'), axis=1))
# Count digits
def count_digit(x):
x = clean_text(x)
return len(re.findall(r'\b\d+\b', x))
train_test['count_of_digit_in_title'] = list(train_test.apply(lambda x: count_digit(x['title']), axis=1))
train_test['count_of_digit_in_desc'] = list(train_test.apply(lambda x: count_digit(x['description']), axis=1))
train_test['count_of_digit_in_params'] = list(train_test.apply(lambda x: count_digit(x['params']), axis=1))
## get unicode features
count_unicode = lambda x: len([c for c in x if ord(c) > 1105])
count_distunicode = lambda x: len({c for c in x if ord(c) > 1105})
train_test['count_of_unicode_in_title'] = list(train_test.apply(lambda x: count_unicode(x['title']), axis=1))
train_test['count_of_unicode_in_desc'] = list(train_test.apply(lambda x: count_distunicode(x['description']), axis=1))
train_test['count_of_distuni_in_title'] = list(train_test.apply(lambda x: count_unicode(x['title']), axis=1))
train_test['count_of_distuni_in_desc'] = list(train_test.apply(lambda x: count_distunicode(x['description']), axis=1))
###
count_caps = lambda x: len([c for c in x if c.isupper()])
train_test['count_caps_in_title'] = list(train_test.apply(lambda x: count_caps(x['title']), axis=1))
train_test['count_caps_in_desc'] = list(train_test.apply(lambda x: count_caps(x['description']), axis=1))
import string
count_punct = lambda x: len([c for c in x if c in string.punctuation])
train_test['count_punct_in_title'] = list(train_test.apply(lambda x: count_punct(x['title']), axis=1))
train_test['count_punct_in_desc'] = list(train_test.apply(lambda x: count_punct(x['description']), axis=1))
print("Computed POS Features and others..")
train_test['count_common_nouns'] = train_test.apply(lambda x: len(set(str(x['get_nouns_title']).lower().split()).intersection(set(str(x['get_nouns_desc']).lower().split()))), axis=1)
train_test['count_common_adj'] = train_test.apply(lambda x: len(set(str(x['get_adj_title']).lower().split()).intersection(set(str(x['get_adj_desc']).lower().split()))), axis=1)
train_test['ratio_of_unicode_in_title'] = train_test['count_of_unicode_in_title'] / train_test['len_title']
train_test['ratio_of_unicode_in_desc'] = train_test['count_of_unicode_in_desc'] / train_test['len_description']
train_test['ratio_of_punct_in_title'] = train_test['count_punct_in_title'] / train_test['len_title']
train_test['ratio_of_punct_in_desc'] = train_test['count_punct_in_desc'] / train_test['len_description']
train_test['ratio_of_cap_in_title'] = train_test['count_caps_in_title'] / train_test['len_title']
train_test['ratio_of_cap_in_desc'] = train_test['count_caps_in_desc'] / train_test['len_description']
train_test['count_nouns_in_title'] = train_test["get_nouns_title"].apply(lambda x: len(x.split()))
train_test['count_nouns_in_desc'] = train_test['get_nouns_desc'].apply(lambda x: len(x.split()))
train_test['count_adj_in_title'] = train_test["get_adj_title"].apply(lambda x: len(x.split()))
train_test['count_adj_in_desc'] = train_test['get_adj_desc'].apply(lambda x: len(x.split()))
train_test['count_verb_title'] = train_test['get_verb_title'].apply(lambda x: len(x.split()))
train_test['count_verb_desc'] = train_test['get_verb_desc'].apply(lambda x: len(x.split()))
train_test['ratio_nouns_in_title'] = train_test["count_nouns_in_title"] / train_test["title_nwords"]
train_test['ratio_nouns_in_desc'] = train_test["count_nouns_in_desc"] / train_test["desc_nwords"]
train_test['ratio_adj_in_title'] = train_test["count_adj_in_title"] / train_test["title_nwords"]
train_test['ratio_adj_in_desc'] = train_test["count_adj_in_desc"] / train_test["desc_nwords"]
train_test['ratio_vrb_in_title'] = train_test["count_verb_title"] / train_test["title_nwords"]
train_test['ratio_vrb_in_desc'] = train_test["count_verb_desc"] / train_test["desc_nwords"]
train_test["title"]= list(train_test[["title"]].apply(lambda x: clean_text(x["title"]), axis=1))
train_test["description"]= list(train_test[["description"]].apply(lambda x: clean_text(x["description"]), axis=1))
train_test["params"]= list(train_test[["params"]].apply(lambda x: clean_text(x["params"]), axis=1))
#######################
### Save
#######################
train_df = train_test.loc[train_test.deal_probability != 10].reset_index(drop = True)
test_df = train_test.loc[train_test.deal_probability == 10].reset_index(drop = True)
for c in train_df.columns:
if train_df[c].dtype == 'float64':
train_df[c] = train_df[c].astype('float32')
test_df[c] = test_df[c].astype('float32')
train_df.to_feather('../train_basic_features.pkl')
test_df.to_feather('../test__basic_features.pkl')
#######################
### Label Enc
#######################
from sklearn.preprocessing import StandardScaler, OneHotEncoder, LabelEncoder, MinMaxScaler
cat_vars = ["user_id", "region", "city", "parent_category_name", "category_name", "user_type", "param_1", "param_2", "param_3"]
for col in cat_vars:
lbl = preprocessing.LabelEncoder()
lbl.fit(list(train_df[col].values.astype('str')) + list(test_df[col].values.astype('str')))
train_df[col] = lbl.transform(list(train_df[col].values.astype('str')))
test_df[col] = lbl.transform(list(test_df[col].values.astype('str')))
train_df.to_feather('../train_basic_features_lblencCats.pkl')
test_df.to_feather('../test__basic_features_lblencCats.pkl')
#######################
### One hots
#######################
train_df=pd.read_feather('../train_basic_features_lblencCats.pkl')
test_df=pd.read_feather('../test__basic_features_lblencCats.pkl')
from sklearn.externals import joblib
le = OneHotEncoder()
X = le.fit_transform(np.array(train_df.user_id.values.tolist() + test_df.user_id.values.tolist()).reshape(-1,1))
joblib.dump(X, "../user_id_onehot.pkl")
X = le.fit_transform(np.array(train_df.region.values.tolist() + test_df.region.values.tolist()).reshape(-1,1))
joblib.dump(X, "../region_onehot.pkl")
X = le.fit_transform(np.array(train_df.city.values.tolist() + test_df.city.values.tolist()).reshape(-1,1))
joblib.dump(X, "../city_onehot.pkl")
X = le.fit_transform(np.array(train_df.parent_category_name.values.tolist() + test_df.parent_category_name.values.tolist()).reshape(-1,1))
joblib.dump(X, "../parent_category_name_onehot.pkl")
X = le.fit_transform(np.array(train_df.category_name.values.tolist() + test_df.category_name.values.tolist()).reshape(-1,1))
joblib.dump(X, "../category_name_onehot.pkl")
X = le.fit_transform(np.array(train_df.user_type.values.tolist() + test_df.user_type.values.tolist()).reshape(-1,1))
joblib.dump(X, "../user_type_onehot.pkl")
X = le.fit_transform(np.array(train_df.param_1.values.tolist() + test_df.param_1.values.tolist()).reshape(-1,1))
joblib.dump(X, "../param_1_onehot.pkl")
X = le.fit_transform(np.array(train_df.param_2.values.tolist() + test_df.param_2.values.tolist()).reshape(-1,1))
joblib.dump(X, "../param_2_onehot.pkl")
X = le.fit_transform(np.array(train_df.param_3.values.tolist() + test_df.param_3.values.tolist()).reshape(-1,1))
joblib.dump(X, "../param_3_onehot.pkl")
train_df.drop(cat_vars, inplace = True, axis = 'columns')
test_df.drop(cat_vars, inplace = True, axis = 'columns')
train_df.to_feather('../train_basic_features_woCats.pkl')
test_df.to_feather('../test__basic_features_woCats.pkl')
#######################
### Tfidf
#######################
train_df=pd.read_feather('../train_basic_features_woCats.pkl')
test_df=pd.read_feather('../test__basic_features_woCats.pkl')
from sklearn.externals import joblib
### TFIDF Vectorizer ###
train_df['params'] = train_df['params'].fillna('NA')
test_df['params'] = test_df['params'].fillna('NA')
tfidf_vec = TfidfVectorizer(ngram_range=(1,3),max_features = 10000,#min_df=3, max_df=.85,
analyzer='word', token_pattern= r'\w{1,}',
use_idf=1, smooth_idf=0, sublinear_tf=1,)
#TfidfVectorizer(ngram_range=(1,2))
full_tfidf = tfidf_vec.fit_transform(train_df['params'].values.tolist() + test_df['params'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['params'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['params'].values.tolist())
del full_tfidf
print("TDIDF Params UNCLEAN..")
joblib.dump([train_tfidf, test_tfidf], "../params_tfidf.pkl")
### TFIDF Vectorizer ###
train_df['title_clean'] = train_df['title_clean'].fillna('NA')
test_df['title_clean'] = test_df['title_clean'].fillna('NA')
tfidf_vec = TfidfVectorizer(ngram_range=(1,2),max_features = 20000,#,min_df=3, max_df=.85,
analyzer='word', token_pattern= r'\w{1,}',
use_idf=1, smooth_idf=0, sublinear_tf=1,)
full_tfidf = tfidf_vec.fit_transform(train_df['title_clean'].values.tolist() + test_df['title_clean'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['title_clean'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['title_clean'].values.tolist())
joblib.dump([train_tfidf, test_tfidf], "../title_tfidf.pkl")
del full_tfidf
print("TDIDF TITLE CLEAN..")
### TFIDF Vectorizer ###
train_df['desc_clean'] = train_df['desc_clean'].fillna(' ')
test_df['desc_clean'] = test_df['desc_clean'].fillna(' ')
tfidf_vec = TfidfVectorizer(ngram_range=(1,2), max_features = 20000, #,min_df=3, max_df=.85,
analyzer='word', token_pattern= r'\w{1,}',
use_idf=1, smooth_idf=0, sublinear_tf=1,)
full_tfidf = tfidf_vec.fit_transform(train_df['desc_clean'].values.tolist() + test_df['desc_clean'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['desc_clean'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['desc_clean'].values.tolist())
joblib.dump([train_tfidf, test_tfidf], "../desc_tfidf.pkl")
del full_tfidf
print("TDIDF DESC CLEAN..")
### TFIDF Vectorizer ###
train_df['get_nouns_title'] = train_df['get_nouns_title'].fillna(' ')
test_df['get_nouns_title'] = test_df['get_nouns_title'].fillna(' ')
tfidf_vec = TfidfVectorizer(ngram_range=(1,1), max_features = 10000)
full_tfidf = tfidf_vec.fit_transform(train_df['get_nouns_title'].values.tolist() + test_df['get_nouns_title'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['get_nouns_title'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['get_nouns_title'].values.tolist())
joblib.dump([train_tfidf, test_tfidf], "../nouns_title_tfidf.pkl")
del full_tfidf
print("TDIDF Title Noun..")
### TFIDF Vectorizer ###
train_df['get_nouns_desc'] = train_df['get_nouns_desc'].fillna(' ')
test_df['get_nouns_desc'] = test_df['get_nouns_desc'].fillna(' ')
tfidf_vec = TfidfVectorizer(ngram_range=(1,1), max_features = 10000)
full_tfidf = tfidf_vec.fit_transform(train_df['get_nouns_desc'].values.tolist() + test_df['get_nouns_desc'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['get_nouns_desc'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['get_nouns_desc'].values.tolist())
joblib.dump([train_tfidf, test_tfidf], "../nouns_desc_tfidf.pkl")
del full_tfidf
print("TDIDF Desc Noun..")
### TFIDF Vectorizer ###
train_df['get_adj_title'] = train_df['get_adj_title'].fillna(' ')
test_df['get_adj_title'] = test_df['get_adj_title'].fillna(' ')
tfidf_vec = TfidfVectorizer(ngram_range=(1,1), max_features = 10000)
full_tfidf = tfidf_vec.fit_transform(train_df['get_adj_title'].values.tolist() + test_df['get_adj_title'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['get_adj_title'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['get_adj_title'].values.tolist())
joblib.dump([train_tfidf, test_tfidf], "../adj_title_tfidf.pkl")
del full_tfidf
print("TDIDF TITLE Adj..")
### TFIDF Vectorizer ###
train_df['get_adj_desc'] = train_df['get_adj_desc'].fillna(' ')
test_df['get_adj_desc'] = test_df['get_adj_desc'].fillna(' ')
tfidf_vec = TfidfVectorizer(ngram_range=(1,1), max_features = 10000)
full_tfidf = tfidf_vec.fit_transform(train_df['get_adj_desc'].values.tolist() + test_df['get_adj_desc'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['get_adj_desc'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['get_adj_desc'].values.tolist())
joblib.dump([train_tfidf, test_tfidf], "../adj_desc_tfidf.pkl")
del full_tfidf
print("TDIDF Desc Adj..")
### TFIDF Vectorizer ###
train_df['get_verb_title'] = train_df['get_verb_title'].fillna(' ')
test_df['get_verb_title'] = test_df['get_verb_title'].fillna(' ')
tfidf_vec = TfidfVectorizer(ngram_range=(1,1), max_features = 10000)
full_tfidf = tfidf_vec.fit_transform(train_df['get_verb_title'].values.tolist() + test_df['get_verb_title'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['get_verb_title'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['get_verb_title'].values.tolist())
joblib.dump([train_tfidf, test_tfidf], "../verb_title_tfidf.pkl")
del full_tfidf
print("TDIDF TITLE Verb..")
### TFIDF Vectorizer ###
train_df['get_verb_desc'] = train_df['get_verb_desc'].fillna(' ')
test_df['get_verb_desc'] = test_df['get_verb_desc'].fillna(' ')
tfidf_vec = TfidfVectorizer(ngram_range=(1,1), max_features = 10000)
full_tfidf = tfidf_vec.fit_transform(train_df['get_verb_desc'].values.tolist() + test_df['get_verb_desc'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['get_verb_desc'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['get_verb_desc'].values.tolist())
joblib.dump([train_tfidf, test_tfidf], "../verb_desc_tfidf.pkl")
del full_tfidf
print("TDIDF Desc Verb..")
###############################
# Sentence to seq
###############################
print('Generate Word Sequences')
train_df=pd.read_feather('../train_basic_features_woCats.pkl')
test_df=pd.read_feather('../test__basic_features_woCats.pkl')
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
MAX_NUM_OF_WORDS = 100000
TIT_MAX_SEQUENCE_LENGTH = 100
df = pd.concat((train_df, test_df), axis = 'rows')
tokenizer = Tokenizer(num_words=MAX_NUM_OF_WORDS)
tokenizer.fit_on_texts(df['title'].tolist())
sequences = tokenizer.texts_to_sequences(df['title'].tolist())
titleSequences = pad_sequences(sequences, maxlen=TIT_MAX_SEQUENCE_LENGTH)
joblib.dump(titleSequences, "../titleSequences.pkl")
MAX_NUM_OF_WORDS = 10000
TIT_MAX_SEQUENCE_LENGTH = 20
tokenizer = Tokenizer(num_words=MAX_NUM_OF_WORDS)
tokenizer.fit_on_texts(df['params'].tolist())
sequences = tokenizer.texts_to_sequences(df['params'].tolist())
titleSequences = pad_sequences(sequences, maxlen=TIT_MAX_SEQUENCE_LENGTH)
joblib.dump(titleSequences, "../paramSequences.pkl")
MAX_NUM_OF_WORDS = 100000
TIT_MAX_SEQUENCE_LENGTH = 100
tokenizer = Tokenizer(num_words=MAX_NUM_OF_WORDS)
tokenizer.fit_on_texts(df['description'].tolist())
sequences = tokenizer.texts_to_sequences(df['description'].tolist())
titleSequences = pad_sequences(sequences, maxlen=TIT_MAX_SEQUENCE_LENGTH)
joblib.dump(titleSequences, "../descSequences.pkl")
#######OHC WeekDay
from sklearn.preprocessing import StandardScaler, OneHotEncoder, LabelEncoder, MinMaxScaler
le = OneHotEncoder()
X = le.fit_transform(np.array(train_df.activation_weekday.values.tolist() + test_df.activation_weekday.values.tolist()).reshape(-1,1))
################################################
# Cat encoding
################################################
train_df=pd.read_feather('../train_basic_features.pkl')
test_df=pd.read_feather('../test__basic_features.pkl')
def catEncode(train_char, test_char, y, colLst = [], nbag = 10, nfold = 20, minCount = 3, postfix = ''):
train_df = train_char.copy()
test_df = test_char.copy()
if not colLst:
print("Empty ColLst")
for c in train_char.columns:
data = train_char[[c]].copy()
data['y'] = y
enc_mat = np.zeros((y.shape[0],4))
enc_mat_test = np.zeros((test_char.shape[0],4))
for bag in np.arange(nbag):
kf = model_selection.KFold(n_splits= nfold, shuffle=True, random_state=2017*bag)
for dev_index, val_index in kf.split(range(data['y'].shape[0])):
dev_X, val_X = data.iloc[dev_index,:], data.iloc[val_index,:]
datax = dev_X.groupby([c]).agg([len,np.mean,np.std, np.median])
datax.columns = ['_'.join(col).strip() for col in datax.columns.values]
# datax = datax.loc[datax.y_len > minCount]
ind = c + postfix
datax.rename(columns = {'y_mean': ('y_mean_' + ind), 'y_std': ('y_std_' + ind),
'y_len_': ('y_len' + ind), 'y_median_': ('y_median' + ind),}, inplace = True)
# datax[c+'_medshftenc'] = datax['y_median']-med_y
# datax.drop(['y_len','y_mean','y_std','y_median'],axis=1,inplace=True)
datatst = test_char[[c]].copy()
val_X = val_X.join(datax,on=[c], how='left').fillna(np.mean(y))
datatst = datatst.join(datax,on=[c], how='left').fillna(np.mean(y))
enc_mat[val_index,...] += val_X[list(set(datax.columns)-set([c]))]
enc_mat_test += datatst[list(set(datax.columns)-set([c]))]
enc_mat_test /= (nfold * nbag)
enc_mat /= (nbag)
enc_mat = pd.DataFrame(enc_mat)
enc_mat.columns=[ind + str(x) for x in list(set(datax.columns)-set([c]))]
enc_mat_test = pd.DataFrame(enc_mat_test)
enc_mat_test.columns=enc_mat.columns
train_df = pd.concat((enc_mat.reset_index(drop = True),train_df.reset_index(drop = True)), axis=1)
test_df = pd.concat([enc_mat_test.reset_index(drop = True),test_df.reset_index(drop = True)],axis=1)
else:
print("Not Empty ColLst")
data = train_char[colLst].copy()
data['y'] = y
enc_mat = np.zeros((y.shape[0],4))
enc_mat_test = np.zeros((test_char.shape[0],4))
for bag in np.arange(nbag):
kf = model_selection.KFold(n_splits= nfold, shuffle=True, random_state=2017*bag)
for dev_index, val_index in kf.split(range(data['y'].shape[0])):
dev_X, val_X = data.iloc[dev_index,:], data.iloc[val_index,:]
datax = dev_X.groupby(colLst).agg([len,np.mean,np.std, np.median])
datax.columns = ['_'.join(col).strip() for col in datax.columns.values]
# datax = datax.loc[datax.y_len > minCount]
ind = '_'.join(colLst) + postfix
datax.rename(columns = {'y_mean': ('y_mean_' + ind), 'y_std': ('y_std_' + ind),
'y_len': ('y_len_' + ind), 'y_median': ('y_median_' + ind),}, inplace = True)
datatst = test_char[colLst].copy()
val_X = val_X.join(datax,on=colLst, how='left').fillna(np.mean(y))
datatst = datatst.join(datax,on=colLst, how='left').fillna(np.mean(y))
print(val_X[list(set(datax.columns)-set(colLst))].columns)
enc_mat[val_index,...] += val_X[list(set(datax.columns)-set(colLst))]
enc_mat_test += datatst[list(set(datax.columns)-set(colLst))]
enc_mat_test /= (nfold * nbag)
enc_mat /= (nbag)
enc_mat = pd.DataFrame(enc_mat)
        enc_mat.columns = [ind + str(x) for x in list(set(datax.columns) - set(colLst))]  # colLst, not the per-column loop variable, in this branch
enc_mat_test = pd.DataFrame(enc_mat_test)
enc_mat_test.columns=enc_mat.columns
train_df = pd.concat((enc_mat.reset_index(drop = True),train_df.reset_index(drop = True)), axis=1)
test_df = pd.concat([enc_mat_test.reset_index(drop = True),test_df.reset_index(drop = True)],axis=1)
print(train_df.columns)
print(test_df.columns)
for c in train_df.columns:
if train_df[c].dtype == 'float64':
train_df[c] = train_df[c].astype('float32')
test_df[c] = test_df[c].astype('float32')
return train_df, test_df
catCols = ['user_id', 'region', 'city', 'parent_category_name',
'category_name', 'user_type']
train_df, test_df = catEncode(train_df[catCols].copy(), test_df[catCols].copy(), train_df.deal_probability.values, nbag = 10, nfold = 10, minCount = 0)
train_df.to_feather('../train_cat_targetenc.pkl')
test_df.to_feather('../test_cat_targetenc.pkl')
################################################################
# Tfidf - part 2
################################################################
import os; os.environ['OMP_NUM_THREADS'] = '1'
from sklearn.decomposition import TruncatedSVD
import nltk
nltk.data.path.append("/media/sayantan/Personal/nltk_data")
from nltk.stem.snowball import RussianStemmer
from nltk.corpus import stopwords
import time
from typing import List, Dict
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer as Tfidf
from sklearn.model_selection import KFold
from sklearn.externals import joblib
from scipy.sparse import hstack, csr_matrix
import pandas as pd
import numpy as np
import gc
from sklearn.preprocessing import StandardScaler, OneHotEncoder, LabelEncoder, MinMaxScaler
from sklearn import model_selection
english_stemmer = nltk.stem.SnowballStemmer('russian')
def clean_text(text):
#text = re.sub(r'(\d+),(\d+)', r'\1.\2', text)
text = text.replace(u'²', '2')
text = text.lower()
text = re.sub(u'[^a-zа-я0-9]', ' ', text)
text = re.sub('\s+', ' ', text)
return text.strip()
def stem_tokens(tokens, stemmer):
stemmed = []
for token in tokens:
#stemmed.append(stemmer.lemmatize(token))
stemmed.append(stemmer.stem(token))
return stemmed
def preprocess_data(line,
exclude_stopword=True,
encode_digit=False):
## tokenize
line = clean_text(line)
tokens = [x.lower() for x in nltk.word_tokenize(line)]
## stem
tokens_stemmed = stem_tokens(tokens, english_stemmer)#english_stemmer
if exclude_stopword:
tokens_stemmed = [x for x in tokens_stemmed if x not in stopwords]
return ' '.join(tokens_stemmed)
stopwords = stopwords.words('russian')
train_per=pd.read_csv('../input/train_active.csv', usecols = ['param_1', 'param_2', 'param_3'])#,'title','description'])
test_per=pd.read_csv('../input/test_active.csv', usecols = ['param_1', 'param_2', 'param_3'])#,'title','description'])
train_test = pd.concat((train_per, test_per), axis = 'rows')
del train_per, test_per; gc.collect()
train_test['params'] = train_test['param_1'].fillna('') + ' ' + train_test['param_2'].fillna('') + ' ' + train_test['param_3'].fillna('')
import re
train_test.drop(['param_1', 'param_2', 'param_3'], axis = 'columns', inplace=True)
train_test["params"]= list(train_test[["params"]].apply(lambda x: clean_text(x["params"]), axis=1))
import re
train_df=pd.read_feather('../train_basic_features_woCats.pkl')
test_df=pd.read_feather('../test__basic_features_woCats.pkl')
from sklearn.externals import joblib
### TFIDF Vectorizer ###
train_df['params'] = train_df['params'].fillna('NA')
test_df['params'] = test_df['params'].fillna('NA')
tfidf_vec = TfidfVectorizer(ngram_range=(1,3),max_features = 10000,#min_df=3, max_df=.85,
analyzer='word', token_pattern= r'\w{1,}',
use_idf=1, smooth_idf=0, sublinear_tf=1,)
#TfidfVectorizer(ngram_range=(1,2))
full_tfidf = tfidf_vec.fit_transform(train_test['params'].values.tolist() + train_df['params'].values.tolist() + test_df['params'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['params'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['params'].values.tolist())
del full_tfidf
print("TDIDF Params UNCLEAN..")
joblib.dump([train_tfidf, test_tfidf], "../params_tfidf2.pkl")
tfidf_vec = TfidfVectorizer(ngram_range=(1,1),max_features = 10000,max_df=.4,#min_df=3,
analyzer='word', token_pattern= r'\w{1,}',
use_idf=1, smooth_idf=0, sublinear_tf=1,)
#TfidfVectorizer(ngram_range=(1,2))
full_tfidf = tfidf_vec.fit_transform(train_test['params'].values.tolist() + train_df['params'].values.tolist() + test_df['params'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['params'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['params'].values.tolist())
del full_tfidf
print("TDIDF Params UNCLEAN..")
joblib.dump([train_tfidf, test_tfidf], "../params_tfidf3.pkl")
del(train_test); gc.collect()
train_per=pd.read_csv('../input/train_active.csv', usecols = ['title'])#,'title','description'])
test_per=pd.read_csv('../input/test_active.csv', usecols = ['title'])#,'title','description'])
train_test = pd.concat((train_per, test_per), axis = 'rows')
del train_per, test_per; gc.collect()
train_test.fillna('NA', inplace=True)
train_test["title_clean"]= list(train_test[["title"]].apply(lambda x: preprocess_data(x["title"]), axis=1))
train_df['title_clean'] = train_df['title_clean'].fillna('NA')
test_df['title_clean'] = test_df['title_clean'].fillna('NA')
tfidf_vec = TfidfVectorizer(ngram_range=(1,2),max_features = 20000,#,min_df=3, max_df=.85,
analyzer='word', token_pattern= r'\w{1,}',
use_idf=1, smooth_idf=0, sublinear_tf=1,)
full_tfidf = tfidf_vec.fit_transform(train_test['title_clean'].values.tolist()+train_df['title_clean'].values.tolist() + test_df['title_clean'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['title_clean'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['title_clean'].values.tolist())
joblib.dump([train_tfidf, test_tfidf], "../title_tfidf2.pkl")
del full_tfidf
print("TDIDF TITLE CLEAN..")
train_df['title_clean'] = train_df['title_clean'].fillna('NA')
test_df['title_clean'] = test_df['title_clean'].fillna('NA')
tfidf_vec = TfidfVectorizer(ngram_range=(1,1),max_features = 20000, max_df=.4,#,min_df=3,
analyzer='word', token_pattern= r'\w{1,}',
use_idf=1, smooth_idf=0, sublinear_tf=1,)
full_tfidf = tfidf_vec.fit_transform(train_test['title_clean'].values.tolist()+train_df['title_clean'].values.tolist() + test_df['title_clean'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['title_clean'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['title_clean'].values.tolist())
joblib.dump([train_tfidf, test_tfidf], "../title_tfidf3.pkl")
del full_tfidf
print("TDIDF TITLE CLEAN..")
del(train_test); gc.collect()
###Too slow###
'''
train_per=pd.read_csv('../input/train_active.csv', usecols = ['description'])#,'title','description'])
test_per=pd.read_csv('../input/test_active.csv', usecols = ['description'])#,'title','description'])
train_per.fillna(' ', inplace=True)
test_per.fillna(' ', inplace=True)
train_test["desc_clean"]= list(train_test[["description"]].apply(lambda x: preprocess_data(x["description"]), axis=1))
### TFIDF Vectorizer ###
train_df['desc_clean'] = train_df['desc_clean'].fillna(' ')
test_df['desc_clean'] = test_df['desc_clean'].fillna(' ')
tfidf_vec = TfidfVectorizer(ngram_range=(1,2), max_features = 20000, stop_words = stopwords#,min_df=3,
analyzer='word', token_pattern= r'\w{1,}',
use_idf=1, smooth_idf=0, sublinear_tf=1,)
full_tfidf = tfidf_vec.fit_transform(train_test['desc_clean'].values.tolist()+train_df['desc_clean'].values.tolist() + test_df['desc_clean'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['desc_clean'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['desc_clean'].values.tolist())
joblib.dump([train_tfidf, test_tfidf], "../desc_tfidf2.pkl")
del full_tfidf
print("TDIDF DESC CLEAN..")
tfidf_vec = TfidfVectorizer(ngram_range=(1,1), max_features = 20000, max_df=.4,#,min_df=3,
analyzer='word', token_pattern= r'\w{1,}',
use_idf=1, smooth_idf=0, sublinear_tf=1,)
full_tfidf = tfidf_vec.fit_transform(train_test['desc_clean'].values.tolist()+train_df['desc_clean'].values.tolist() + test_df['desc_clean'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['desc_clean'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['desc_clean'].values.tolist())
joblib.dump([train_tfidf, test_tfidf], "../desc_tfidf3.pkl")
del full_tfidf
print("TDIDF DESC CLEAN..")
'''
##########################################
# 13. Chargram -- too slow
##########################################
from collections import Counter
train_df=pd.read_feather('../train_basic_features_woCats.pkl')
test_df=pd.read_feather('../test__basic_features_woCats.pkl')
def char_ngrams(s):
s = s.lower()
s = s.replace(u' ', '')
result = Counter()
len_s = len(s)
for n in [3, 4, 5]:
result.update(s[i:i+n] for i in range(len_s - n + 1))
return ' '.join(list(result))
data = pd.concat((train_df, test_df), axis = 'rows')
data['param_chargram'] = list(data[['params']].apply(lambda x: char_ngrams(x['params']), axis=1))
data['title_chargram'] = list(data[['title']].apply(lambda x: char_ngrams(x['title']), axis=1))
#data['desc_chargram'] = list(data[['description']].apply(lambda x: char_ngrams(x['description']), axis=1))
#data['count_common_chargram'] = data.apply(lambda x: len(set(str(x['title_chargram']).lower().split()).intersection(set(str(x['desc_chargram']).lower().split()))), axis=1)
train_df = data.loc[data.deal_probability != 10].reset_index(drop = True)
test_df = data.loc[data.deal_probability == 10].reset_index(drop = True)
del(data); gc.collect()
#####Chargram -TFIDF
tfidf_vec = TfidfVectorizer(ngram_range=(1,3),max_features = 10000, min_df=3, max_df=.75)
full_tfidf = tfidf_vec.fit_transform(train_df['title_chargram'].values.tolist() + test_df['title_chargram'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['title_chargram'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['title_chargram'].values.tolist())
from sklearn.externals import joblib
joblib.dump([train_tfidf, test_tfidf], '../title_chargram_tfidf.pkl')
tfidf_vec = TfidfVectorizer(ngram_range=(1,3),max_features = 10000, min_df=3, max_df=.75)
full_tfidf = tfidf_vec.fit_transform(train_df['param_chargram'].values.tolist() + test_df['param_chargram'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['param_chargram'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['param_chargram'].values.tolist())
from sklearn.externals import joblib
joblib.dump([train_tfidf, test_tfidf], '../param_chargram_tfidf.pkl')
#######Chargram of Cat and Parent cat
def clean_text(text):
#text = re.sub(r'(\d+),(\d+)', r'\1.\2', text)
text = text.replace(u'²', '2')
text = text.lower()
text = re.sub(u'[^a-zа-я0-9]', ' ', text)
text = re.sub('\s+', ' ', text)
return text.strip()
train_df = pd.read_feather('../train_basic_features.pkl')
test_df = pd.read_feather('../test__basic_features.pkl')
data = pd.concat([train_df, test_df], axis= 'rows')
data['categories'] = data["parent_category_name"].fillna(' ') + data["category_name"].fillna(' ')
data['cat_chargram'] = list(data[['categories']].apply(lambda x: char_ngrams(x['categories']), axis=1))
train_df = data.loc[data.deal_probability != 10].reset_index(drop = True)
test_df = data.loc[data.deal_probability == 10].reset_index(drop = True)
del(data); gc.collect()
tfidf_vec = TfidfVectorizer(ngram_range=(1,3),max_features = 1000, min_df=3, max_df=.75)
full_tfidf = tfidf_vec.fit_transform(train_df['cat_chargram'].values.tolist() + test_df['cat_chargram'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['cat_chargram'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['cat_chargram'].values.tolist())
from sklearn.externals import joblib
joblib.dump([train_tfidf, test_tfidf], '../cat_chargram_tfidf.pkl')
##############################
## New Kaggle Ftr
##############################
import pandas as pd
import gc
used_cols = ['item_id', 'user_id']
train = pd.read_csv('../input/train.csv', usecols=used_cols)
train_active = pd.read_csv('../input/train_active.csv', usecols=used_cols)
test = pd.read_csv('../input/test.csv', usecols=used_cols)
test_active = pd.read_csv('../input/test_active.csv', usecols=used_cols)
train_periods = pd.read_csv('../input/periods_train.csv', parse_dates=['date_from', 'date_to'])
test_periods = pd.read_csv('../input/periods_test.csv', parse_dates=['date_from', 'date_to'])
train.head()
all_samples = pd.concat([
train,
train_active,
test,
test_active
]).reset_index(drop=True)
all_samples.drop_duplicates(['item_id'], inplace=True)
del train_active
del test_active
gc.collect()
all_periods = pd.concat([
train_periods,
test_periods
])
del train_periods
del test_periods
gc.collect()
all_periods.head()
all_periods['days_up'] = (all_periods['date_to'] - all_periods['date_from']).dt.days
gp = all_periods.groupby(['item_id'])[['days_up']]
gp_df = pd.DataFrame()
gp_df['days_up_sum'] = gp.sum()['days_up']
gp_df['times_put_up'] = gp.count()['days_up']
gp_df.reset_index(inplace=True)
gp_df.rename(index=str, columns={'index': 'item_id'})
gp_df.head()
all_periods.drop_duplicates(['item_id'], inplace=True)
all_periods = all_periods.merge(gp_df, on='item_id', how='left')
all_periods.head()
del gp
del gp_df
gc.collect()
all_periods = all_periods.merge(all_samples, on='item_id', how='left')
all_periods.head()
gp = all_periods.groupby(['user_id'])[['days_up_sum', 'times_put_up']].mean().reset_index() \
.rename(index=str, columns={
'days_up_sum': 'avg_days_up_user',
'times_put_up': 'avg_times_up_user'
})
gp.head()
n_user_items = all_samples.groupby(['user_id'])[['item_id']].count().reset_index() \
.rename(index=str, columns={
'item_id': 'n_user_items'
})
gp = gp.merge(n_user_items, on='user_id', how='left')
gp.head()
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train = train.merge(gp, on='user_id', how='left')
test = test.merge(gp, on='user_id', how='left')
agg_cols = list(gp.columns)[1:]
del gp
gc.collect()
train.head()
train = train[['avg_days_up_user','avg_times_up_user','n_user_items']]
test = test[['avg_days_up_user','avg_times_up_user','n_user_items']]
train.to_feather('../train_kag_agg_ftr.ftr')
test.to_feather('../test_kag_agg_ftr.ftr')
def catEncode(train_char, test_char, y, colLst = [], nbag = 10, nfold = 20, minCount = 3, postfix = ''):
train_df = train_char.copy()
test_df = test_char.copy()
if not colLst:
print("Empty ColLst")
for c in train_char.columns:
data = train_char[[c]].copy()
data['y'] = y
enc_mat = np.zeros((y.shape[0],4))
enc_mat_test = np.zeros((test_char.shape[0],4))
for bag in np.arange(nbag):
kf = model_selection.KFold(n_splits= nfold, shuffle=True, random_state=2017*bag)
for dev_index, val_index in kf.split(range(data['y'].shape[0])):
dev_X, val_X = data.iloc[dev_index,:], data.iloc[val_index,:]
datax = dev_X.groupby([c]).agg([len,np.mean,np.std, np.median])
datax.columns = ['_'.join(col).strip() for col in datax.columns.values]
# datax = datax.loc[datax.y_len > minCount]
ind = c + postfix
datax.rename(columns = {'y_mean': ('y_mean_' + ind), 'y_std': ('y_std_' + ind),
'y_len_': ('y_len' + ind), 'y_median_': ('y_median' + ind),}, inplace = True)
# datax[c+'_medshftenc'] = datax['y_median']-med_y
# datax.drop(['y_len','y_mean','y_std','y_median'],axis=1,inplace=True)
datatst = test_char[[c]].copy()
val_X = val_X.join(datax,on=[c], how='left').fillna(np.mean(y))
datatst = datatst.join(datax,on=[c], how='left').fillna(np.mean(y))
enc_mat[val_index,...] += val_X[list(set(datax.columns)-set([c]))]
enc_mat_test += datatst[list(set(datax.columns)-set([c]))]
enc_mat_test /= (nfold * nbag)
enc_mat /= (nbag)
enc_mat = pd.DataFrame(enc_mat)
enc_mat.columns=[ind + str(x) for x in list(set(datax.columns)-set([c]))]
enc_mat_test = pd.DataFrame(enc_mat_test)
enc_mat_test.columns=enc_mat.columns
train_df = pd.concat((enc_mat.reset_index(drop = True),train_df.reset_index(drop = True)), axis=1)
test_df = pd.concat([enc_mat_test.reset_index(drop = True),test_df.reset_index(drop = True)],axis=1)
else:
print("Not Empty ColLst")
data = train_char[colLst].copy()
data['y'] = y
enc_mat = np.zeros((y.shape[0],4))
enc_mat_test = np.zeros((test_char.shape[0],4))
for bag in np.arange(nbag):
kf = model_selection.KFold(n_splits= nfold, shuffle=True, random_state=2017*bag)
for dev_index, val_index in kf.split(range(data['y'].shape[0])):
dev_X, val_X = data.iloc[dev_index,:], data.iloc[val_index,:]
datax = dev_X.groupby(colLst).agg([len,np.mean,np.std, np.median])
datax.columns = ['_'.join(col).strip() for col in datax.columns.values]
# datax = datax.loc[datax.y_len > minCount]
ind = '_'.join(colLst) + postfix
datax.rename(columns = {'y_mean': ('y_mean_' + ind), 'y_std': ('y_std_' + ind),
'y_len': ('y_len_' + ind), 'y_median': ('y_median_' + ind),}, inplace = True)
datatst = test_char[colLst].copy()
val_X = val_X.join(datax,on=colLst, how='left').fillna(np.mean(y))
datatst = datatst.join(datax,on=colLst, how='left').fillna(np.mean(y))
print(val_X[list(set(datax.columns)-set(colLst))].columns)
enc_mat[val_index,...] += val_X[list(set(datax.columns)-set(colLst))]
enc_mat_test += datatst[list(set(datax.columns)-set(colLst))]
enc_mat_test /= (nfold * nbag)
enc_mat /= (nbag)
enc_mat = pd.DataFrame(enc_mat)
        enc_mat.columns = [ind + str(x) for x in list(set(datax.columns) - set(colLst))]
enc_mat_test = pd.DataFrame(enc_mat_test)
enc_mat_test.columns=enc_mat.columns
train_df = pd.concat((enc_mat.reset_index(drop = True),train_df.reset_index(drop = True)), axis=1)
test_df = pd.concat([enc_mat_test.reset_index(drop = True),test_df.reset_index(drop = True)],axis=1)
print(train_df.columns)
print(test_df.columns)
for c in train_df.columns:
if train_df[c].dtype == 'float64':
train_df[c] = train_df[c].astype('float32')
test_df[c] = test_df[c].astype('float32')
return train_df, test_df
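# A minimal, self-contained usage sketch for catEncode on synthetic data; the
# column name 'cat_a' and the sizes are illustrative only, not from the original
# pipeline. catEncode itself expects numpy (np), pandas (pd) and sklearn's
# model_selection to be available at module level.
from sklearn import model_selection  # harmless if already imported above

def _demo_cat_encode():
    rng = np.random.RandomState(0)
    demo_train = pd.DataFrame({'cat_a': rng.choice(list('xyz'), 200)})
    demo_test = pd.DataFrame({'cat_a': rng.choice(list('xyz'), 50)})
    demo_y = pd.Series(rng.rand(200))
    # two bags of 4-fold out-of-fold target encodings (len/mean/std/median of y per category)
    enc_train, enc_test = catEncode(demo_train, demo_test, demo_y, nbag=2, nfold=4)
    return enc_train, enc_test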
train_df = pd.read_feather('../train_basic_features_woCats.pkl')  # api: pandas.read_feather
import json
import math
import random
import numpy as np
import pandas as pd
from scipy.stats import norm
from sklearn import linear_model
from math import sqrt
from DataSynthesizer.lib.utils import read_json_file
from FAIR.FairnessInRankings import FairnessInRankingsTester
def save_uploaded_file(file, current_file):
"""
Save user uploaded data on server.
Attributes:
file: the uploaded dataset.
        current_file: file name without the ".csv" suffix
"""
with open(current_file+".csv", 'wb+') as destination:
for chunk in file.chunks():
destination.write(chunk)
def get_score_scatter(current_file,top_K=100):
"""
    Generate data for the scatter plot.
    Attributes:
        current_file: file name that stored the data (without the ".csv" suffix)
        top_K: number of top-ranked rows included in the scatter plot
Return: data for scatter plot using HighChart format
"""
data = pd.read_csv(current_file+"_weightsum.csv").head(top_K)
scatter_points = []
score_value = data["GeneratedScore"].tolist()
position_value = [x for x in range(1, len(data) + 1)]
for i in range(len(score_value)):
scatter_points.append([position_value[i], score_value[i]])
return scatter_points
def getAttValueCountTopAndOverall(input_data, att_name, top_K=10):
"""
    Subfunction to count the values of an input attribute for the top-10 and overall pie charts.
    Attributes:
        input_data: dataframe that stores the input data
        att_name: name of the attribute to count
        top_K: number of top positions over which values are counted, default value is 10
    Return: json data with two two-dimensional arrays of values and their counts, for the top 10 and overall
"""
counts_all = {}
all_values_count = input_data[att_name].value_counts()
top_data = input_data[0:top_K]
# get overall counts
new_values_all = []
for i in range(len(all_values_count)):
cur_cate = all_values_count.index[i]
# if not a string, then encode it to the type that is JSON serializable
if not isinstance(cur_cate, str):
cur_cate = str(cur_cate)
cur_count = int(all_values_count.values[i])
new_values_all.append([cur_cate,cur_count])
counts_all["overall"] = new_values_all
# get top K counts and make sure list of counts include every value of input attribute for consistent pie chart colors
top_values_count = top_data[att_name].value_counts()
top_cates = top_values_count.index
# generate a dict to store the top k value counts
top_values_count_dic = {}
for i in range(len(top_values_count)):
top_values_count_dic[top_values_count.index[i]] = int(top_values_count.values[i])
# generate a new value list for top K using same order as in over all list
new_values_top = []
for i in range(len(all_values_count)):
cur_cate = all_values_count.index[i]
# if not a string, then encode it to the type that is JSON serializable
if not isinstance(cur_cate, str):
str_cur_cate = str(cur_cate)
else:
str_cur_cate = cur_cate
        if cur_cate in top_cates:  # value exists in the top K
new_values_top.append([str_cur_cate, top_values_count_dic[cur_cate]])
else:
new_values_top.append([str_cur_cate, 0])
counts_all["topTen"] = new_values_top
return counts_all
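# Hypothetical helper (not in the original module) illustrating the expected
# output of getAttValueCountTopAndOverall on a tiny in-memory frame; real
# callers pass the "<current_file>_weightsum.csv" data instead.
def _demo_att_value_counts():
    demo_df = pd.DataFrame({'gender': ['F', 'M', 'F', 'M', 'M', 'F', 'M', 'M']})
    counts = getAttValueCountTopAndOverall(demo_df, 'gender', top_K=4)
    # counts == {'overall': [['M', 5], ['F', 3]], 'topTen': [['M', 2], ['F', 2]]}
    return counts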
def get_chart_data(current_file, att_names):
"""
    Generate data for the pie charts.
    Attributes:
        current_file: file name that stored the data (without the ".csv" suffix)
att_names: list of attribute names to compute the chart data
Return: json data for pie chart plot using HighChart format
"""
data = pd.read_csv(current_file + "_weightsum.csv")
pie_data = {}
for ai in att_names:
cur_ai_json = {}
counts_all = getAttValueCountTopAndOverall(data,ai)
cur_ai_json["topTen"] = counts_all["topTen"]
cur_ai_json["overall"] = counts_all["overall"]
pie_data[ai] = cur_ai_json
return pie_data
def computeSlopeOfScores(current_file,top_K, round_default=2):
"""
    Compute the slope of the scatter plot.
    Attributes:
        current_file: file name that stored the data (without the ".csv" suffix)
        top_K: number of top-ranked rows used to compute the slope
        round_default: number of decimal places for the returned slope
Return: slope of scatter plot of top_K data
"""
data = pd.read_csv(current_file + "_weightsum.csv")
top_data = data[0:top_K]
xd = [i for i in range(1,top_K+1)]
yd = top_data["GeneratedScore"].tolist()
# determine best fit line
par = np.polyfit(xd, yd, 1, full=True)
slope = par[0][0]
return round(slope,round_default)
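# Hypothetical demo (not in the original module): the slope returned by
# computeSlopeOfScores is the first-degree coefficient of a least-squares line
# fitted to (rank position, GeneratedScore), computed the same way via np.polyfit.
def _demo_score_slope():
    positions = [1, 2, 3, 4, 5]
    scores = [0.9, 0.8, 0.7, 0.6, 0.5]  # perfectly linear, slope -0.1
    slope = np.polyfit(positions, scores, 1, full=True)[0][0]
    return round(slope, 2)  # -0.1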
def compute_correlation(current_file,y_col="GeneratedScore",top_threshold=3,round_default=2):
"""
Compute the correlation between attributes and generated scores.
Attributes:
        current_file: file name that stored the data (without the ".csv" suffix)
        y_col: column name of the Y variable
        top_threshold: maximum number of correlated attributes to return
        round_default: number of decimal places for the returned coefficients
Return: list of correlated attributes and its coefficients
"""
# get the data for generated ranking
ranking_df = pd.read_csv(current_file+"_weightsum.csv")
# get the upload data for correlation computation
upload_df = pd.read_csv(current_file+".csv")
numeric_atts = list(upload_df.describe().columns)
X = upload_df[numeric_atts].values
# no need to standardize data
# scaler = StandardScaler()
# transform_X = scaler.fit_transform(X)
y = ranking_df[y_col].values
    regr = linear_model.LinearRegression()  # normalize=False was the default; the flag has been removed in newer scikit-learn
regr.fit(X, y)
# get coeff's, ordered by significance
    # round each coefficient to round_default decimal places
for i in range(len(regr.coef_)):
regr.coef_[i] = round(regr.coef_[i], round_default)
# normalize coefficients to [-1,1]
max_coef = max(regr.coef_)
min_coef = min(regr.coef_)
abs_max = max(abs(max_coef),abs(min_coef))
stand_coef = []
for ci in regr.coef_:
new_ci = round(ci/abs_max,round_default)
stand_coef.append(new_ci)
# coeff_zip = zip(regr.coef_, numeric_atts)
coeff_zip = zip(stand_coef, numeric_atts)
coeff_sorted = sorted(coeff_zip, key=lambda tup: abs(tup[0]), reverse=True)
if len(coeff_sorted) > top_threshold:
coeff_return = coeff_sorted[0:top_threshold]
else:
coeff_return = coeff_sorted
# only return top_threshold most correlated attributes
return coeff_return
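# Hypothetical demo (not in the original module): the normalization step above
# divides every regression coefficient by the largest absolute coefficient, so
# the strongest attribute maps to +1 or -1 and the rest scale proportionally.
def _demo_coefficient_scaling():
    coefs = [2.0, -4.0, 1.0]
    abs_max = max(abs(max(coefs)), abs(min(coefs)))
    return [round(ci / abs_max, 2) for ci in coefs]  # [0.5, -1.0, 0.25]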
def compute_statistic_topN(chosed_atts,current_file,top_N,round_default=1):
"""
Compute the statistics of input attributes.
Attributes:
chosed_atts: list of attributes to be computed
        current_file: file name that stored the data (without the ".csv" suffix)
        top_N: number of top-ranked rows of current_file to use
        round_default: number of decimal places for the returned statistics
Return: json data of computed statistics
"""
# data is sorted by ranking scores from higher to lower
data = pd.read_csv(current_file+"_weightsum.csv").head(top_N)
statistic_data = {}
# get the median data
for atti in chosed_atts:
cur_att_max = max(data[atti])
cur_att_median = np.median(data[atti])
cur_att_min = min(data[atti])
statistic_data[atti] = {"max": round(cur_att_max, round_default),
"median": round(cur_att_median, round_default),
"min": round(cur_att_min, round_default)}
return statistic_data
def mergeUnfairRanking(_px, _sensitive_idx, _fprob): # input is the ranking
"""
Generate a fair ranking.
Attributes:
_px: input ranking (sorted), list of ids
_sensitive_idx: the index of protected group in the input ranking
_fprob: probability to choose the protected group
Return: generated fair ranking, list of ids
"""
# _px=sorted(range(len(_inputrankingscore)), key=lambda k: _inputrankingscore[k],reverse=True)
rx = [x for x in _px if x not in _sensitive_idx]
qx = [x for x in _px if x in _sensitive_idx]
rx.reverse() # prepare for pop function to get the first element
qx.reverse()
res_list = []
while (len(qx) > 0 and len(rx) > 0):
r_cur = random.random()
# r_cur=random.uniform(0,1.1)
if r_cur < _fprob:
res_list.append(qx.pop()) # insert protected group first
else:
res_list.append(rx.pop())
if len(qx) > 0:
qx.reverse()
res_list = res_list + qx
if len(rx) > 0:
rx.reverse()
res_list = res_list + rx
if len(res_list) < len(_px):
print("Error!")
return res_list
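# Hypothetical demo (not in the original module): generate one fair ranking from
# an input ranking of ids 0..9 where ids 0-2 form the protected group and the
# protected group is drawn with probability 0.3 at every position. The result is
# a random permutation of the input ids (no seed is set inside mergeUnfairRanking).
def _demo_merge_unfair_ranking():
    input_ranking = list(range(10))
    protected_ids = [0, 1, 2]
    return mergeUnfairRanking(input_ranking, protected_ids, 0.3)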
def runFairOracles(chosed_atts,current_file,alpha_default=0.05,k_threshold=200,k_percentage=0.5):
"""
Run all fairness oracles: FA*IR, Pairwise and Proportion
Attributes:
chosed_atts: list of sensitive attributes
        current_file: file name that stored the data (without the ".csv" suffix)
        alpha_default: default significance level used in each oracle
        k_threshold: size threshold on the uploaded data that decides the top-K used in FA*IR and Proportion
        k_percentage: fraction used to decide the top-K in FA*IR and Proportion when the uploaded dataset has fewer than k_threshold rows
Return: json data of fairness results of all oracles
"""
# data is sorted by ranking scores from higher to lower
data = pd.read_csv(current_file+"_weightsum.csv")
total_n = len(data)
# set top K based on the size of input data
# if N > 200, then set top K = 100, else set top K = 0.5*N
if total_n > k_threshold:
top_K = 100
else:
top_K = int(np.ceil(k_percentage* total_n))
fair_res_data = {} # include all details of fairness validation
fair_statement_data = {} # only include the fairness result, i.e. fair or unfair, True represents fair, False represents unfair.
for si in chosed_atts:
# get the unique value of this sensitive attribute
values_si_att = list(data[si].unique())
# for each value, compute the current pairs and estimated fair pairs
si_value_json = {}
si_fair_json = {}
for vi in values_si_att:
# run FAIR oracle to compute its p-value and alpha_c
p_value_fair,alphac_fair = computePvalueFAIR(si,vi,current_file,top_K)
res_fair= p_value_fair > alphac_fair
            # run Pairwise oracle to compute its p-value; alpha uses the default value
p_value_pairwise = computePvaluePairwise(si,vi,current_file)
res_pairwise = p_value_pairwise > alpha_default
            # run Proportion oracle to compute its p-value; alpha uses the default value
p_value_proportion = computePvalueProportion(si,vi,current_file,top_K)
res_proportion = p_value_proportion > alpha_default
if not isinstance(vi, str):
filled_vi = vi
else:
filled_vi = vi.replace(" ", "")
si_value_json[filled_vi] = [p_value_fair,alphac_fair,p_value_pairwise,alpha_default,p_value_proportion,alpha_default]
si_fair_json[filled_vi] = [res_fair,res_pairwise,res_proportion]
if not isinstance(si, str):
filled_si = si
else:
filled_si = si.replace(" ", "")
fair_res_data[filled_si] = si_value_json
fair_statement_data[filled_si] = si_fair_json
return fair_res_data, fair_statement_data, alpha_default, top_K
def computePvalueFAIR(att_name,att_value,current_file,top_K,round_default=2):
"""
Compute p-value using FA*IR oracle
Attributes:
att_name: sensitive attribute name
att_value: value of protected group of above attribute
        current_file: file name that stored the data (without the ".csv" suffix)
        top_K: top_K value in FA*IR
        round_default: number of decimal places for the returned p-value
Return: rounded p-value and adjusted significance level in FA*IR
"""
    # read the generated ranking stored on the server
data = pd.read_csv(current_file + "_weightsum.csv")
total_N = len(data)
top_data = data[0:top_K]
# for attribute value, compute the current pairs and estimated fair pairs
position_lists_val = data[data[att_name]==att_value].index+1
size_vi = len(position_lists_val)
fair_p_vi = size_vi/total_N
# generate a ranking of tuples with (id,"pro")/(id,"unpro") by current value as protected group
generated_ranking = []
for index, row in top_data.iterrows():
if row[att_name] == att_value:
generated_ranking.append([index,"pro"])
else:
generated_ranking.append([index,"unpro"])
p_value, isFair, posiFail, alpha_c, pro_needed_list = computeFairRankingProbability(top_K,fair_p_vi,generated_ranking)
return round(p_value,round_default),round(alpha_c,round_default)
def computePvaluePairwise(att_name,att_value,current_file, round_default=2):
"""
Compute p-value using Pairwise oracle
Attributes:
att_name: sensitive attribute name
att_value: value of protected group of above attribute
        current_file: file name that stored the data (without the ".csv" suffix)
        round_default: number of decimal places for the returned p-value
Return: rounded p-value
"""
data = pd.read_csv(current_file + "_weightsum.csv")
total_N = len(data)
# for attribute value, compute the current pairs and estimated fair pairs
position_lists_val = data[data[att_name] == att_value].index + 1
size_vi = len(position_lists_val)
fair_p_vi = size_vi / total_N
# get the pre-computed pairwise results from simulation
# simu_data = read_json_file("/home/ec2-user/dataResponsiblyUI/playdata/SimulationPairs_N1000_R1000.json")
simu_data = read_json_file("/home/ec2-user/dataResponsiblyUI/playdata/SimulationPairs_N"+str(total_N)+"_R1000.json")
all_fair_p = list(simu_data.keys())
if str(fair_p_vi) in all_fair_p:
cur_pi = str(fair_p_vi)
else:
diff_p = []
for pi in all_fair_p:
num_pi = float(pi)
diff_p.append(abs(num_pi - fair_p_vi))
min_diff_index = diff_p.index(min(diff_p))
cur_pi = all_fair_p[min_diff_index]
# compute the number of pairs of value > * in the input ranking that is stored in the current file
pair_N_vi, estimated_fair_pair_vi, size_vi = computePairN(att_name,att_value,current_file)
# compute the cdf, i.e. p-value of input pair value
sample_pairs = simu_data[cur_pi]
cdf_pair = Cdf(sample_pairs,pair_N_vi)
# decide to use left tail or right tail
# mode_pair_sim,_ = mode(sample_pairs)
# median_mode = np.median(list(mode_pair_sim))
# if pair_N_vi <= mode_pair_sim:
# p_value = cdf_pair
# else:
# p_value = 1- cdf_pair
return round(cdf_pair,round_default)
def computePvaluePairwise_simu(att_name,att_value,current_file, run_time=100, round_default=2):
"""
Compute p-value using Pairwise oracle
Attributes:
att_name: sensitive attribute name
att_value: value of protected group of above attribute
        current_file: file name that stored the data (without the ".csv" suffix)
        run_time: number of simulation runs using mergeUnfairRanking
        round_default: number of decimal places for the returned p-value
Return: rounded p-value
"""
data = pd.read_csv(current_file + "_weightsum.csv")
total_N = len(data)
# for attribute value, compute the current pairs and estimated fair pairs
position_lists_val = data[data[att_name] == att_value].index + 1
size_vi = len(position_lists_val)
fair_p_vi = size_vi / total_N
seed_random_ranking = [x for x in range(total_N)] # list of IDs
seed_f_index = [x for x in range(size_vi)] # list of IDs
# for simulation outputs
data_file = "./media/FairRankingGeneration"
plot_df = pd.DataFrame(columns=["RunCount", "N", "sensi_n", "fair_mp", "pair_n"])
# run simulations, in each simulation, generate a fair ranking with input N and size of sensitive group
for ri in range(run_time):
# only for binary sensitive attribute
output_ranking = mergeUnfairRanking(seed_random_ranking, seed_f_index, fair_p_vi)
position_pro_list = [i for i in range(len(output_ranking)) if output_ranking[i] in seed_f_index]
count_sensi_prefered_pairs = 0
for i in range(len(position_pro_list)):
cur_position = position_pro_list[i]
left_sensi = size_vi - (i + 1)
count_sensi_prefered_pairs = count_sensi_prefered_pairs + (total_N - cur_position - left_sensi)
# count_other_prefered_pairs = (_input_sensi_n*(_input_n-_input_sensi_n)) - count_sensi_prefered_pairs
cur_row = [ri + 1, total_N, size_vi, fair_p_vi, count_sensi_prefered_pairs]
plot_df.loc[len(plot_df)] = cur_row
# save the data of pairs in fair ranking generation on server
plot_df.to_csv(data_file + "_R" + str(run_time) + "_N" + str(total_N) + "_S" + str(size_vi) + "_pairs.csv")
# compute the number of pairs of value > * in the input ranking that is stored in the current file
pair_N_vi, estimated_fair_pair_vi, size_vi = computePairN(att_name,att_value,current_file)
# compute the cdf, i.e. p-value of input pair value
sample_pairs = list(plot_df["pair_n"].dropna())
cdf_pair = Cdf(sample_pairs,pair_N_vi)
# decide to use left tail or right tail
# mode_pair_sim,_ = mode(sample_pairs)
# median_mode = np.median(list(mode_pair_sim))
# if pair_N_vi <= mode_pair_sim:
# p_value = cdf_pair
# else:
# p_value = 1- cdf_pair
return round(cdf_pair,round_default)
def computePvalueProportion(att_name,att_value,current_file, top_K, round_default=2):
"""
Compute p-value using Proportion oracle, i.e., z-test method of 4.1.3 in "A survey on measuring indirect discrimination in machine learning".
Attributes:
att_name: sensitive attribute name
att_value: value of protected group of above attribute
        current_file: file name that stored the data (without the ".csv" suffix)
        top_K: threshold that defines the positive outcome; rows ranked inside the top_K count as positive, all others as negative
        round_default: number of decimal places for the returned p-value
Return: rounded p-value
"""
# using z-test method of 4.1.3 in "A survey on measuring indirect discrimination in machine learning"
# for binary attribute only
data = pd.read_csv(current_file + "_weightsum.csv")
total_N = len(data)
top_data = data[0:top_K]
# for attribute value, compute the current pairs and estimated fair pairs
position_lists_val = data[data[att_name] == att_value].index + 1
size_vi = len(position_lists_val)
size_other = total_N - size_vi
    size_vi_top = len(top_data[top_data[att_name] == att_value])
size_other_top = top_K - size_vi_top
p_vi_top = size_vi_top / size_vi
p_other_top = size_other_top / size_other
p_vi_rest = 1 - p_vi_top
p_other_rest = 1- p_other_top
pooledSE = sqrt((p_vi_top * p_vi_rest/ size_vi) + ( p_other_top * p_other_rest / size_other))
z_test = (p_other_top - p_vi_top) / pooledSE
p_value = norm.sf(z_test)
return round(p_value,round_default)
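# Hypothetical worked example (not in the original module) of the two-proportion
# z-test used above: a protected group of 40 out of 200 rows with 10 members in
# the top 100, versus 90 of the remaining 160 rows in the top 100. It reuses the
# sqrt and norm imports from the top of this module.
def _demo_proportion_ztest():
    size_vi, size_other = 40, 160
    p_vi_top, p_other_top = 10 / 40, 90 / 160
    pooled_se = sqrt(p_vi_top * (1 - p_vi_top) / size_vi
                     + p_other_top * (1 - p_other_top) / size_other)
    z = (p_other_top - p_vi_top) / pooled_se
    return round(norm.sf(z), 2)  # one-sided p-value; small values flag unfairness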
def Cdf(_input_array, x):
"""
Compute the CDF value of input samples using left tail computation
Attributes:
_input_array: list of data points
x: current K value
Return: value of cdf
"""
# left tail
count = 0.0
for vi in _input_array:
if vi <= x:
count += 1.0
prob = count / len(_input_array)
return prob
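# Hypothetical demo (not in the original module): Cdf is an empirical left-tail
# CDF, i.e. the fraction of sampled values that are <= x.
def _demo_cdf():
    return Cdf([1, 2, 3, 4, 5], 3)  # 3 of 5 samples are <= 3 -> 0.6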
def computeFairRankingProbability(k,p,generated_ranking,default_alpha=0.05):
"""
Sub-function to compute p-value used in FA*IR oracle
Attributes:
k: top_K value in FA*IR
p: minimum proportion of protected group
generated_ranking: input ranking of users
default_alpha: default significance level of FA*IR
Return: p-value, fairness, rank position fail, adjusted significance level and list of ranking positions that protected group should be using FA*IR
"""
## generated_ranking is a list of tuples (id, "pro"),...(id,"unpro")
gft = FairnessInRankingsTester(p, default_alpha, k, correctedAlpha=True)
posAtFail, isFair = gft.ranked_group_fairness_condition(generated_ranking)
p_value = gft.calculate_p_value_left_tail(k, generated_ranking)
return p_value, isFair, posAtFail, gft.alpha_c, gft.candidates_needed
def computePairN(att_name, att_value,current_file):
"""
Sub-function to compute number of pairs that input value > * used in Pairwise oracle
Attributes:
att_name: sensitive attribute name
att_value: value of protected group of above attribute
        current_file: file name that stored the data (without the ".csv" suffix)
    Return: number of pairs of att_value > * in the input data, number of pairs of att_value > * estimated from its proportion, and the size of the group with att_value
"""
    # read the generated ranking stored on the server
data = pd.read_csv(current_file + "_weightsum.csv")
total_N = len(data)
# get the unique value of this sensitive attribute
    values_att = list(data[att_name].unique())
# for each value, compute the current pairs and estimated fair pairs
position_lists_val = data[data[att_name]==att_value].index+1
size_vi = len(position_lists_val)
count_vi_prefered_pairs = 0
for i in range(len(position_lists_val)):
cur_position = position_lists_val[i]
left_vi = size_vi - (i + 1)
count_vi_prefered_pairs = count_vi_prefered_pairs + (total_N - cur_position - left_vi)
# compute estimated fair pairs
total_pairs_vi = size_vi*(total_N-size_vi)
estimated_vi_pair = math.ceil((size_vi / total_N) * total_pairs_vi)
return int(count_vi_prefered_pairs),int(estimated_vi_pair),int(size_vi)
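# Hypothetical demo (not in the original module) of the pair-counting logic used
# in computePairN: for protected rows at 1-based positions [1, 4] in a ranking of
# length 5, count how many (protected, non-protected) pairs have the protected
# row ranked higher.
def _demo_count_preferred_pairs():
    total_N = 5
    positions = [1, 4]  # 1-based positions of the protected group
    size_vi = len(positions)
    count = 0
    for i, cur_position in enumerate(positions):
        left_vi = size_vi - (i + 1)  # protected rows ranked below this one
        count += total_N - cur_position - left_vi
    return count  # row at position 1 beats 3 others, row at position 4 beats 1 -> 4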
def getSizeOfRanking(current_file):
"""
Compute size of generated ranking.
Attributes:
        current_file: file name that stored the data (without the ".csv" suffix)
Return: size of ranking
"""
data = pd.read_csv(current_file+"_weightsum.csv")
return len(data)
def getSizeOfDataset(current_file):
"""
Compute number of rows in the input data.
Attributes:
        current_file: file name that stored the data (without the ".csv" suffix)
Return: number of rows in current_file
"""
data = pd.read_csv(current_file+".csv")
return len(data)
def generateRanking(current_file,top_K=100):
"""
Generate a ranking of input data.
Attributes:
        current_file: file name that stored the data (without the ".csv" suffix)
        top_K: number of rows returned in the generated ranking
Return: json data of a dataframe that stored the generated ranking
"""
ranks_file = current_file + "_rankings.json"
rankings_paras = read_json_file(ranks_file)
    data = pd.read_csv(current_file + ".csv")  # api: pandas.read_csv