import argparse
import warnings
from pathlib import Path
from shutil import copyfile
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
import xarray as xr
from src import utils as cutil
from src.models import epi
PLOT_DIR = cutil.RESULTS / "figures" / "appendix" / "sims"
def make_coeff_factorplot(
ds,
pop,
LHS_var,
title="",
xlabel="",
n_bins=40,
xlim=None,
hist_kwargs={"edgecolor": "none"},
fig_label=None,
**facet_kwargs,
):
"""Make a factorplot of histograms of regression-derived estimates vs. true values for
simulated outbreaks.
Parameters
----------
ds : :class:`xarray.Dataset`
Dataset containing regression results across samples. Must contain the dims
``pop``, ``LHS``, and any dims specified in `facet_kwargs`
pop : numeric
Population to make this plot for. Must be one of the values of `pop` in `ds`.
LHS_var : str
Plot results of regressions using this value as the left-hand-side variable.
Must be one of the values of `LHS` in `ds`.
title : str, optional
Suptitle for this plot
xlabel : str, optional
Labels of x axes.
n_bins : int
Number of bins in histograms
xlim : float, optional
The left or right-most limit of the x axis, depending on whether the true value
is positive (for the no-policy growth rate) or negative (for policy effects).
The other axis limit is always 0. If None (default), use 2x the true value.
hist_kwargs : dict, optional
Pass to :func:`matplotlib.pyplot.hist`
fig_label : str, optional
Label for this figure panel if going in paper (e.g. "a" or "b")
facet_kwargs
Passed to :class:`xarray.plot.FacetGrid`
Returns
-------
g : :class:`xarray.plot.FacetGrid`
The output factorplot object
"""
this_true_val = ds.coefficient_true.item()
if this_true_val > 0:
xmin = 0
if xlim is None:
xmax = this_true_val * 2
else:
xmax = xlim
text_ha, text_x, text_y, leg_x, leg_y, leg_text = (
"left",
0.03,
0.05,
0.03,
0.55,
"Mean estimate",
)
elif this_true_val < 0:
if xlim is None:
xmin = this_true_val * 2
else:
xmin = -xlim
xmax = 0
text_ha, text_x, text_y, leg_x, leg_y, leg_text = (
"right",
0.97,
0.55,
0.03,
0.5,
"Mean\nestimate",
)
g = xr.plot.FacetGrid(ds.sel(LHS=LHS_var, pop=pop), sharey="row", **facet_kwargs)
g.map(lambda x: plt.axvline(x, color="k", label="Truth"), "coefficient_true")
for ax in g.axes.flat:
ax.set_xlim(xmin, xmax)
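# helper: restrict the histogram bins to the visible x-range and suppress
# warnings that plt.hist may raise for NaN-containing samples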
def nowarn_hist(data, *args, xmin=None, xmax=None, **kwargs):
binmin = max(xmin, np.nanmin(data))
binmax = min(xmax, np.nanmax(data))
with warnings.catch_warnings():
warnings.simplefilter("ignore")
return plt.hist(
data, *args, bins=np.linspace(binmin, binmax, n_bins), **kwargs
)
g.map(nowarn_hist, "coefficient", xmin=xmin, xmax=xmax, **hist_kwargs)
g.map(
lambda x: plt.axvline(
np.nanmean(x), color="tab:grey", linestyle="dashed", label=leg_text
),
"coefficient",
)
g.map(
lambda x, z: plt.text(
text_x,
text_y,
f"$S_{{min}}$: {x.min().item():.2f}\nBias: {(np.nanmean(z)-this_true_val) / this_true_val:.1%}",
horizontalalignment=text_ha,
verticalalignment="bottom",
transform=plt.gca().transAxes,
),
"S_min",
"coefficient",
)
g.axes.flat[0].legend(loc=(leg_x, leg_y))
g.set_xlabels(xlabel, fontweight="bold")
g.set_titles("$\{coord} = {value}$")
[t.set_text(t.get_text().replace("inf", "\infty")) for t in g.row_labels]
g.map(lambda: plt.yticks([]))
if fig_label is not None:
g.fig.text(
0.03, 0.97, fig_label, fontsize=7, fontweight="bold", va="top", ha="left"
)
g.fig.subplots_adjust(top=0.9)
g.fig.suptitle(title, va="bottom", y=0.95)
sns.despine(g.fig, left=True)
return g
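# Illustrative stand-alone call of make_coeff_factorplot, mirroring the usage inside
# make_all_coeff_factorplots below (the selected policy, population and styling values
# here are assumptions, not fixed by this module):
#
# g = make_coeff_factorplot(
#     coeffs.sel(policy="Intercept"), 1e8, "I",
#     title="No-policy growth rate", xlabel="Estimated daily growth rate",
#     xlim=0.6, figsize=(6.5, 3.25), row="sigma", col="gamma",
# )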
def make_all_coeff_factorplots(
dir_in, plot_dir=None, LHS_vars=[], save_source_data=None, save_paper_figs=False
):
"""Create factorplots of estimated coefficients derived from simulated outbreaks.
Must have previously created the regression results, which is currently done
by running the ``iwb-simulator.ipynb`` notebook.
Parameters
----------
dir_in : str
The directory containing the regression results. Must contain subdirectories
``SIR`` and ``SEIR``.
plot_dir : str, optional
The directory where you would like to store plots. If None, do not save.
LHS_vars : list of str
Make plots for regressions run with these values as the left-hand-side. Using
the variable names of an SEIR model (e.g. `I` is active infectious cases, and `IR`
is active infectious cases + recovered cases)
save_source_data : str or :class:`pathlib.Path`
If not None, output the source data for these factorplots to this path. Only the
`IR` and `I` LHS vars are output (to match what is included in the Extended
Data)
save_paper_figs : bool
If True, save the subset of output figures used in the Extended Data of the
manuscript to ``results/figures/appendix/FigED[8,9]`` and the source data to
``results/source_data/ExtendedDataFigure89.csv``.
"""
coeffs = epi.load_and_combine_reg_results(
dir_in, cols_to_keep=["effect", "Intercept", "S_min", "rmse"]
)
coeffs = epi.calc_cum_effects(coeffs)
if plot_dir is not None:
plot_dir = Path(plot_dir)
plot_dir.mkdir(exist_ok=True)
print("Creating factorplots...")
## loop over population
facet_kwargs = dict(row="sigma", col="gamma")
hist_kwargs = {"edgecolor": "none"}
for px, p in enumerate(coeffs.pop.values):
print(f"...Population {px+1}/{len(coeffs.pop.values)}")
## loop over LHS vars
for LHS in ["I", "IR"]:
LHS_dict = {"I": "Active\ Cases", "IR": "Cumulative\ Cases"}
if p == 1e8 and LHS == "I":
fig_label = "a"
elif p == 1e5 and LHS == "IR":
fig_label = "b"
else:
fig_label = None
title_suffix = (
f"(pop. size: {p:,}; dep. variable: $\Delta log({LHS_dict[LHS]})$)"
)
# loop over policy variables
for var in coeffs.policy.values:
if var == "Intercept":
title = (
"$\\bf{Infection\ growth\ rate\ without\ policy}$\n"
+ title_suffix
)
xlabel = "Estimated daily growth rate"
color = cutil.COLORS["no_policy_growth_rate"]
elif var == "cum_effect":
title = (
"$\\bf{Effect\ of\ all\ policies\ combined}$\n" + title_suffix
)
xlabel = "Estimated effect on daily growth rate"
color = cutil.COLORS["effect"]
else:
policy_num = int(var[1:])
title = f"Effect of policy {policy_num}"
xlabel = "Estimated effect on daily growth rate"
color = cutil.COLORS["effect"]
# make factorplot
g = make_coeff_factorplot(
coeffs.sel(policy=var),
p,
LHS,
title=title,
figsize=(6.5, 3.25),
fig_label=fig_label,
xlabel=xlabel,
xlim=0.6,
hist_kwargs={**hist_kwargs, "facecolor": color, "alpha": 0.8},
**facet_kwargs,
)
if plot_dir is not None:
for suffix in ["pdf", "png"]:
g.fig.savefig(
plot_dir / f"{var}_pop_{p}_LHS_{LHS}.{suffix}",
dpi=300,
tight_layout=True,
bbox_inches="tight",
)
plt.clf()
if save_paper_figs:
source_path = Path(cutil.RESULTS / "source_data" / "ExtendedDataFigure89.csv")
out_base = Path(cutil.RESULTS / "figures" / "appendix")
# source data
coeff_lim = coeffs.sel(policy=["Intercept", "cum_effect"])[
["S_min", "coefficient", "coefficient_true"]
]
df_a = coeff_lim.sel(LHS="I", pop=1e8).to_dataframe()
df_b = coeff_lim.sel(LHS="IR", pop=1e5).to_dataframe()
pd.concat((df_a, df_b))
# Import the required libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import japanize_matplotlib
import mca
from adjustText import adjust_text
def correspondence(df_test):
# Run multiple correspondence analysis (the categorical-data counterpart of PCA)
mca_counts = mca.MCA(df_test)
rows = mca_counts.fs_r(N=2) # row-side (index) data
cols = mca_counts.fs_c(N=2) # column-side (header) data
# Store the first and second component values of the row and column data as DataFrames
df_rows = pd.DataFrame([rows[:,0], rows[:,1]], columns=df_test.index, index=['X', 'Y']).T
df_cols = pd.DataFrame([cols[:,0], cols[:,1]], columns=df_test.columns, index=['X', 'Y']).T
df_rows_cols = pd.concat([df_rows, df_cols])
#!/usr/bin/env python
from scipy import sparse
from sklearn.datasets import dump_svmlight_file
from sklearn.preprocessing import LabelEncoder
import argparse
import logging
import numpy as np
import os
import pandas as pd
from kaggler.util import encode_categorical_features, normalize_numerical_feature
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
level=logging.DEBUG)
def generate_feature(train_file, label_file, test_file, feature_dir,
feature_name):
# Load data files
logging.info('Loading training and test data')
trn = pd.read_csv(train_file, index_col=0)
tst = pd.read_csv(test_file, index_col=0)
label = pd.read_csv(label_file, index_col=0)
"""
Utilities to use with market_calendars
"""
import itertools
import warnings
import pandas as pd
def merge_schedules(schedules, how='outer'):
"""
Given a list of schedules will return a merged schedule. The merge method (how) will either return the superset
of any datetime when any schedule is open (outer) or only the datetime where all markets are open (inner)
CAVEATS:
* This does not work for schedules with breaks, the break information will be lost.
* Onlu "market_open" and "market_close" are considered, other market times are not yet supported.
:param schedules: list of schedules
:param how: outer or inner
:return: schedule DataFrame
"""
all_cols = [x.columns for x in schedules]
all_cols = list(itertools.chain(*all_cols))
if ('break_start' in all_cols) or ('break_end' in all_cols):
warnings.warn('Merge schedules will drop the break_start and break_end from result.')
result = schedules[0]
for schedule in schedules[1:]:
result = result.merge(schedule, how=how, right_index=True, left_index=True)
if how == 'outer':
result['market_open'] = result.apply(lambda x: min(x.market_open_x, x.market_open_y), axis=1)
result['market_close'] = result.apply(lambda x: max(x.market_close_x, x.market_close_y), axis=1)
elif how == 'inner':
result['market_open'] = result.apply(lambda x: max(x.market_open_x, x.market_open_y), axis=1)
result['market_close'] = result.apply(lambda x: min(x.market_close_x, x.market_close_y), axis=1)
else:
raise ValueError('how argument must be "inner" or "outer"')
result = result[['market_open', 'market_close']]
return result
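# A minimal, self-contained usage sketch for merge_schedules. The two toy schedules
# below are assumptions made for illustration only (they are not part of this module).
def _example_merge_schedules():
    idx = pd.DatetimeIndex(["2021-01-04", "2021-01-05"])
    sched_a = pd.DataFrame(
        {"market_open": pd.to_datetime(["2021-01-04 14:30", "2021-01-05 14:30"], utc=True),
         "market_close": pd.to_datetime(["2021-01-04 21:00", "2021-01-05 21:00"], utc=True)},
        index=idx)
    sched_b = pd.DataFrame(
        {"market_open": pd.to_datetime(["2021-01-04 15:00", "2021-01-05 15:00"], utc=True),
         "market_close": pd.to_datetime(["2021-01-04 20:00", "2021-01-05 20:00"], utc=True)},
        index=idx)
    # "inner" keeps only the hours when both markets are open: 15:00-20:00 UTC per day
    return merge_schedules([sched_a, sched_b], how="inner")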
def convert_freq(index, frequency):
"""
Converts a DateTimeIndex to a new lower frequency
:param index: DateTimeIndex
:param frequency: frequency string
:return: DateTimeIndex
"""
return pd.DataFrame(index=index).asfreq(frequency).index
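# Usage sketch for convert_freq with an assumed daily index (illustration only):
def _example_convert_freq():
    daily = pd.date_range("2021-01-01", periods=14, freq="D")
    return convert_freq(daily, "W")  # thin the daily stamps down to weekly ones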
class _date_range:
"""
This is a callable class that should be used by calling the already initiated instance: `date_range`.
Given a schedule, it will return a DatetimeIndex with all of the valid datetimes at the frequency given.
The schedule values are assumed to be in UTC.
The calculations will be made for each trading session. If the passed schedule-DataFrame doesn't have
breaks, there is one trading session per day going from market_open to market_close, otherwise there are two,
the first one going from market_open to break_start and the second one from break_end to market_close.
*Any trading session where start == end is considered a 'no-trading session' and will always be dropped*
CAVEATS:
* Only "market_open", "market_close" (and, optionally, "breaak_start" and "break_end")
are considered, other market times are not yet supported by this class.
* If the difference between start and end of a trading session is smaller than an interval of the
frequency, and closed= "right" and force_close = False, the whole session will disappear.
This will also raise a warning.
Signature:
.__call__(self, schedule, frequency, closed='right', force_close=True, **kwargs)
:param schedule: schedule of a calendar, which may or may not include break_start and break_end columns
:param frequency: frequency string that is used by pd.Timedelta to calculate the timestamps
this must be "1D" or higher frequency
:param closed: the way the intervals are labeled
'right': use the end of the interval
'left': use the start of the interval
None: (or 'both') use the end of the interval but include the start of the first interval (the open)
:param force_close: how the last value of a trading session is handled
True: guarantee that the close of the trading session is the last value
False: guarantee that there is no value greater than the close of the trading session
None: leave the last value as it is calculated based on the closed parameter
:param kwargs: unused. Solely for compatibility.
"""
def __init__(self, schedule = None, frequency= None, closed='right', force_close=True):
if not closed in ("left", "right", "both", None):
raise ValueError("closed must be 'left', 'right', 'both' or None.")
elif not force_close in (True, False, None):
raise ValueError("force_close must be True, False or None.")
self.closed = closed
self.force_close = force_close
self.has_breaks = False
if frequency is None: self.frequency = None
else:
self.frequency = pd.Timedelta(frequency)
if self.frequency > pd.Timedelta("1D"):
raise ValueError('Frequency must be 1D or higher frequency.')
elif schedule.market_close.lt(schedule.market_open).any():
raise ValueError("Schedule contains rows where market_close < market_open,"
" please correct the schedule")
if "break_start" in schedule:
if not all([
schedule.market_open.le(schedule.break_start).all(),
schedule.break_start.le(schedule.break_end).all(),
schedule.break_end.le(schedule.market_close).all()]):
raise ValueError("Not all rows match the condition: "
"market_open <= break_start <= break_end <= market_close, "
"please correct the schedule")
self.has_breaks = True
def _check_overlap(self, schedule):
"""checks if calculated end times would overlap with the next start times.
Only an issue when force_close is None and closed != left.
:param schedule: pd.DataFrame with first column: 'start' and second column: 'end'
:raises ValueError:"""
if self.force_close is None and self.closed != "left":
num_bars = self._calc_num_bars(schedule)
end_times = schedule.start + num_bars * self.frequency
if end_times.gt(schedule.start.shift(-1)).any():
raise ValueError(f"The chosen frequency will lead to overlaps in the calculated index. "
f"Either choose a higher frequency or avoid setting force_close to None "
f"when setting closed to 'right', 'both' or None.")
def _check_disappearing_session(self, schedule):
"""checks if requested frequency and schedule would lead to lost trading sessions.
Only necessary when force_close = False and closed = "right".
:param schedule: pd.DataFrame with first column: 'start' and second column: 'end'
:raises UserWarning:"""
if self.force_close is False and self.closed == "right":
if (schedule.end- schedule.start).lt(self.frequency).any():
warnings.warn("An interval of the chosen frequency is larger than some of the trading sessions, "
"while closed== 'right' and force_close is False. This will make those trading sessions "
"disappear. Use a higher frequency or change the values of closed/force_close, to "
"keep this from happening.")
def _calc_num_bars(self, schedule):
"""calculate the number of timestamps needed for each trading session.
:param schedule: pd.DataFrame with first column: 'start' and second column: 'end'
:return: pd.Series of float64"""
num_bars = (schedule.end - schedule.start) / self.frequency
remains = num_bars % 1 # round up, np.ceil-style
return num_bars.where(remains == 0, num_bars + 1 - remains).round()
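# _calc_num_bars rounds partial bars up: e.g. a 6.5 h session at a 1 h frequency
# yields 7 bars, while an exact 6.0 h session stays at 6 (illustrative values)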
def _calc_time_series(self, schedule):
"""Method used by date_range to calculate the trading index.
:param schedule: pd.DataFrame with first column: 'start' and second column: 'end'
:return: pd.Series of datetime64[ns, UTC]"""
num_bars = self._calc_num_bars(schedule)
# ---> calculate the desired timeseries:
if self.closed == "left":
opens = schedule.start.repeat(num_bars) # keep as is
time_series = (opens.groupby(opens.index).cumcount()) * self.frequency + opens
elif self.closed == "right":
opens = schedule.start.repeat(num_bars) # dont add row but shift up
time_series = (opens.groupby(opens.index).cumcount()+ 1) * self.frequency + opens
else:
num_bars += 1
opens = schedule.start.repeat(num_bars) # add row but dont shift up
time_series = (opens.groupby(opens.index).cumcount()) * self.frequency + opens
if not self.force_close is None:
time_series = time_series[time_series.le(schedule.end.repeat(num_bars))]
if self.force_close:
time_series = pd.concat([time_series, schedule.end]).sort_values()
return time_series
def __call__(self, schedule, frequency, closed='right', force_close=True, **kwargs):
"""
See class docstring for more information.
:param schedule: schedule of a calendar, which may or may not include break_start and break_end columns
:param frequency: frequency string that is used by pd.Timedelta to calculate the timestamps
this must be "1D" or higher frequency
:param closed: the way the intervals are labeled
'right': use the end of the interval
'left': use the start of the interval
None: (or 'both') use the end of the interval but include the start of the first interval
:param force_close: how the last value of a trading session is handled
True: guarantee that the close of the trading session is the last value
False: guarantee that there is no value greater than the close of the trading session
None: leave the last value as it is calculated based on the closed parameter
:param kwargs: unused. Solely for compatibility.
:return: pd.DatetimeIndex of datetime64[ns, UTC]
"""
self.__init__(schedule, frequency, closed, force_close)
if self.has_breaks:
# rearrange the schedule, to make every row one session
before = schedule[["market_open", "break_start"]].set_index(schedule["market_open"])
after = schedule[["break_end", "market_close"]].set_index(schedule["break_end"])
before.columns = after.columns = ["start", "end"]
schedule = pd.concat([before, after])
import numpy as np
import pandas as pd
from betterself.utils.pandas_utils import get_empty_timezone_aware_series_containing_index_of_today
from constants import SLEEP_CUTOFF_TIME, SLEEP_MINUTES_COLUMN, VERY_PRODUCTIVE_TIME_LABEL, PRODUCTIVE_TIME_LABEL, \
NEUTRAL_TIME_LABEL, DISTRACTING_TIME_LABEL, VERY_DISTRACTING_TIME_LABEL
SOURCE_COLUMN_NAME = 'Source'
QUANTITY_COLUMN_NAME = 'Quantity'
SUPPLEMENT_COLUMN_NAME = 'Supplement'
TIME_COLUMN_NAME = 'Time'
SUPPLEMENT_EVENT_COLUMN_MAP = {
'source': SOURCE_COLUMN_NAME,
'supplement__name': SUPPLEMENT_COLUMN_NAME,
'quantity': QUANTITY_COLUMN_NAME,
'time': TIME_COLUMN_NAME,
}
DATE_LABEL = 'Date'
PRODUCTIVITY_LOG_COLUMN_MAP = {
'source': SOURCE_COLUMN_NAME,
'date': DATE_LABEL,
'very_productive_time_minutes': VERY_PRODUCTIVE_TIME_LABEL,
'productive_time_minutes': PRODUCTIVE_TIME_LABEL,
'neutral_time_minutes': NEUTRAL_TIME_LABEL,
'distracting_time_minutes': DISTRACTING_TIME_LABEL,
'very_distracting_time_minutes': VERY_DISTRACTING_TIME_LABEL,
}
class DataFrameBuilder(object):
rename_columns = True
def build_dataframe(self):
if not self.values.exists():
return pd.DataFrame()
# Am I really a programmer or just a lego assembler?
# Pandas makes my life at least 20 times easier.
df = pd.DataFrame.from_records(self.values, index=self.index_column)
# make the columns and labels prettier
if self.rename_columns:
df = df.rename(columns=self.column_mapping)
df.index.name = TIME_COLUMN_NAME
try:
df.index = df.index.tz_convert(self.user.pytz_timezone)
except AttributeError:
# an AttributeError means the index is a plain Index, i.e. only dates
# (and not times) were passed
df.index = pd.DatetimeIndex(df.index, tz=self.user.pytz_timezone)
from pathlib import Path
from abc import ABC, abstractmethod
import pandas as pd
from dppd import dppd
import pysam
from .common import reverse_complement, df_to_rows
from .gene import Gene, Transcript
from mbf_externals.util import lazy_method
import weakref
import mbf_pandas_msgpack as pandas_msgpack
import numpy as np
pd.read_msgpack = pandas_msgpack.read_msgpack
dp, X = dppd()
def include_in_downloads(func):
"""A decorator to collect the download funcs"""
func._include_in_downloads = True
return func
def class_with_downloads(cls):
cls._download_methods = []
for f in cls.__dict__.items():
if hasattr(f[1], "_include_in_downloads"):
cls._download_methods.append(f[1])
return cls
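# Illustrative (hypothetical) usage of the two download decorators above:
#
# @class_with_downloads
# class MyGenome:
#     @include_in_downloads
#     def _download_sequence(self): ...
#
# MyGenome._download_methods   # -> [MyGenome._download_sequence]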
def ReadOnlyPropertyWithFunctionAccess(func):
"""With normal property, you can not (easily) retrieve
the function. This will return the value of the func
if you do x.prop and the func itsealf if you do type(x).prop
"""
class Property:
def __get__(self, inst, instcls):
if inst is None:
# instance attribute accessed on class, return self
return func
else:
return func(inst)
return Property()
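# Illustrative (hypothetical) usage:
#
# class Demo:
#     @ReadOnlyPropertyWithFunctionAccess
#     def answer(self):
#         return 42
#
# Demo().answer   # -> 42 (the computed value)
# Demo.answer     # -> the undecorated function itself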
class MsgPackProperty:
"""
A message pack property is a property x_y that gets
calculated by a method _prepare_x_y
and automatically stored/loaded by a caching job
as a msgpack file.
The actual job used depends on the GenomeBase subclass.
The dependency_callback gets called with the GenomeBase subclass
instance and can return dependencies for the generated job.
The object has three members afterwards:
x_y -> get the value returned by _prepare_x_y (lazy load)
_prepare_x_y -> that's the one you need to implement,
its docstring is copied to this property
job_y -> the job that caches _prepare_x_y() results
"""
def __init__(self, dependency_callback=None, files_to_invariant_on_callback=None):
self.dependency_callback = dependency_callback
self.files_to_invariant_on_callback = files_to_invariant_on_callback
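# Illustrative (hypothetical) sketch of the intended pattern:
#
# @msgpack_unpacking_class
# class MyGenome(GenomeBase):
#     df_genes = MsgPackProperty()
#
#     def _prepare_df_genes(self):
#         ...  # expensive calculation returning a DataFrame
#
# # afterwards: MyGenome().df_genes loads lazily from df_genes.msgpack via job_genes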
def msgpack_unpacking_class(cls):
msg_pack_properties = []
for d in list(cls.__dict__):
v = cls.__dict__[d]
if isinstance(v, MsgPackProperty):
if not "_" in d:
raise NotImplementedError(
"Do not know how to create job name for msg_pack_properties that do not containt _"
)
msg_pack_properties.append(d)
job_name = "job_" + d[d.find("_") + 1 :]
filename = d + ".msgpack"
calc_func = getattr(cls, f"_prepare_{d}")
def load(self, d=d, filename=filename, job_name=job_name):
if not hasattr(self, "_" + d):
fn = self.find_file(filename)
if not fn.exists():
raise ValueError(
f"{d} accessed before the respecting {job_name} call"
)
df = pd.read_msgpack(fn)
import os
from datetime import date
import pandas as pd
import plot
import writeToS3 as s3
from pytrends.request import TrendReq
def lambda_handler(event, context):
# create local path
localPath = os.path.join('/tmp', 'interest_by_region')
if not os.path.exists(localPath):
os.makedirs(localPath)
keywords = ['ventilador', 'ventiladores', 'mascarilla', 'mascarillas medicas', 'mascarillas de proteccion',
'pantalla facial', 'tapabocas']
interest_by_region(keywords, "spanish", localPath)
return None
def interest_by_region(keywords, language, localPath):
country_code = pd.read_csv("tableconvert_csv_j8hnfj.csv", quotechar = "\"")
if language.lower() == 'spanish':
pytrend = TrendReq(hl='sp-SP')
else:
pytrend = TrendReq()
today = date.today()
march = "2020-03-01"
# there is a limit of 100 characters for keywords, so break them into multiple requests
while len(keywords) > 0:
character_len = 0
keywords_split = []
for kk in keywords:
character_len += len(kk)
if character_len < 50:
keywords_split.append(kk)
for item in keywords_split:
keywords.remove(item)
pytrend.build_payload(kw_list=keywords_split, timeframe= march + " " + today.strftime("%Y-%m-%d"))
df_regions = pytrend.interest_by_region(inc_geo_code=True)
df_regions['country'] = df_regions.index
df_regions = pd.merge(df_regions, country_code, left_on="geoCode", right_on="Alpha-2 code", how="left")
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# This file contains dummy data for the model unit tests
import numpy as np
import pandas as pd
AIR_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 380.6292037661305,
1: 383.26004701147235,
2: 385.8905370924373,
3: 388.52067431512216,
4: 391.1504589893095,
5: 393.7798914284503,
6: 396.4089719496461,
7: 399.0377008736321,
8: 401.66607852475926,
9: 404.2941052309762,
10: 406.9217813238114,
11: 409.54910713835505,
12: 412.1760830132403,
13: 414.80270929062544,
14: 417.42898631617453,
15: 420.0549144390392,
16: 422.68049401183924,
17: 425.3057253906438,
18: 427.93060893495215,
19: 430.555145007674,
20: 433.1793339751107,
21: 435.8031762069345,
22: 438.42667207616984,
23: 441.0498219591729,
24: 443.6726262356114,
25: 446.2950852884452,
26: 448.91719950390507,
27: 451.53896927147304,
28: 454.1603949838614,
29: 456.78147703699216,
},
"fcst_upper": {
0: 565.2596851227581,
1: 567.9432096935082,
2: 570.6270874286351,
3: 573.3113180220422,
4: 575.9959011639468,
5: 578.680836540898,
6: 581.3661238357942,
7: 584.0517627279,
8: 586.7377528928648,
9: 589.4240940027398,
10: 592.1107857259966,
11: 594.797827727545,
12: 597.4852196687516,
13: 600.1729612074585,
14: 602.8610519980012,
15: 605.5494916912286,
16: 608.2382799345206,
17: 610.9274163718079,
18: 613.6169006435915,
19: 616.3067323869615,
20: 618.9969112356168,
21: 621.6874368198849,
22: 624.3783087667415,
23: 627.0695266998305,
24: 629.7610902394838,
25: 632.4529990027421,
26: 635.145252603374,
27: 637.8378506518982,
28: 640.5307927556019,
29: 643.2240785185628,
},
}
)
AIR_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 351.01805478037915,
1: 353.64044896268456,
2: 356.2623766991775,
3: 358.883838394139,
4: 361.50483445671773,
5: 364.12536530090745,
6: 366.74543134552374,
7: 369.3650330141812,
8: 371.98417073526997,
9: 374.6028449419319,
10: 377.2210560720369,
11: 379.83880456815905,
12: 382.45609087755207,
13: 385.07291545212513,
14: 387.68927874841813,
15: 390.3051812275768,
16: 392.92062335532785,
17: 395.5356056019535,
18: 398.15012844226646,
19: 400.764192355584,
20: 403.37779782570226,
21: 405.99094534087044,
22: 408.60363539376465,
23: 411.2158684814615,
24: 413.82764510541136,
25: 416.4389657714128,
26: 419.04983098958445,
27: 421.66024127433906,
28: 424.2701971443558,
29: 426.8796991225531,
},
"fcst_upper": {
0: 594.8708341085095,
1: 597.562807742296,
2: 600.255247821895,
3: 602.9481539430253,
4: 605.6415256965386,
5: 608.3353626684409,
6: 611.0296644399166,
7: 613.724430587351,
8: 616.4196606823541,
9: 619.1153542917842,
10: 621.8115109777711,
11: 624.508130297741,
12: 627.2052118044398,
13: 629.9027550459588,
14: 632.6007595657577,
15: 635.299224902691,
16: 637.998150591032,
17: 640.6975361604982,
18: 643.3973811362772,
19: 646.0976850390515,
20: 648.7984473850253,
21: 651.4996676859489,
22: 654.2013454491467,
23: 656.903480177542,
24: 659.6060713696838,
25: 662.3091185197744,
26: 665.0126211176946,
27: 667.716578649032,
28: 670.4209905951075,
29: 673.1258564330019,
},
}
)
PEYTON_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 7.055970485245664,
1: 7.056266316358524,
2: 7.056561800026597,
3: 7.056856936297079,
4: 7.057151725217398,
5: 7.05744616683524,
6: 7.057740261198534,
7: 7.058034008355445,
8: 7.058327408354395,
9: 7.058620461244044,
10: 7.0589131670733005,
11: 7.059205525891312,
12: 7.059497537747475,
13: 7.059789202691431,
14: 7.0600805207730595,
15: 7.060371492042489,
16: 7.060662116550093,
17: 7.060952394346479,
18: 7.06124232548251,
19: 7.0615319100092835,
20: 7.061821147978145,
21: 7.062110039440677,
22: 7.062398584448709,
23: 7.062686783054313,
24: 7.0629746353098,
25: 7.063262141267724,
26: 7.063549300980883,
27: 7.063836114502315,
28: 7.0641225818852975,
29: 7.064408703183352,
},
"fcst_upper": {
0: 9.903278969069254,
1: 9.903703030365794,
2: 9.90412743910712,
3: 9.904552195246042,
4: 9.904977298735123,
5: 9.90540274952668,
6: 9.90582854757279,
7: 9.906254692825279,
8: 9.90668118523573,
9: 9.90710802475548,
10: 9.907535211335626,
11: 9.907962744927016,
12: 9.908390625480251,
13: 9.9088188529457,
14: 9.90924742727347,
15: 9.909676348413441,
16: 9.91010561631524,
17: 9.910535230928254,
18: 9.910965192201623,
19: 9.91139550008425,
20: 9.91182615452479,
21: 9.912257155471659,
22: 9.912688502873028,
23: 9.913120196676825,
24: 9.91355223683074,
25: 9.913984623282214,
26: 9.914417355978456,
27: 9.914850434866427,
28: 9.915283859892844,
29: 9.91571763100419,
},
}
)
PEYTON_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 6.605000045325637,
1: 6.605275566724015,
2: 6.605550630617649,
3: 6.605825237068679,
4: 6.606099386139563,
5: 6.60637307789309,
6: 6.606646312392368,
7: 6.606919089700827,
8: 6.607191409882221,
9: 6.607463273000626,
10: 6.607734679120443,
11: 6.608005628306389,
12: 6.608276120623508,
13: 6.608546156137163,
14: 6.608815734913038,
15: 6.609084857017139,
16: 6.609353522515795,
17: 6.609621731475649,
18: 6.609889483963668,
19: 6.610156780047143,
20: 6.61042361979368,
21: 6.610690003271204,
22: 6.610955930547961,
23: 6.611221401692519,
24: 6.611486416773756,
25: 6.611750975860878,
26: 6.612015079023405,
27: 6.612278726331177,
28: 6.612541917854348,
29: 6.612804653663393,
},
"fcst_upper": {
0: 10.354249408989281,
1: 10.354693780000304,
2: 10.355138608516068,
3: 10.355583894474442,
4: 10.356029637812957,
5: 10.35647583846883,
6: 10.356922496378955,
7: 10.357369611479896,
8: 10.357817183707903,
9: 10.358265212998898,
10: 10.358713699288483,
11: 10.359162642511938,
12: 10.359612042604219,
13: 10.360061899499968,
14: 10.360512213133493,
15: 10.36096298343879,
16: 10.361414210349539,
17: 10.361865893799084,
18: 10.362318033720465,
19: 10.36277063004639,
20: 10.363223682709256,
21: 10.363677191641132,
22: 10.364131156773775,
23: 10.364585578038621,
24: 10.365040455366783,
25: 10.365495788689062,
26: 10.365951577935935,
27: 10.366407823037564,
28: 10.366864523923793,
29: 10.36732168052415,
},
}
)
PEYTON_FCST_LINEAR_INVALID_ZERO = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2012-05-02 00:00:00"),
1: pd.Timestamp("2012-05-03 00:00:00"),
2: pd.Timestamp("2012-05-04 00:00:00"),
3: pd.Timestamp("2012-05-05 00:00:00"),
4: pd.Timestamp("2012-05-06 00:00:00"),
5: pd.Timestamp("2012-05-07 00:00:00"),
6: pd.Timestamp("2012-05-08 00:00:00"),
7: pd.Timestamp("2012-05-09 00:00:00"),
8: pd.Timestamp("2012-05-10 00:00:00"),
9: pd.Timestamp("2012-05-11 00:00:00"),
10: pd.Timestamp("2012-05-12 00:00:00"),
11: pd.Timestamp("2012-05-13 00:00:00"),
12: pd.Timestamp("2012-05-14 00:00:00"),
13: pd.Timestamp("2012-05-15 00:00:00"),
14: pd.Timestamp("2012-05-16 00:00:00"),
15: pd.Timestamp("2012-05-17 00:00:00"),
16: pd.Timestamp("2012-05-18 00:00:00"),
17: pd.Timestamp("2012-05-19 00:00:00"),
18: pd.Timestamp("2012-05-20 00:00:00"),
19: pd.Timestamp("2012-05-21 00:00:00"),
20: pd.Timestamp("2012-05-22 00:00:00"),
21: pd.Timestamp("2012-05-23 00:00:00"),
22: pd.Timestamp("2012-05-24 00:00:00"),
23: pd.Timestamp("2012-05-25 00:00:00"),
24: pd.Timestamp("2012-05-26 00:00:00"),
25: pd.Timestamp("2012-05-27 00:00:00"),
26: pd.Timestamp("2012-05-28 00:00:00"),
27: pd.Timestamp("2012-05-29 00:00:00"),
28: pd.Timestamp("2012-05-30 00:00:00"),
29: pd.Timestamp("2012-05-31 00:00:00"),
30: pd.Timestamp("2012-06-01 00:00:00"),
31: pd.Timestamp("2012-06-02 00:00:00"),
32: pd.Timestamp("2012-06-03 00:00:00"),
33: pd.Timestamp("2012-06-04 00:00:00"),
34: pd.Timestamp("2012-06-05 00:00:00"),
35: pd.Timestamp("2012-06-06 00:00:00"),
36: pd.Timestamp("2012-06-07 00:00:00"),
37: pd.Timestamp("2012-06-08 00:00:00"),
38: pd.Timestamp("2012-06-09 00:00:00"),
39: pd.Timestamp("2012-06-10 00:00:00"),
40: pd.Timestamp("2012-06-11 00:00:00"),
41: pd.Timestamp("2012-06-12 00:00:00"),
42: pd.Timestamp("2012-06-13 00:00:00"),
43: pd.Timestamp("2012-06-14 00:00:00"),
44: pd.Timestamp("2012-06-15 00:00:00"),
45: pd.Timestamp("2012-06-16 00:00:00"),
46: pd.Timestamp("2012-06-17 00:00:00"),
47: pd.Timestamp("2012-06-18 00:00:00"),
48: pd.Timestamp("2012-06-19 00:00:00"),
49: pd.Timestamp("2012-06-20 00:00:00"),
50: pd.Timestamp("2012-06-21 00:00:00"),
51: pd.Timestamp("2012-06-22 00:00:00"),
52: pd.Timestamp("2012-06-23 00:00:00"),
53: pd.Timestamp("2012-06-24 00:00:00"),
54: pd.Timestamp("2012-06-25 00:00:00"),
55: pd.Timestamp("2012-06-26 00:00:00"),
56: pd.Timestamp("2012-06-27 00:00:00"),
57: pd.Timestamp("2012-06-28 00:00:00"),
58: pd.Timestamp("2012-06-29 00:00:00"),
59: pd.Timestamp("2012-06-30 00:00:00"),
60: pd.Timestamp("2012-07-01 00:00:00"),
61: pd.Timestamp("2012-07-02 00:00:00"),
62: pd.Timestamp("2012-07-03 00:00:00"),
63: pd.Timestamp("2012-07-04 00:00:00"),
# This script converts Wikiversity multiple-choice questions with choice-specific feedback to a standard Respondus format.
# import package
import pandas as pd
# set display options
pd.set_option('display.max_rows', 500)
import ast
import pandas
from pandas.io.json.normalize import nested_to_record
def flatten_data(data, json_column='data'):
json_data = data.pop(json_column)
# this gets at any nested dicts as well
flat_data = pandas.DataFrame(nested_to_record(json_data))
# rename the columns so they can be un-flattened later
flat_data.columns = ['{0}.{1}'.format(json_column, i) for i in flat_data.columns.values]
other_data = pandas.DataFrame(data)
return pandas.concat([other_data, flat_data], axis=1)
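# A minimal, self-contained sketch of flatten_data on an assumed record
# (hypothetical data, for illustration only):
def _example_flatten_data():
    record = {"id": [1], "data": [{"a": 1, "nested": {"b": 2}}]}
    # -> columns: "id", "data.a", "data.nested.b"
    return flatten_data(record, json_column="data")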
"""
Functions for retrieving summary data from a dataset.
"""
from __future__ import annotations
import typing
from collections import defaultdict
import datetime
import warnings
import numpy as np
import pandas as pd
import pandas.io.formats.style
import idelib.dataset
from .measurement import MeasurementType, ANY, get_channels
from .files import get_doc
from .util import parse_time
__all__ = [
"get_channel_table",
"to_pandas",
"get_primary_sensor_data",
]
# ============================================================================
# Display formatting functions
# ============================================================================
def format_channel_id(ch: idelib.dataset.Channel) -> str:
""" Function for formatting an `idelib.dataset.Channel` or `SubChannel`
for display. Renders as only the channel and subchannel IDs (the other
information is shown in the rest of the table).
:param ch: The `idelib.dataset.Channel` or `idelib.dataset.SubChannel`
to format.
:return: A formatted "channel.subchannel" string.
"""
try:
if ch.parent:
return f"{ch.parent.id}.{ch.id}"
else:
return f"{ch.id}.*"
except (AttributeError, TypeError, ValueError) as err:
warnings.warn(f"format_channel_id({ch!r}) raised {type(err).__name__}: {err}")
return str(ch)
def format_timedelta(val: typing.Union[int, float, datetime.datetime, datetime.timedelta]) -> str:
""" Function for formatting microsecond timestamps (e.g., start, end,
or duration) as times. Somewhat more condensed than the standard
`DataFrame` formatting of `datetime.timedelta`.
:param val: The `pandas.Timedelta` or `datetime.timedelta` to format.
Will also work with microseconds as `float` or `int`.
:return: A formatted time 'duration' string.
"""
try:
if isinstance(val, datetime.timedelta):
td = pd.Timedelta(val)
else:
td = pd.Timedelta(microseconds=val)
# NOTE: `components` attr only exists in pandas `Timedelta`
c = td.components
s = f"{c.minutes:02d}:{c.seconds:02d}.{c.milliseconds:04d}"
if c.hours or c.days:
s = f"{c.hours:02d}:{s}"
if c.days:
s = f"{c.days}d {s}"
return s
except (AttributeError, TypeError, ValueError) as err:
warnings.warn(f"format_timedelta({val!r}) raised {type(err).__name__}: {err}")
return str(val)
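# e.g. (illustrative): format_timedelta(90_061_000_000) -> "1d 01:01:01.0000"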
def format_timestamp(ts: typing.Union[int, float]) -> str:
""" Function for formatting start/end timestamps. Somewhat more condensed
than the standard Pandas formatting.
:param ts: The timestamps in microseconds. Rendered as integers, since
`idelib` timestamps have whole microsecond resolution.
:return: A formatted timestamp string, with units.
"""
try:
return f"{int(ts)} µs"
except (TypeError, ValueError) as err:
warnings.warn(f"format_timestamp({ts!r}) raised {type(err).__name__}: {err}")
return str(ts)
# ============================================================================
#
# ============================================================================
""" The default table formatting. """
TABLE_FORMAT = {
'channel': format_channel_id,
'start': format_timedelta,
'end': format_timedelta,
'duration': format_timedelta,
'rate': "{:.2f} Hz",
}
def get_channel_table(dataset: typing.Union[idelib.dataset.Dataset, list],
measurement_type=ANY,
start: typing.Union[int, float, str, datetime.datetime, datetime.timedelta] = 0,
end: typing.Optional[int, float, str, datetime.datetime, datetime.timedelta] = None,
formatting: typing.Optional[dict] = None,
index: bool = True,
precision: int = 4,
timestamps: bool = False,
**kwargs) -> typing.Union[pd.DataFrame, pd.io.formats.style.Styler]:
""" Get summary data for all `SubChannel` objects in a `Dataset` that
contain one or more type of sensor data. By using the optional
`start` and `end` parameters, information can be retrieved for a
specific interval of time.
The `start` and `end` times, if used, may be specified in several
ways:
* `int`/`float` (Microseconds from the recording start)
* `str` (formatted as a time from the recording start, e.g., `MM:SS`,
`HH:MM:SS`, `DDd HH:MM:SS`). More examples:
* ``":01"`` or ``":1"`` or ``"1s"`` (1 second)
* ``"22:11"`` (22 minutes, 11 seconds)
* ``"3:22:11"`` (3 hours, 22 minutes, 11 seconds)
* ``"1d 3:22:11"`` (1 day, 3 hours, 22 minutes, 11 seconds)
* `datetime.timedelta` or `pandas.Timedelta` (time from the
recording start)
* `datetime.datetime` (an explicit UTC time)
:param dataset: A `idelib.dataset.Dataset` or a list of
channels/subchannels from which to build the table.
:param measurement_type: A :py:class:`~endaq.ide.MeasurementType`, a
measurement type 'key' string, or a string of multiple keys
generated by adding and/or subtracting
:py:class:`~endaq.ide.MeasurementType` objects to filter the
results. Any 'subtracted' types will be excluded.
:param start: The starting time. Defaults to the start of the
recording.
:param end: The ending time. Defaults to the end of the recording.
:param formatting: A dictionary of additional style/formatting items
(see `pandas.DataFrame.style.format()`). If `False`, no additional
formatting is applied.
:param index: If `True`, show the index column on the left.
:param precision: The default decimal precision to display. Can be
changed later.
:param timestamps: If `True`, show the start and end as raw
microsecond timestamps.
:returns: A table (`pandas.io.formats.style.Styler`) of summary data.
:rtype: pandas.DataFrame
"""
# We don't support multiple sessions on current Slam Stick/enDAQ recorders,
# but in the event we ever do, this allows one to be specified like so:
# :param session: A `Session` or session ID to retrieve from a
# multi-session recording.
# Leave out of docstring until we ever support it.
session = kwargs.get('session', None)
if session:
session = getattr(session, 'sessionId', session)
if hasattr(dataset, 'getPlots'):
sources = get_channels(dataset, measurement_type)
else:
sources = dataset
result = defaultdict(list)
for source in sources:
range_start = range_end = duration = rate = session_start = None
samples = 0
data = source.getSession(session)
if data.session.utcStartTime:
session_start = datetime.datetime.utcfromtimestamp(data.session.utcStartTime)
start = parse_time(start, session_start)
end = parse_time(end, session_start)
if len(data):
if not start and not end:
start_idx, end_idx = 0, -1
samples = len(data)
else:
start_idx, end_idx = data.getRangeIndices(start, end)
end_idx = min(len(data) - 1, end_idx)
if end_idx < 0:
samples = len(data) - start_idx - 1
else:
samples = end_idx - start_idx
range_start = data[int(start_idx)][0]
range_end = data[int(end_idx)][0]
duration = range_end - range_start
rate = samples / (duration / 10 ** 6)
result['channel'].append(source)
result['name'].append(source.name)
result['type'].append(source.units[0])
result['units'].append(source.units[1])
result['start'].append(range_start)
result['end'].append(range_end)
result['duration'].append(duration)
result['samples'].append(samples)
result['rate'].append(rate)
# # TODO: RESTORE AFTER FIX IN idelib
# dmin, dmean, dmax = data.getRangeMinMeanMax(start, end)
# result['min'].append(dmin)
# result['mean'].append(dmean)
# result['max'].append(dmax)
if formatting is False:
return pd.DataFrame(result)
# This code extract the features from the raw joined dataset (data.csv)
# and save it in the LibSVM format.
# Usage: python construct_features.py
import pandas as pd
import numpy as np
from sklearn.datasets import dump_svmlight_file
df = pd.read_csv("data.csv", low_memory=False)
# NPU
NPU = df.NPU.copy()
NPU[NPU == ' '] = np.nan
NPU = pd.get_dummies(NPU, prefix="NPU")
# SiteZip
SiteZip = df.SiteZip.copy()
SiteZip = SiteZip.str.replace(',','')
SiteZip = SiteZip.str.replace('\.00','')
SiteZip = SiteZip.replace('0',np.nan)
SiteZip = pd.get_dummies(SiteZip, prefix="SiteZip")
# Submarket1
Submarket1 = df.Submarket1.copy()
Submarket1 = pd.get_dummies(Submarket1, prefix="Submarket1")
# TAX_DISTR
TAX_DISTR = df.TAX_DISTR.copy()
TAX_DISTR[TAX_DISTR == ' '] = np.nan
TAX_DISTR = pd.get_dummies(TAX_DISTR, prefix="TAX_DISTR")
# NBHD
NBHD = df.NBHD.copy()
NBHD[NBHD == ' '] = np.nan
NBHD = pd.get_dummies(NBHD, prefix="NBHD")
# ZONING_NUM
ZONING_NUM = df.ZONING_NUM.copy()
ZONING_NUM[ZONING_NUM == ' '] = np.nan
ZONING_NUM = pd.get_dummies(ZONING_NUM, prefix="ZONING_NUM")
# building_c
building_c = df.building_c.copy()
building_c[building_c == ' '] = np.nan
building_c = pd.get_dummies(building_c, prefix="building_c")
# PROP_CLASS
PROP_CLASS = df.PROP_CLASS.copy()
PROP_CLASS[PROP_CLASS == ' '] = np.nan
PROP_CLASS = pd.get_dummies(PROP_CLASS, prefix="PROP_CLASS")
# Existing_p
Existing_p = df.Existing_p.copy()
Existing_p[Existing_p == ' '] = np.nan
Existing_p = pd.get_dummies(Existing_p, prefix="Existing_p")
# PropertyTy
PropertyTy = df.PropertyTy.copy()
PropertyTy = pd.get_dummies(PropertyTy, prefix="PropertyTy")
# secondaryT
secondaryT = df.secondaryT.copy()
secondaryT[secondaryT == ' '] = np.nan
secondaryT = pd.get_dummies(secondaryT, prefix="secondaryT")
# LUC
LUC = df.LUC.copy()
LUC[LUC == ' '] = np.nan
LUC = pd.get_dummies(LUC, prefix="LUC")
# Taxes_Per_
Taxes_Per_ = df.Taxes_Per_.copy()
Taxes_Per_zero = (Taxes_Per_ == "0").apply(int)
Taxes_Per_zero.name = 'Taxes_Per_zero'
Taxes_Per_ = Taxes_Per_.str.replace(',','').astype(float)
Taxes_Per_ = np.log1p(Taxes_Per_)
Taxes_Per_ = Taxes_Per_ / Taxes_Per_.max()
Taxes_Per_ = pd.concat([Taxes_Per_, Taxes_Per_zero], axis=1)
# Taxes_Tota
Taxes_Tota = df.Taxes_Tota.copy()
Taxes_Tota_zero = (Taxes_Tota == "0").apply(int)
Taxes_Tota_zero.name = 'Taxes_Tota_zero'
Taxes_Tota = Taxes_Tota.str.replace(',','').astype(float)
Taxes_Tota = np.log1p(Taxes_Tota)
Taxes_Tota = Taxes_Tota / Taxes_Tota.max()
Taxes_Tota = pd.concat([Taxes_Tota, Taxes_Tota_zero], axis=1)
# TOT_APPR
TOT_APPR = df.TOT_APPR.copy()
TOT_APPR_zero = (TOT_APPR == "0").apply(int)
TOT_APPR_zero.name = 'TOT_APPR_zero'
TOT_APPR = TOT_APPR.str.replace(',','').astype(float)
TOT_APPR = np.log1p(TOT_APPR)
TOT_APPR = TOT_APPR / TOT_APPR.max()
TOT_APPR = pd.concat([TOT_APPR, TOT_APPR_zero], axis=1)
# VAL_ACRES
VAL_ACRES = df.VAL_ACRES.copy()
VAL_ACRES_zero = (VAL_ACRES == 0).apply(int)
VAL_ACRES_zero.name = 'VAL_ACRES_zero'
VAL_ACRES = np.log1p(VAL_ACRES)
VAL_ACRES = VAL_ACRES / VAL_ACRES.max()
VAL_ACRES = pd.concat([VAL_ACRES, VAL_ACRES_zero], axis=1)
# For_Sale_P
For_Sale_P = df.For_Sale_P.copy()
For_Sale_P_notNA = (For_Sale_P != " ").apply(int)
For_Sale_P_notNA.name = 'For_Sale_P_notNA'
For_Sale_P[For_Sale_P == ' '] = 0
For_Sale_P = For_Sale_P.astype(float)
For_Sale_P = np.log1p(For_Sale_P)
For_Sale_P = For_Sale_P / For_Sale_P.max()
For_Sale_P = pd.concat([For_Sale_P, For_Sale_P_notNA], axis=1)
# Last_Sale1
Last_Sale1 = df.Last_Sale1.copy()
Last_Sale1_zero = (Last_Sale1 == "0").apply(int)
Last_Sale1_zero.name = "Last_Sale1_zero"
Last_Sale1 = Last_Sale1.str.replace(',','').astype(float)
Last_Sale1 = np.log1p(Last_Sale1)
Last_Sale1 = (Last_Sale1 - Last_Sale1.min()) / (Last_Sale1.max() - Last_Sale1.min())
Last_Sale1 = pd.concat([Last_Sale1, Last_Sale1_zero], axis=1)
# yearbuilt
yearbuilt = df.yearbuilt.copy()
yearbuilt_zero = (yearbuilt == "0").apply(int)
yearbuilt_zero.name = "yearbuilt_zero"
yearbuilt[yearbuilt == "0"] = np.nan
yearbuilt = yearbuilt.str.replace(',','').astype(float)
yearbuilt = (yearbuilt - yearbuilt.min()) / (yearbuilt.max() - yearbuilt.min())
yearbuilt = yearbuilt.fillna(0)
yearbuilt = pd.concat([yearbuilt, yearbuilt_zero], axis=1)
# year_reno
year_reno = df.year_reno.copy()
reno = (year_reno != "0").apply(int)
reno.name = "reno"
year_reno[year_reno == "0"] = np.nan
year_reno = year_reno.str.replace(',','').astype(float)
year_reno = (year_reno - year_reno.min()) / (year_reno.max() - year_reno.min())
year_reno = year_reno.fillna(0)
year_reno = pd.concat([year_reno, reno], axis=1)
# Lot_Condition
Lot_Condition = df.Lot_Condition.copy()
Lot_Condition[Lot_Condition == ' '] = np.nan
Lot_Condition = pd.get_dummies(Lot_Condition, prefix="Lot_Condition")
# Structure_Condition
Structure_Condition = df.Structure_Condition.copy()
Structure_Condition[Structure_Condition == ' '] = np.nan
Structure_Condition = pd.get_dummies(Structure_Condition, prefix="Structure_Condition")
""" Read groundwater measurement data from dinoloket csv files.
Author : <NAME>, 2020
History: 02-02-2014 created for python2.7;
15-08-2015 migrated to python3.x;
06-07-2019 migrated to acequia
"""
import os
import os.path
from datetime import datetime
import csv
import time
import datetime as dt
import warnings
import numpy as np
from pandas import Series, DataFrame
import pandas as pd
sep = ","
def read_dinogws():
""" read data from Dinoloket csv file with groundwater measurement
data """
pass
class DinoGws:
"""Read TNO Dinoloket csv file with groundwater measurement data"""
_metatag = ','.join(
['Locatie','Filternummer','Externe aanduiding',
'X-coordinaat','Y-coordinaat','Maaiveld (cm t.o.v. NAP)',
'Datum maaiveld gemeten','Startdatum','Einddatum',
'Meetpunt (cm t.o.v. NAP)','Meetpunt (cm t.o.v. MV)',
'Bovenkant filter (cm t.o.v. NAP)',
'Onderkant filter (cm t.o.v. NAP)'
])
_datatag = ','.join(
['Locatie','Filternummer','Peildatum',
'Stand (cm t.o.v. MP)','Stand (cm t.o.v. MV)',
'Stand (cm t.o.v. NAP)','Bijzonderheid,Opmerking','','',''
])
_missingdata = (f'Van deze put zijn geen standen opgenomen',
f'in de DINO-database')
_header_cols = ["nitgcode","filter","tnocode","xcoor",
"ycoor","mvcmnap","mvdatum","startdatum","einddatum",
"mpcmnap","mpcmmv","filtopcmnap","filbotcmnap"]
_data_cols = ["nitgcode","filter","peildatum","standcmmp",
"standcmmv","standcmnap","bijzonderheid","opmerking"]
_head_cols = ['peildatum','standcmmp','bijzonderheid','opmerking']
def __repr__(self):
return (f'{self.srname()} (n={len(self._data)})')
def __init__(self,filepath=None,readall=True):
# lines marking data blocks in dinofiles
if isinstance(readall,bool):
self.readall=readall
else:
self.readall=True
warnstr = 'Variable \'{vname}\' not of type boolean ' \
'but of type \'{tname}\'. Data will be read.'.format(
vname=readall, tname=type(readall))
warnings.warn(warnstr)
# recognition patterns for dino files
self._metatag = "Locatie,Filternummer,Externe aanduiding,X-coordinaat,Y-coordinaat,Maaiveld (cm t.o.v. NAP),Datum maaiveld gemeten,Startdatum,Einddatum,Meetpunt (cm t.o.v. NAP),Meetpunt (cm t.o.v. MV),Bovenkant filter (cm t.o.v. NAP),Onderkant filter (cm t.o.v. NAP)"
self._datatag = "Locatie,Filternummer,Peildatum,Stand (cm t.o.v. MP),Stand (cm t.o.v. MV),Stand (cm t.o.v. NAP),Bijzonderheid,Opmerking,,,"
self._missingdata = "Van deze put zijn geen standen opgenomen in de DINO-database"
self._header_cols = ["nitgcode","filter","tnocode","xcoor","ycoor","mvcmnap","mvdatum","startdatum","einddatum","mpcmnap","mpcmmv","filtopcmnap","filbotcmnap"]
self._data_cols = ["nitgcode","filter","peildatum","standcmmp","standcmmv","standcmnap","bijzonderheid","opmerking"]
# create empty variables
self._reset()
if filepath != None:
self.flines = self._readfile(filepath)
self._header, self._data = self._readlines()
if self._header.empty and not self._data.empty:
self._header = DataFrame(data=[[np.nan]*len(self._header_cols)],columns=self._header_cols)
self._header = self._header.astype({'nitgcode':str,'filter':str})
self._header.at[0,'nitgcode'] = self._data.at[0,'nitgcode']
self._header.at[0,'filter'] = self._data.at[0,'filter']
self._header.at[0,'startdatum'] = self._data.at[0,'peildatum']
#self._tubeprops.at[0,'startdate'] = heads.index[0]
def _reset(self):
""" Reset all variables """
self.filepath = ""
self.errors = []
self._data = DataFrame()
self._header = DataFrame()
self._datatext = DataFrame()
self._headertext = DataFrame()
self.dfdesc = DataFrame()
self.dfdescloc = DataFrame()
self.seriesname = ""
def _readfile(self,filepath):
""" Open DINO csv file and return list of filelines """
self._reset()
self.filepath = filepath
try:
self.file = open(self.filepath,'r')
except (IOError, TypeError) as err:
errno, strerror = err.args
print("{!s}".format(errno), end="")
print("I/O fout{!s}".format(strerror), end="")
print (" : "+self.filepath)
self.errors.append(
[self.filepath,
"File can not be opened"])
self.flines=[]
raise
else:
self.flines = self.file.readlines()
self.file.close()
return self.flines
def _readlines(self):
""" read list of file lines from dinofile to data """
# assert file is valid ascii
if len(self.flines)==0:
file_valid = False
elif self.flines[0][0]=='\x00':
# test for corrupted file with only 'x00'
# Yes, I have really seen this
file_valid=False
else:
file_valid = True
if file_valid==False:
self.headerstart=0
self.headerend=0
self.datastart=0
else:
# findlines in dinofile lines
self.headerstart, self.headerend, self.datastart = \
self._findlines()
# read header
if self.headerstart>0 and self.headerend>0:
self._header = self._readheader()
else:
self._header = DataFrame()
# read data
if self.datastart>0:
self._data = self._readgws()
else:
self._data = DataFrame()
return self._header, self._data
def _findlines(self):
""" Find start of header and data; if file has data at all """
# set variables to find at zero
self.headerstart = 0
self.headerend = 0
self.datastart = 0
# find variables
for il in range(len(self.flines)):
if self.flines[il].startswith(self._missingdata):
                # well without any measurements
self.errors.append([self.filepath,"Bestand bevat geen data"])
self.hasheader = False
self.hasdata = False
break
if self.flines[il].startswith(self._metatag): #("Locatie,Filternummer,Externe"):
                if not self.flines[il+1].startswith("B"): # no header lines below the header caption
self.hasheader = False
self.errors.append([self.filepath,"Bestand zonder header"])
else:
while True:
il+=1
if self.flines[il].startswith("B"):
if self.headerstart==0:
self.hasheader = True
self.headerstart = il
#self.headerlength = 1
#else:
# self.headerlength+=1
                        else: # past the last header line
self.headerend = il
break
if self.flines[il].startswith(self._datatag): #("Locatie,Filternummer,Peildatum"):
                # determine the first line number that contains data
il+=1
if self.flines[il].startswith("B"):
self.hasdata = True
self.datastart = il
else:
self.hasdata = False
self.errors.append([self.filepath,"Bestand zonder grondwaterstanden"])
break
il+=1
# end of def findlines
return self.headerstart, self.headerend, self.datastart
@staticmethod
def parse_dino_date(datestring,addtime=False):
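        """Parse a DINO date string (dd-mm-yyyy) into a datetime object.

        Returns np.NaN when the input is empty or not a string; when
        addtime is True the time component is set to 12:00."""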
if isinstance(datestring, str):
if datestring!="":
# string to datetime.datetime object
if addtime==True: date = datetime.strptime(
datestring+" 12:00", "%d-%m-%Y %H:%M")
else: date = datetime.strptime(datestring, "%d-%m-%Y")
## replace invalid date with np.NaN
##if date.year < 1900: date = np.NaN
##elif date.year > datetime.now().year: date = np.NaN
else:
date = np.NaN
else:
date = np.NaN
return date
def _readheader(self): #public
""" Read header data and return pandas dataframe """
if self.headerstart>0 and self.headerend > self.headerstart:
# create _header
headerlist = [line[:-1].split(sep) for line in self.flines[self.headerstart:self.headerend]]
self._header = DataFrame(headerlist, columns=self._header_cols)
self._headertext = DataFrame(headerlist, columns=self._header_cols)
# transform column values
self._header["filter"] = self._header["filter"].apply(lambda x: x.lstrip("0"))
self._header["mvdatum"] = self._header["mvdatum"].apply(lambda x:self.parse_dino_date(x))
self._header["startdatum"] = self._header["startdatum"].apply(lambda x:self.parse_dino_date(x))
self._header["einddatum"] = self._header["einddatum"].apply(lambda x:self.parse_dino_date(x))
# make seriesname
##self.seriesname = self._header["nitgcode"].values[0]+"_"+str(self._header["filter"].values[0])
else:
# create empty dataframe
self._header = DataFrame(columns=self._header_cols)
self._headertext = DataFrame(columns=self._header_cols)
self.seriesname = "onbekend" # self.filename.split(".")[0]
#print("warning : series has no header")
return self._header
def _readgws(self):
""" Read groundwater measurements to pandas data frame """
def fstr2float(astr):
try:
aval = float(astr)
except ValueError:
aval = np.NaN
return aval
if self.datastart>0:
# create list of data from filelines
data = [line[:-1].split(sep)[0:7]+[sep.join(line[:-1].split(sep)[7:])] for line in self.flines[self.datastart:]]
self._datatext = | DataFrame(data,columns=self._data_cols) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
import pandas as pd
from glob import glob
import numpy as np
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio import SeqIO
import os
data = | pd.read_csv('mydata.csv') | pandas.read_csv |
import json
import io
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import dash
from dash import html
from dash import dcc
import dash_bootstrap_components as dbc
import pandas as pd
import numpy as np
import plotly.express as px
from dash.dependencies import Output, Input, State
from datetime import datetime, timedelta
from server import app
from sqlalchemy import create_engine
from flask import send_file
import os
from joblib import Parallel, delayed
from dash.exceptions import PreventUpdate
import time
import re
def discriminated_antis(all_antis):
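    """Map each raw antibiotic name in all_antis to its generic name.

    Matching rules are read from the antibiotic dictionary csv and applied
    per rule level (exact match first, then increasingly loose regex rules);
    the longest successful match wins and unmatched names become NaN."""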
try:
df_抗菌药物 = pd.read_csv(r'./抗菌药物字典.csv')
except:
df_抗菌药物 = pd.read_csv(r'./抗菌药物字典.csv', encoding='gbk')
def isanti(x):
df_抗菌药物['药品'] = x.抗菌药物
df1 = df_抗菌药物[df_抗菌药物['规则等级']==1]
if x.抗菌药物 in list(df1['匹配规则'].values):
return df1[df1['匹配规则']==x.抗菌药物].reset_index(drop=True).loc[0]['抗菌药物通用名']
else:
df2 = df_抗菌药物[df_抗菌药物['规则等级']==2]
df2['是否匹配'] = df2.apply(lambda y: y.抗菌药物通用名 if re.match(y.匹配规则, y.药品) else np.nan, axis=1)
df2['匹配长度'] = df2.apply(lambda y: 0 if pd.isnull(y.是否匹配) else len( y.匹配规则 ), axis=1)
if df2[~df2['是否匹配'].isnull()].shape[0]==0:
df3 = df_抗菌药物[df_抗菌药物['规则等级']==3]
df3['是否匹配'] = df3.apply(lambda y: y.抗菌药物通用名 if re.match(y.匹配规则, y.药品) else np.nan, axis=1)
df3['匹配长度'] = df3.apply(lambda y: 0 if pd.isnull(y.是否匹配) else len( y.匹配规则 ), axis=1)
if df3[~df3['是否匹配'].isnull()].shape[0]==0:
df4 = df_抗菌药物[df_抗菌药物['规则等级']==4]
df4['是否匹配'] = df4.apply(lambda y: y.抗菌药物通用名 if re.match(y.匹配规则, y.药品) else np.nan, axis=1)
df4['匹配长度'] = df4.apply(lambda y: 0 if pd.isnull(y.是否匹配) else len( y.匹配规则 ), axis=1)
if df4[~df4['是否匹配'].isnull()].shape[0]==0:
return np.nan
else:
return df4[~df4['是否匹配'].isnull()][['抗菌药物通用名','匹配长度']].drop_duplicates().sort_values(by=['匹配长度'], ascending=False).reset_index(drop=True)['抗菌药物通用名'].loc[0]#返回正则匹配成功且匹配长度最长
else:
return df3[~df3['是否匹配'].isnull()][['抗菌药物通用名','匹配长度']].drop_duplicates().sort_values(by=['匹配长度'], ascending=False).reset_index(drop=True)['抗菌药物通用名'].loc[0]#返回正则匹配成功且匹配长度最长
else:
return df2[~df2['是否匹配'].isnull()][['抗菌药物通用名','匹配长度']].drop_duplicates().sort_values(by=['匹配长度'], ascending=False).reset_index(drop=True)['抗菌药物通用名'].loc[0]#返回正则匹配成功且匹配长度最长
all_antis['抗菌药物通用名'] = all_antis.apply(isanti, axis=1)
return all_antis
# ----------------------------------------------------------------------------------------------------- Level-1 figure 1 ----------------------------------------------------------------------------------------------------------------------
# Fetch the data for level-1 figure 1 (antibiotics / bacteria detections / drug susceptibility)
def get_first_lev_first_fig_date(engine):
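    """Count records per month for each business type (antibiotic orders,
    bacteria detections, drug susceptibility tests) and return them as a
    single DataFrame with columns 业务类型 / num / month."""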
res_数据时间缺失及汇总 = pd.DataFrame(columns=['业务类型', 'num', 'month' ])
    # problem categories, problem record counts and overall record counts
bus_dic = {
'给药': "select '给药' as 业务类型 ,count(1) as num ,substr(BEGINTIME,1,7) as month from ANTIBIOTICS where BEGINTIME is not null group by substr(BEGINTIME,1,7)",
'菌检出': " select '菌检出' as 业务类型 , count(1) as num ,substr(REQUESTTIME,1,7) as month from BACTERIA where REQUESTTIME is not null group by substr(REQUESTTIME,1,7) ",
'药敏': " select '药敏' as 业务类型 , count(1) as num ,substr(REQUESTTIME,1,7) as month from DRUGSUSCEPTIBILITY where REQUESTTIME is not null group by substr(REQUESTTIME,1,7) ",
}
for bus in bus_dic:
res_数据时间缺失及汇总 = res_数据时间缺失及汇总.append(pd.read_sql(bus_dic[bus],con=engine))
print('抗菌药物-菌检出-药敏一级图一',bus)
return res_数据时间缺失及汇总
# Update level-1 figure 1 (antibiotics / bacteria detections / drug susceptibility)
@app.callback(
Output('anti_bar_drug_first_level_first_fig','figure'),
Output('anti_bar_drug_first_level_first_fig_data','data'),
Input('anti_bar_drug_first_level_first_fig_data','data'),
Input("db_con_url", "data"),
Input("count_time", "data"),
# prevent_initial_call=True
)
def update_first_level_first_fig(anti_bar_drug_first_level_first_fig_data,db_con_url,count_time):
if db_con_url is None :
return dash.no_update
else:
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
btime = count_time['btime'][0:7]
etime = count_time['etime'][0:7]
engine = create_engine(db_con_url['db'])
if anti_bar_drug_first_level_first_fig_data is None:
anti_bar_drug_first_level_first_fig_data = {}
anti_bar_drug_first_level_first_fig = get_first_lev_first_fig_date(engine)
anti_bar_drug_first_level_first_fig_data['anti_bar_drug_first_level_first_fig'] = anti_bar_drug_first_level_first_fig.to_json(orient='split', date_format='iso')
anti_bar_drug_first_level_first_fig_data['hosname'] = db_con_url['hosname']
anti_bar_drug_first_level_first_fig_data['btime'] = btime
anti_bar_drug_first_level_first_fig_data['etime'] = etime
anti_bar_drug_first_level_first_fig_data = json.dumps(anti_bar_drug_first_level_first_fig_data)
else:
anti_bar_drug_first_level_first_fig_data = json.loads(anti_bar_drug_first_level_first_fig_data)
if db_con_url['hosname'] != anti_bar_drug_first_level_first_fig_data['hosname']:
anti_bar_drug_first_level_first_fig = get_first_lev_first_fig_date(engine)
anti_bar_drug_first_level_first_fig_data['anti_bar_drug_first_level_first_fig'] = anti_bar_drug_first_level_first_fig.to_json(orient='split',date_format='iso')
anti_bar_drug_first_level_first_fig_data['hosname'] = db_con_url['hosname']
anti_bar_drug_first_level_first_fig_data = json.dumps(anti_bar_drug_first_level_first_fig_data)
else:
anti_bar_drug_first_level_first_fig = pd.read_json(anti_bar_drug_first_level_first_fig_data['anti_bar_drug_first_level_first_fig'], orient='split')
anti_bar_drug_first_level_first_fig_data = dash.no_update
#
anti_bar_drug_first_level_first_fig = anti_bar_drug_first_level_first_fig[(anti_bar_drug_first_level_first_fig['month']>=btime) & (anti_bar_drug_first_level_first_fig['month']<=etime)]
anti_bar_drug_first_level_first_fig = anti_bar_drug_first_level_first_fig.sort_values(['month','业务类型'])
fig1 = px.line(anti_bar_drug_first_level_first_fig, x='month', y='num', color='业务类型',
color_discrete_sequence=px.colors.qualitative.Dark24)
        # use a horizontal legend and position it above the plot
fig1.update_layout(
margin=dict(l=20, r=20, t=20, b=20),
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1
))
fig1.update_yaxes(title_text="业务数据量")
fig1.update_xaxes(title_text="时间")
return fig1,anti_bar_drug_first_level_first_fig_data
# # ----------------------------------------------------------------------------------------------------- Level-1 figure 2 ----------------------------------------------------------------------------------------------------------------------
# # Fetch the data for level-1 figure 2 (antibiotics / bacteria detections / drug susceptibility)
def get_first_lev_second_fig_date(engine,btime,etime):
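    """Return the top departments (first 8 rows per query) between btime and
    etime for three indicators: detections of the monitored resistant
    bacteria, restricted/special-level antibiotic use, and resistant
    susceptibility results."""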
res_数据关键字缺失及汇总 = pd.DataFrame(columns=['业务类型', '科室', '科室名称', 'num'])
bus_dic = {'8种耐药菌检出': f""" select '8种耐药菌检出' as 业务类型, t1.dept as 科室,t2.label as 科室名称,t1.num from
(select dept,count(1) as num from BACTERIA where BACTERIA in ('大肠埃希菌', '鲍曼不动杆菌', '肺炎克雷伯菌', '金黄色葡萄球菌', '铜绿假单胞菌', '屎肠球菌', '粪肠球菌')
and substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}' and dept is not null
group by dept) t1,s_departments t2
where t1.dept=t2.code(+) order by t1.num desc
""",
"限制级特殊级抗菌药物使用" : f"""select '限制级特殊级抗菌药物使用' as 业务类型,t1.dept as 科室,t2.label as 科室名称,t1.num from
(select dept,count(1) as num from ANTIBIOTICS where ALEVEL in ('限制类', '特殊类')
and substr(BEGINTIME,1,7)>='{btime}' and substr(BEGINTIME,1,7)<='{etime}' and dept is not null
group by dept) t1,s_departments t2
where t1.dept=t2.code(+) order by t1.num desc
""",
'药敏结果为耐药': f""" select '药敏结果为耐药' as 业务类型,t1.dept as 科室,t2.label as 科室名称,t1.num from
(select dept,count(1) as num from DRUGSUSCEPTIBILITY where SUSCEPTIBILITY like '%耐药%'
and substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}' and dept is not null
group by dept) t1,s_departments t2
where t1.dept=t2.code(+) order by t1.num desc
"""
}
for bus in bus_dic:
temp = pd.read_sql(bus_dic[bus],con=engine)
temp = temp[0:8]
res_数据关键字缺失及汇总 = res_数据关键字缺失及汇总.append(temp)
return res_数据关键字缺失及汇总
# Update level-1 figure 2
@app.callback(
Output('anti_bar_drug_first_level_second_fig','figure'),
Output('anti_bar_drug_first_level_second_fig_data','data'),
# Output('rank_month_choice','min'),
# Output('rank_month_choice','max'),
# Output('rank_month_choice','value'),
# Output('rank_month_choice','marks'),
Input('anti_bar_drug_first_level_second_fig_data','data'),
# Input('rank_month_choice','value'),
Input("db_con_url", "data"),
Input("count_time", "data"),
# Input('rank_month_choice','marks'),
# prevent_initial_call=True
)
# def update_first_level_second_fig(anti_bar_drug_first_level_second_fig_data,rank_month_choice,db_con_url,count_time,marks):
def update_first_level_second_fig(anti_bar_drug_first_level_second_fig_data,db_con_url,count_time):
# def unixTimeMillis(dt):
# return int(time.mktime(dt.timetuple()))
#
# def unixToDatetime(unix):
# return pd.to_datetime(unix, unit='s')
#
# def getMarks(start, end, Nth=100):
# result = {}
# for i, date in enumerate(daterange):
# result[unixTimeMillis(date)] = str(date.strftime('%Y-%m'))
if db_con_url is None :
return dash.no_update
else:
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
engine = create_engine(db_con_url['db'])
btime = count_time['btime'][0:7]
etime = count_time['etime'][0:7]
min = dash.no_update
max = dash.no_update
value = dash.no_update
marks = dash.no_update
if anti_bar_drug_first_level_second_fig_data is None:
anti_bar_drug_first_level_second_fig_data = {}
first_level_second_fig_data = get_first_lev_second_fig_date(engine,btime,etime)
anti_bar_drug_first_level_second_fig_data['first_level_second_fig_data'] = first_level_second_fig_data.to_json(orient='split', date_format='iso')
anti_bar_drug_first_level_second_fig_data['hosname'] = db_con_url['hosname']
anti_bar_drug_first_level_second_fig_data['btime'] = btime
anti_bar_drug_first_level_second_fig_data['etime'] = etime
anti_bar_drug_first_level_second_fig_data = json.dumps(anti_bar_drug_first_level_second_fig_data)
# end_date = datetime(int(etime[0:4]), int(etime[5:7]), 1)
# start_date = datetime(int(btime[0:4]), int(btime[5:7]), 1)
# daterange = pd.date_range(start=btime+'-01', periods=((end_date.year - start_date.year) * 12 + (end_date.month - start_date.month)), freq='M')
# min = unixTimeMillis(daterange.min())
# max = unixTimeMillis(daterange.max())
# value = [unixTimeMillis(daterange.min()), unixTimeMillis(daterange.max())]
# marks = getMarks(daterange.min(), daterange.max())
else:
anti_bar_drug_first_level_second_fig_data = json.loads(anti_bar_drug_first_level_second_fig_data)
if db_con_url['hosname'] != anti_bar_drug_first_level_second_fig_data['hosname']:
first_level_second_fig_data = get_first_lev_second_fig_date(engine, btime, etime)
anti_bar_drug_first_level_second_fig_data['first_level_second_fig_data'] = first_level_second_fig_data.to_json(orient='split',date_format='iso')
anti_bar_drug_first_level_second_fig_data['hosname'] = db_con_url['hosname']
anti_bar_drug_first_level_second_fig_data['btime'] = btime
anti_bar_drug_first_level_second_fig_data['etime'] = etime
anti_bar_drug_first_level_second_fig_data = json.dumps( anti_bar_drug_first_level_second_fig_data)
# end_date = datetime(int(etime[0:4]), int(etime[5:7]), 1)
# start_date = datetime(int(btime[0:4]), int(btime[5:7]), 1)
# daterange = pd.date_range(start=btime + '-01', periods=( (end_date.year - start_date.year) * 12 + (end_date.month - start_date.month)), freq='M')
# min = unixTimeMillis(daterange.min())
# max = unixTimeMillis(daterange.max())
# value = [unixTimeMillis(daterange.min()), unixTimeMillis(daterange.max())]
# print(value)
# marks = getMarks(daterange.min(), daterange.max())
else:
if anti_bar_drug_first_level_second_fig_data['btime'] != btime or anti_bar_drug_first_level_second_fig_data['etime'] != etime:
# if rank_month_choice is not None and len(rank_month_choice)>0:
# print(rank_month_choice)
# btime1 = time.gmtime(rank_month_choice[0])
# etime1 = time.gmtime(rank_month_choice[1])
# btime = f"{btime1.tm_year}-0{btime1.tm_mon}" if btime1.tm_mon<10 else f"{btime1.tm_year}-{btime1.tm_mon}"
# etime = f"{etime1.tm_year}-0{etime1.tm_mon}" if etime1.tm_mon<10 else f"{etime1.tm_year}-{etime1.tm_mon}"
# print(btime,etime)
first_level_second_fig_data = get_first_lev_second_fig_date(engine, btime, etime)
anti_bar_drug_first_level_second_fig_data[ 'first_level_second_fig_data'] = first_level_second_fig_data.to_json(orient='split', date_format='iso')
anti_bar_drug_first_level_second_fig_data['btime'] = btime
anti_bar_drug_first_level_second_fig_data['etime'] = etime
anti_bar_drug_first_level_second_fig_data = json.dumps(anti_bar_drug_first_level_second_fig_data)
else:
first_level_second_fig_data = pd.read_json(anti_bar_drug_first_level_second_fig_data['first_level_second_fig_data'], orient='split')
anti_bar_drug_first_level_second_fig_data = dash.no_update
# print("一级第二张图数据:")
# print(rank_month_choice)
# print(marks)
bar = first_level_second_fig_data[first_level_second_fig_data['业务类型']=='8种耐药菌检出']
anti = first_level_second_fig_data[first_level_second_fig_data['业务类型']=='限制级特殊级抗菌药物使用']
drug = first_level_second_fig_data[first_level_second_fig_data['业务类型']=='药敏结果为耐药']
bar = bar.sort_values(['num'], ascending=True)
anti = anti.sort_values(['num'], ascending=True)
drug = drug.sort_values(['num'], ascending=True)
fig = make_subplots(rows=1,cols=3)
fig.add_trace(
go.Bar(x=anti['num'], y=anti['科室名称'], orientation='h', name='给药', marker_color=px.colors.qualitative.Dark24[0]),
row=1, col=1
)
fig.add_trace(
go.Bar(x=drug['num'], y=drug['科室名称'], orientation='h', name='药敏',
marker_color=px.colors.qualitative.Dark24[1]),
row=1, col=2,
)
fig.add_trace(
go.Bar(x=bar['num'],y=bar['科室名称'],orientation='h',name='菌检出', marker_color=px.colors.qualitative.Dark24[2]),
row=1,col=3
)
    # use a horizontal legend and position it above the plot
fig.update_layout(
margin=dict(l=20, r=20, t=20, b=20),
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1
))
return fig,anti_bar_drug_first_level_second_fig_data
# return fig,anti_bar_drug_first_level_second_fig_data,min ,max ,value ,marks
# # ----------------------------------------------------------------------------------------------------- Level-2 figure 1 ----------------------------------------------------------------------------------------------------------------------
# Fetch the data for level-2 figure 1 (antibiotic order checks)
def get_second_lev_first_fig_date(engine,btime,etime):
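    """Collect monthly counts between btime and etime for the ANTIBIOTICS
    order checks shown in level-2 figure 1 (medication purpose, drug level,
    start time after end time, order time outside the admission period)."""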
res_数据科室信息缺失及汇总 = pd.DataFrame(columns=['业务类型', 'num', 'month' ])
bus_dic = {'用药目的': f" select '用药目的缺失' as 业务类型,count(1) as num ,substr(BEGINTIME,1,7) as month from ANTIBIOTICS where (substr(BEGINTIME,1,7)>='{btime}' and substr(BEGINTIME,1,7)<='{etime}') group by substr(BEGINTIME,1,7) ",
'药物等级': f" select '药物等级缺失' as 业务类型,count(1) as num ,substr(BEGINTIME,1,7) as month from ANTIBIOTICS t1 where (substr(t1.BEGINTIME,1,7)>='{btime}' and substr(t1.BEGINTIME,1,7)<='{etime}') group by substr(BEGINTIME,1,7) ",
'医嘱开始时间大于结束时间': f" select '医嘱开始时间大于结束时间' as 业务类型,count(1) as num ,substr(BEGINTIME,1,7) as month from ANTIBIOTICS t1 where (substr(BEGINTIME,1,7)>='{btime}' and substr(BEGINTIME,1,7)<='{etime}') and BEGINTIME is not null and ENDTIME is not null and BEGINTIME>endtime group by substr(BEGINTIME,1,7) ",
'医嘱时间在出入院时间之外' : f""" select '医嘱时间在出入院时间之外' as 业务类型,count(1) as num ,substr(BEGINTIME,1,7) as month from ANTIBIOTICS t1,overall t2 where
( t1.BEGINTIME is not null and t1.ENDTIME is not null and t2.in_time is not null and t2.out_time is not null)
and t1.caseid = t2.caseid
and (t1.BEGINTIME<t2.IN_TIME or t1.BEGINTIME > t2.OUT_TIME or t1.ENDTIME<t2.IN_TIME or t1.ENDTIME > t2.OUT_TIME )
and (substr(t1.BEGINTIME,1,7)>='{btime}' and substr(t1.BEGINTIME,1,7)<='{etime}')
group by substr(BEGINTIME,1,7)
""",
}
for bus in bus_dic:
res_数据科室信息缺失及汇总 = res_数据科室信息缺失及汇总.append(pd.read_sql(bus_dic[bus],con=engine))
return res_数据科室信息缺失及汇总
# Update level-2 figure 1
@app.callback(
Output('anti_second_level_first_fig','figure'),
Output('anti_second_level_first_fig_data','data'),
Input('anti_second_level_first_fig_data','data'),
Input("db_con_url", "data"),
Input("count_time", "data"),
# prevent_initial_call=True
)
def update_first_level_third_fig(anti_second_level_first_fig_data,db_con_url,count_time):
if db_con_url is None:
return dash.no_update
else:
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
engine = create_engine(db_con_url['db'])
btime = count_time['btime'][0:7]
etime = count_time['etime'][0:7]
if anti_second_level_first_fig_data is None:
anti_second_level_first_fig = get_second_lev_first_fig_date(engine, btime, etime)
anti_second_level_first_fig_data={}
anti_second_level_first_fig_data['anti_second_level_first_fig'] = anti_second_level_first_fig.to_json(orient='split', date_format='iso')
anti_second_level_first_fig_data['hosname'] = db_con_url['hosname']
anti_second_level_first_fig_data['btime'] = btime
anti_second_level_first_fig_data['etime'] = etime
anti_second_level_first_fig_data = json.dumps(anti_second_level_first_fig_data)
else:
anti_second_level_first_fig_data = json.loads(anti_second_level_first_fig_data)
if db_con_url['hosname'] != anti_second_level_first_fig_data['hosname']:
anti_second_level_first_fig = get_second_lev_first_fig_date(engine, btime, etime)
anti_second_level_first_fig_data['anti_second_level_first_fig'] = anti_second_level_first_fig.to_json(orient='split',date_format='iso')
anti_second_level_first_fig_data['hosname'] = db_con_url['hosname']
anti_second_level_first_fig_data['btime'] = btime
anti_second_level_first_fig_data['etime'] = etime
anti_second_level_first_fig_data = json.dumps(anti_second_level_first_fig_data)
else:
if anti_second_level_first_fig_data['btime'] != btime or anti_second_level_first_fig_data['etime'] != etime:
anti_second_level_first_fig = get_second_lev_first_fig_date(engine, btime, etime)
anti_second_level_first_fig_data['anti_second_level_first_fig'] = anti_second_level_first_fig.to_json(orient='split',date_format='iso')
anti_second_level_first_fig_data['btime'] = btime
anti_second_level_first_fig_data['etime'] = etime
anti_second_level_first_fig_data = json.dumps(anti_second_level_first_fig_data)
else:
anti_second_level_first_fig = pd.read_json(anti_second_level_first_fig_data['anti_second_level_first_fig'], orient='split')
anti_second_level_first_fig_data = dash.no_update
fig_概览一级_科室映射缺失 = go.Figure()
bus_opts = anti_second_level_first_fig[['业务类型']].drop_duplicates().reset_index(drop=True)
# res_数据科室信息缺失及汇总 = anti_second_level_first_fig.sort_values(['month','业务类型'])
print(anti_second_level_first_fig)
for tem,bus in bus_opts.iterrows():
print(tem,)
print(bus,)
temp = anti_second_level_first_fig[anti_second_level_first_fig['业务类型']==bus['业务类型']]
print(temp)
temp = temp.sort_values(['month'])
if temp.shape[0]>0:
fig_概览一级_科室映射缺失.add_trace(
go.Scatter(x=temp['month'], y=temp['num'], name=bus['业务类型'] ,marker_color=px.colors.qualitative.Dark24[tem] )
)
fig_概览一级_科室映射缺失.update_layout(
margin=dict(l=20, r=20, t=20, b=20),
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1
)
)
fig_概览一级_科室映射缺失.update_yaxes(title_text="问题数量")
fig_概览一级_科室映射缺失.update_xaxes(title_text="月份")
return fig_概览一级_科室映射缺失,anti_second_level_first_fig_data
# Download the detail records behind level-2 figure 1
@app.callback(
Output('anti_second_level_first_fig_date_detail', 'data'),
Input('anti_second_level_first_fig_data_detail_down','n_clicks'),
Input("db_con_url", "data"),
Input("count_time", "data"),
prevent_initial_call=True,
)
def download_first_level_third_fig_data_detail(n_clicks,db_con_url,count_time):
if db_con_url is None :
return dash.no_update
else:
if n_clicks is not None and n_clicks>0:
n_clicks = 0
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
engine = create_engine(db_con_url['db'])
btime = count_time['btime'][0:7]
etime = count_time['etime'][0:7]
bus_dic = {
'用药目的缺失': f" select * from ANTIBIOTICS where (substr(BEGINTIME,1,7)>='{btime}' and substr(BEGINTIME,1,7)<='{etime}') ",
'药物等级缺失': f" select t1.* from ANTIBIOTICS t1 where (substr(t1.BEGINTIME,1,7)>='{btime}' and substr(t1.BEGINTIME,1,7)<='{etime}') ",
'医嘱开始时间大于结束时间': f" select t1.* from ANTIBIOTICS t1 where (substr(BEGINTIME,1,7)>='{btime}' and substr(BEGINTIME,1,7)<='{etime}') and BEGINTIME is not null and ENDTIME is not null and BEGINTIME>endtime ",
'医嘱时间在出入院时间之外': f""" select t1.* from ANTIBIOTICS t1,overall t2 where
( t1.BEGINTIME is not null and t1.ENDTIME is not null and t2.in_time is not null and t2.out_time is not null)
and t1.caseid = t2.caseid
and (t1.BEGINTIME<t2.IN_TIME or t1.BEGINTIME > t2.OUT_TIME or t1.ENDTIME<t2.IN_TIME or t1.ENDTIME > t2.OUT_TIME )
                                                and (substr(t1.BEGINTIME,1,7)>='{btime}' and substr(t1.BEGINTIME,1,7)<='{etime}')
""",
}
output = io.BytesIO()
writer = pd.ExcelWriter(output, engine='xlsxwriter')
for key in bus_dic.keys():
try:
temp = pd.read_sql(bus_dic[key], con=engine)
if temp.shape[0] > 0:
temp.to_excel(writer, sheet_name=key)
except:
error_df = pd.DataFrame(['明细数据获取出错'], columns=[key])
error_df.to_excel(writer, sheet_name=key)
writer.save()
data = output.getvalue()
hosName = db_con_url['hosname']
return dcc.send_bytes(data, f'{hosName}抗菌药物问题数据明细.xlsx')
else:
return dash.no_update
# # ----------------------------------------------------------------------------------------------------- Level-2 figure 2 ----------------------------------------------------------------------------------------------------------------------
# Fetch the data for level-2 figure 2 (antibiotic orders per drug per month)
def get_second_level_second_fig_date(engine,btime,etime):
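    """Count antibiotic orders per drug name (ANAME) and month between btime
    and etime."""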
res_业务逻辑问题数据汇总 = pd.read_sql(f" select ANAME as 抗菌药物,count(1) as num , substr(BEGINTIME,1,7) as 月份 from antibiotics where substr(BEGINTIME,1,7)>='{btime}' and substr(BEGINTIME,1,7)<='{etime}' group by substr(BEGINTIME,1,7),ANAME ",con=engine)
return res_业务逻辑问题数据汇总
# Update level-2 figure 2
@app.callback(
Output('anti_second_level_second_fig','figure'),
Output('anti_second_level_second_fig_data','data'),
Input('anti_second_level_second_fig_data','data'),
Input("db_con_url", "data"),
Input("count_time", "data"),
# prevent_initial_call=True
)
def update_second_level_fig(anti_second_level_second_fig_data,db_con_url,count_time):
if db_con_url is None:
return dash.no_update
else:
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
engine = create_engine(db_con_url['db'])
btime = count_time['btime'][0:7]
etime = count_time['etime'][0:7]
if anti_second_level_second_fig_data is None:
anti_second_level_second_fig_data = {}
anti_second_level_second_fig = get_second_level_second_fig_date(engine, btime, etime)
anti_second_level_second_fig_data['anti_second_level_second_fig'] = anti_second_level_second_fig.to_json(orient='split', date_format='iso')
anti_second_level_second_fig_data['hosname'] = db_con_url['hosname']
anti_second_level_second_fig_data['btime'] = btime
anti_second_level_second_fig_data['etime'] = etime
anti_second_level_second_fig_data = json.dumps(anti_second_level_second_fig_data)
else:
anti_second_level_second_fig_data = json.loads(anti_second_level_second_fig_data)
if db_con_url['hosname'] != anti_second_level_second_fig_data['hosname']:
anti_second_level_second_fig = get_second_level_second_fig_date(engine, btime, etime)
anti_second_level_second_fig_data['anti_second_level_second_fig'] = anti_second_level_second_fig.to_json(orient='split',date_format='iso')
anti_second_level_second_fig_data['hosname'] = db_con_url['hosname']
anti_second_level_second_fig_data['btime'] = btime
anti_second_level_second_fig_data['etime'] = etime
anti_second_level_second_fig_data = json.dumps(anti_second_level_second_fig_data)
else:
if anti_second_level_second_fig_data['btime'] != btime or anti_second_level_second_fig_data['etime'] != etime:
anti_second_level_second_fig = get_second_level_second_fig_date(engine, btime, etime)
anti_second_level_second_fig_data['anti_second_level_second_fig'] = anti_second_level_second_fig.to_json(orient='split',date_format='iso')
anti_second_level_second_fig_data['btime'] = btime
anti_second_level_second_fig_data['etime'] = etime
anti_second_level_second_fig_data = json.dumps(anti_second_level_second_fig_data)
else:
anti_second_level_second_fig = pd.read_json(anti_second_level_second_fig_data['anti_second_level_second_fig'], orient='split')
anti_second_level_second_fig_data = dash.no_update
antis_dict = discriminated_antis(anti_second_level_second_fig[['抗菌药物']].drop_duplicates())
anti_second_level_second_fig = anti_second_level_second_fig.merge(antis_dict,on='抗菌药物',how='left')
anti_second_level_second_fig['抗菌药物通用名'] = np.where(anti_second_level_second_fig['抗菌药物通用名'].isnull(),anti_second_level_second_fig['抗菌药物'],anti_second_level_second_fig['抗菌药物通用名'])
anti_second_level_second_fig = anti_second_level_second_fig.sort_values(['月份'])
fig = px.bar(anti_second_level_second_fig, x="月份", y="num", color='抗菌药物通用名' ,color_discrete_sequence=px.colors.qualitative.Dark24)
fig.update_layout(
margin=dict(l=20, r=20, t=20, b=20),
#title=f"{btime}--{etime}",
)
fig.update_yaxes(title_text="医嘱数量", )
fig.update_xaxes(title_text="月份", )
return fig,anti_second_level_second_fig_data
# ----------------------------------------------------------------------------------------------------- Level-2 figure 3 ----------------------------------------------------------------------------------------------------------------------
# Fetch the data for level-2 figure 3 (antibiotic orders per drug level per month)
def get_second_level_third_fig_date(engine,btime,etime):
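    """Count antibiotic orders per drug level (ALEVEL) and month between
    btime and etime."""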
res_业务逻辑问题数据汇总 = pd.read_sql(
f" select ALEVEL as 抗菌药物等级,count(1) as num , substr(BEGINTIME,1,7) as 月份 from antibiotics where substr(BEGINTIME,1,7)>='{btime}' and substr(BEGINTIME,1,7)<='{etime}' and ALEVEL is not null group by substr(BEGINTIME,1,7),ALEVEL ",
con=engine)
return res_业务逻辑问题数据汇总
# Update level-2 figure 3
@app.callback(
Output('anti_second_level_third_fig','figure'),
Output('anti_second_level_third_fig_data', 'data'),
Input('anti_second_level_third_fig_data', 'data'),
Input("db_con_url", "data"),
Input("count_time", "data"),
)
def update_third_level_first_fig(anti_second_level_third_fig_data,db_con_url,count_time):
if db_con_url is None:
return dash.no_update
else:
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
engine = create_engine(db_con_url['db'])
btime = count_time['btime'][0:7]
etime = count_time['etime'][0:7]
if anti_second_level_third_fig_data is None:
anti_second_level_third_fig_data = {}
anti_second_level_third_fig = get_second_level_third_fig_date(engine, btime, etime)
anti_second_level_third_fig_data['anti_second_level_third_fig'] = anti_second_level_third_fig.to_json( orient='split', date_format='iso')
anti_second_level_third_fig_data['hosname'] = db_con_url['hosname']
anti_second_level_third_fig_data['btime'] = btime
anti_second_level_third_fig_data['etime'] = etime
anti_second_level_third_fig_data = json.dumps(anti_second_level_third_fig_data)
else:
anti_second_level_third_fig_data = json.loads(anti_second_level_third_fig_data)
if db_con_url['hosname'] != anti_second_level_third_fig_data['hosname']:
anti_second_level_third_fig = get_second_level_third_fig_date(engine, btime, etime)
anti_second_level_third_fig_data['anti_second_level_third_fig'] = anti_second_level_third_fig.to_json(orient='split', date_format='iso')
anti_second_level_third_fig_data['hosname'] = db_con_url['hosname']
anti_second_level_third_fig_data['btime'] = btime
anti_second_level_third_fig_data['etime'] = etime
anti_second_level_third_fig_data = json.dumps(anti_second_level_third_fig_data)
else:
if anti_second_level_third_fig_data['btime'] != btime or anti_second_level_third_fig_data['etime'] != etime:
anti_second_level_third_fig = get_second_level_third_fig_date(engine, btime, etime)
anti_second_level_third_fig_data['anti_second_level_third_fig'] = anti_second_level_third_fig.to_json(orient='split', date_format='iso')
anti_second_level_third_fig_data['btime'] = btime
anti_second_level_third_fig_data['etime'] = etime
anti_second_level_third_fig_data = json.dumps(anti_second_level_third_fig_data)
else:
anti_second_level_third_fig = pd.read_json( anti_second_level_third_fig_data['anti_second_level_third_fig'], orient='split')
anti_second_level_third_fig_data = dash.no_update
anti_second_level_third_fig = anti_second_level_third_fig.sort_values(['月份'])
fig = px.bar(anti_second_level_third_fig, x="月份", y="num", color='抗菌药物等级', color_discrete_sequence=px.colors.qualitative.Dark24)
fig.update_layout(
margin=dict(l=30, r=30, t=30, b=30),
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1
),
)
fig.update_yaxes(title_text="医嘱数量", )
fig.update_xaxes(title_text="月份", )
return fig,anti_second_level_third_fig_data
# # ----------------------------------------------------------------------------------------------------- Level-3 figure 1 ----------------------------------------------------------------------------------------------------------------------
# # Fetch the data for level-3 figure 1 (bacteria detections)
def get_third_level_first_fig_date(engine,btime,etime):
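    """Count detections of the monitored resistant bacteria per species and
    month between btime and etime."""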
res = pd.read_sql(f"""select substr(REQUESTTIME,1,7) as month,BACTERIA as 菌,count(1) as num from BACTERIA where BACTERIA in ('大肠埃希菌', '鲍曼不动杆菌', '肺炎克雷伯菌', '金黄色葡萄球菌', '铜绿假单胞菌', '屎肠球菌', '粪肠球菌')
and substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}'
group by BACTERIA, substr(REQUESTTIME,1,7)
""",con=engine)
return res
# Update level-3 figure 1
@app.callback(
Output('bar_third_level_first_fig', 'figure'),
Output('bar_third_level_first_fig_data', 'data'),
Input('bar_third_level_first_fig_data', 'data'),
Input("db_con_url", "data"),
Input("count_time", "data"),
)
def update_third_level_first_fig(bar_third_level_first_fig_data,db_con_url,count_time):
if db_con_url is None:
return dash.no_update
else:
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
engine = create_engine(db_con_url['db'])
btime = count_time['btime'][0:7]
etime = count_time['etime'][0:7]
if bar_third_level_first_fig_data is None:
bar_third_level_first_fig_data = {}
bar_third_level_first_fig = get_third_level_first_fig_date(engine, btime, etime)
bar_third_level_first_fig_data['bar_third_level_first_fig'] = bar_third_level_first_fig.to_json( orient='split', date_format='iso')
bar_third_level_first_fig_data['hosname'] = db_con_url['hosname']
bar_third_level_first_fig_data['btime'] = btime
bar_third_level_first_fig_data['etime'] = etime
bar_third_level_first_fig_data = json.dumps(bar_third_level_first_fig_data)
else:
bar_third_level_first_fig_data = json.loads(bar_third_level_first_fig_data)
if db_con_url['hosname'] != bar_third_level_first_fig_data['hosname']:
bar_third_level_first_fig = get_third_level_first_fig_date(engine, btime, etime)
bar_third_level_first_fig_data['bar_third_level_first_fig'] = bar_third_level_first_fig.to_json(orient='split', date_format='iso')
bar_third_level_first_fig_data['hosname'] = db_con_url['hosname']
bar_third_level_first_fig_data['btime'] = btime
bar_third_level_first_fig_data['etime'] = etime
bar_third_level_first_fig_data = json.dumps(bar_third_level_first_fig_data)
else:
if bar_third_level_first_fig_data['btime'] != btime or bar_third_level_first_fig_data['etime'] != etime:
bar_third_level_first_fig = get_third_level_first_fig_date(engine, btime, etime)
bar_third_level_first_fig_data['bar_third_level_first_fig'] = bar_third_level_first_fig.to_json(orient='split', date_format='iso')
bar_third_level_first_fig_data['btime'] = btime
bar_third_level_first_fig_data['etime'] = etime
bar_third_level_first_fig_data = json.dumps(bar_third_level_first_fig_data)
else:
bar_third_level_first_fig = pd.read_json( bar_third_level_first_fig_data['bar_third_level_first_fig'], orient='split')
bar_third_level_first_fig_data = dash.no_update
bar_third_level_first_fig = bar_third_level_first_fig.sort_values(['month' ])
print(bar_third_level_first_fig)
fig1 = px.line(bar_third_level_first_fig, x='month', y= 'num' , color= '菌', color_discrete_sequence=px.colors.qualitative.Dark24)
fig1.update_layout(
margin=dict(l=30, r=30, t=30, b=30),
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1
),
)
fig1.update_yaxes(title_text= '菌检出数量', )
fig1.update_xaxes(title_text= '月份', )
return fig1,bar_third_level_first_fig_data
# # ----------------------------------------------------------------------------------------------------- Level-3 figure 2 ----------------------------------------------------------------------------------------------------------------------
# # Fetch the data for level-3 figure 2 (bacteria detection data-quality checks)
def get_third_level_second_fig_date(engine,btime,etime):
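    """Collect monthly counts of BACTERIA data-quality issues between btime
    and etime (missing detection type, inside/outside flag, specimen or test
    subject, request time after report time, request time outside the
    admission period)."""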
res_信息缺失及汇总 = pd.DataFrame(columns=['业务类型', 'num', 'month'])
bus_dic = {
'菌检出类型': f" select '菌检出类型缺失' as 业务类型,count(1) as num ,substr(REQUESTTIME,1,7) as month from BACTERIA where (substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}') and BTYPE is null group by substr(REQUESTTIME,1,7) ",
'院内外': f" select '院内外标识缺失' as 业务类型,count(1) as num ,substr(REQUESTTIME,1,7) as month from BACTERIA t1 where (substr(t1.REQUESTTIME,1,7)>='{btime}' and substr(t1.REQUESTTIME,1,7)<='{etime}') and OUTSIDE is null group by substr(REQUESTTIME,1,7) ",
'标本缺失': f" select '标本缺失' as 业务类型,count(1) as num ,substr(REQUESTTIME,1,7) as month from BACTERIA t1 where (substr(t1.REQUESTTIME,1,7)>='{btime}' and substr(t1.REQUESTTIME,1,7)<='{etime}') and SPECIMEN is null group by substr(REQUESTTIME,1,7) ",
'检验项目': f" select '检验项目缺失' as 业务类型,count(1) as num ,substr(REQUESTTIME,1,7) as month from BACTERIA t1 where (substr(t1.REQUESTTIME,1,7)>='{btime}' and substr(t1.REQUESTTIME,1,7)<='{etime}') and SUBJECT is null group by substr(REQUESTTIME,1,7) ",
'申请时间大于报告时间': f" select '菌检出申请时间大于报告时间' as 业务类型,count(1) as num ,substr(REQUESTTIME,1,7) as month from BACTERIA t1 where (substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}') and REQUESTTIME is not null and REPORTTIME is not null and REQUESTTIME>REPORTTIME group by substr(REQUESTTIME,1,7) ",
'申请时间在出入院时间之外': f""" select '菌检出申请时间在出入院时间之外' as 业务类型,count(1) as num ,substr(REQUESTTIME,1,7) as month from BACTERIA t1,overall t2 where
( t1.REQUESTTIME is not null and t2.in_time is not null and t2.out_time is not null)
and t1.caseid = t2.caseid
and (t1.REQUESTTIME<t2.IN_TIME or t1.REQUESTTIME > t2.OUT_TIME )
and (substr(t1.REQUESTTIME,1,7)>='{btime}' and substr(t1.REQUESTTIME,1,7)<='{etime}')
group by substr(REQUESTTIME,1,7)
""",
}
for bus in bus_dic:
res_信息缺失及汇总 = res_信息缺失及汇总.append(pd.read_sql(bus_dic[bus], con=engine))
return res_信息缺失及汇总
# Update level-3 figure 2
@app.callback(
Output('bar_third_level_second_fig', 'figure'),
Output('bar_third_level_second_fig_data', 'data'),
Input('bar_third_level_second_fig_data', 'data'),
Input("db_con_url", "data"),
Input("count_time", "data"),
)
def update_third_level_second_fig(bar_third_level_second_fig_data,db_con_url,count_time):
if db_con_url is None:
return dash.no_update
else:
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
engine = create_engine(db_con_url['db'])
btime = count_time['btime'][0:7]
etime = count_time['etime'][0:7]
if bar_third_level_second_fig_data is None:
bar_third_level_second_fig_data = {}
bar_third_level_second_fig = get_third_level_second_fig_date(engine, btime, etime)
bar_third_level_second_fig_data['bar_third_level_second_fig'] = bar_third_level_second_fig.to_json( orient='split', date_format='iso')
bar_third_level_second_fig_data['hosname'] = db_con_url['hosname']
bar_third_level_second_fig_data['btime'] = btime
bar_third_level_second_fig_data['etime'] = etime
bar_third_level_second_fig_data = json.dumps(bar_third_level_second_fig_data)
else:
bar_third_level_second_fig_data = json.loads(bar_third_level_second_fig_data)
if db_con_url['hosname'] != bar_third_level_second_fig_data['hosname']:
bar_third_level_second_fig = get_third_level_second_fig_date(engine, btime, etime)
bar_third_level_second_fig_data['bar_third_level_second_fig'] = bar_third_level_second_fig.to_json(orient='split', date_format='iso')
bar_third_level_second_fig_data['hosname'] = db_con_url['hosname']
bar_third_level_second_fig_data['btime'] = btime
bar_third_level_second_fig_data['etime'] = etime
bar_third_level_second_fig_data = json.dumps(bar_third_level_second_fig_data)
else:
if bar_third_level_second_fig_data['btime'] != btime or bar_third_level_second_fig_data['etime'] != etime:
bar_third_level_second_fig = get_third_level_second_fig_date(engine, btime, etime)
bar_third_level_second_fig_data['bar_third_level_second_fig'] = bar_third_level_second_fig.to_json(orient='split', date_format='iso')
bar_third_level_second_fig_data['btime'] = btime
bar_third_level_second_fig_data['etime'] = etime
bar_third_level_second_fig_data = json.dumps(bar_third_level_second_fig_data)
else:
bar_third_level_second_fig = | pd.read_json( bar_third_level_second_fig_data['bar_third_level_second_fig'], orient='split') | pandas.read_json |
"""
birdspotter is a python package providing a toolkit to measures the social influence and botness of twitter users.
"""
import simplejson
from tqdm import tqdm
import wget
import zipfile
import pandas as pd
import pickle as pk
import numpy as np
from birdspotter.utils import *
import traceback
import collections.abc
from xgboost.sklearn import XGBClassifier
import xgboost as xgb
import os
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import CountVectorizer
import dateutil
from birdspotter.user_influence import P, influence
from itertools import islice
import ijson
class BirdSpotter:
"""Birdspotter measures the social influence and botness of twitter users.
This class takes a twitter dump in (json or jsonl format) and extract metrics bot and influence metrics for the users.
The class will download word2vec embeddings if they are not specified.
It exposes processed data from the tweet dumps.
Attributes:
cascadeDataframe (:class:`pandas.DataFrame`): A dataframe of tweets ordered by cascades and time (the column casIndex denotes which cascade each tweet belongs to)
featureDataframe (:class:`pandas.DataFrame`): A dataframe of users with their respective botness and influence scores.
hashtagDataframe (:class:`pandas.DataFrame`): A dataframe of the text features for hashtags.
"""
def __init__(self, path, tweetLimit = None, embeddings='download', quiet=False):
"""Birdspotter measures the social influence and botness of twitter users.
Parameters
----------
path : str
The path to a tweet json or jsonl file containing the tweets for analysis.
tweetLimit : int, optional
A limit on the number of tweets to process if the tweet dump is too large, if None then all tweets are processed, by default None
embeddings : collections.Mapping or str, optional
A method for loading word2vec embeddings, which accepts are path to embeddings, a mapping object or a pickle object. Refer to setWord2VecEmbeddings for details. By default 'download'
quiet : bool, optional
Determines if debug statements will be printed or not, by default False
"""
self.word2vecEmbeddings = None
self.quiet = quiet
self.extractTweets(path, tweetLimit = tweetLimit, embeddings=embeddings)
def __pprint(self, message):
if not self.quiet:
print(message)
def setWord2VecEmbeddings(self, embeddings='download', forceReload=True):
"""Sets the word2vec embeddings. The embeddings can be a path to a pickle or txt file, a mapping object or the string 'download' which will automatically download and use the FastText 'wiki-news-300d-1M.vec' if not available in the current path.
Parameters
----------
embeddings : collections.Mapping or str or None, optional
A method for loading word2vec embeddings. A path to a embeddings pickle or txt file, a mapping object, the string 'download', by default 'download'. If None, it does nothing.
forceReload : bool, optional
If the embeddings are already set, forceReload determines whether to update them, by default True
"""
if not forceReload and self.word2vecEmbeddings is not None:
return
if embeddings is None:
return
elif isinstance(embeddings, str) and embeddings == 'download':
if os.path.isfile('./wiki-news-300d-1M.vec'):
self.__pprint("Loading Fasttext wiki-news-300d-1M.vec Word2Vec Embeddings...")
with open('./wiki-news-300d-1M.vec',"r") as f:
model = {}
if not self.quiet:
pbar = tqdm(total=1000000)
for line in f:
splitLine = line.split()
word = splitLine[0]
embedding = np.array([float(val) for val in splitLine[1:]])
model[word] = embedding
if not self.quiet:
pbar.update(1)
if not self.quiet:
pbar.close()
self.word2vecEmbeddings = model
self.__pprint("Finished loading Word2Vec Embeddings")
else:
try:
self.__pprint("Downloading Fasttext embeddings")
filename = wget.download('https://dl.fbaipublicfiles.com/fasttext/vectors-english/wiki-news-300d-1M.vec.zip')
self.__pprint('\n')
with zipfile.ZipFile(filename, 'r') as zip_ref:
zip_ref.extractall('./')
self.__pprint("Loading downloaded Fasttext wiki-news-300d-1M.vec Word2Vec Embeddings...")
with open('./wiki-news-300d-1M.vec',"r") as f:
model = {}
if not self.quiet:
pbar = tqdm(total=1000000)
for line in f:
splitLine = line.split()
word = splitLine[0]
embedding = np.array([float(val) for val in splitLine[1:]])
model[word] = embedding
if not self.quiet:
pbar.update(1)
if not self.quiet:
pbar.close()
self.word2vecEmbeddings = model
self.__pprint("Finished loading Word2Vec Embeddings")
except Exception as e:
print(e)
elif isinstance(embeddings, str):
embeddingsPath = embeddings
_,fileextension = os.path.splitext(embeddingsPath)
if fileextension == '.pickle':
self.__pprint("Loading Word2Vec Embeddings...")
with open(embeddingsPath,"rb") as f:
self.word2vecEmbeddings = pk.load(f)
self.__pprint("Finished loading Word2Vec Embeddings")
elif fileextension == '.txt':
self.__pprint("Loading Word2Vec Embeddings...")
with open(embeddingsPath,"r") as f:
model = {}
for line in f:
splitLine = line.split()
word = splitLine[0]
embedding = np.array([float(val) for val in splitLine[1:]])
model[word] = embedding
self.word2vecEmbeddings = model
self.__pprint("Finished loading Word2Vec Embeddings")
        elif isinstance(embeddings, collections.abc.Mapping):
self.word2vecEmbeddings = embeddings
def extractTweets(self, filePath, tweetLimit = None, embeddings='download'):
"""Extracts tweets from a json or jsonl file and generates cascade, feature and hashtag dataframes as class attributes.
Note that we use the file extension to determine how to handle the file.
Parameters
----------
filePath : str
The path to a tweet json or jsonl file containing the tweets for analysis.
tweetLimit : int, optional
A limit on the number of tweets to process if the tweet dump is too large, if None then all tweets are processed, by default None
embeddings : collections.Mapping or str or None, optional
A method for loading word2vec embeddings. A path to a embeddings pickle or txt file, a mapping object, the string 'download', by default 'download'. If None, it does nothing.
Returns
-------
DataFrame
A dataframe of user's botness and influence scores (and other features).
"""
# Appending DataFrames line by line is inefficient, because it generates a
# new dataframe each time. It better to get the entire list and them concat.
user_list = []
tweet_list = []
w2v_content_list = []
w2v_description_list = []
cascade_list = []
self.__pprint("Starting Tweet Extraction")
_,fileextension = os.path.splitext(filePath)
raw_tweets = []
with open(filePath, encoding="utf-8") as f:
if fileextension == '.jsonl':
raw_tweets = map(simplejson.loads, list(islice(f, tweetLimit)))
elif fileextension == '.json':
raw_tweets = list(islice(ijson.items(f, 'item'),tweetLimit))
else:
raise Exception('Not a valid tweet dump. Needs to be either jsonl or json, with the extension explicit.')
if not self.quiet:
pbar = tqdm()
for j in raw_tweets:
if not self.quiet:
pbar.update(1)
try:
temp_user = {}
temp_tweet = {}
temp_text = (j['text'] if 'text' in j.keys() else j['full_text'])
temp_content = {'status_text': temp_text, 'user_id' : j['user']['id_str']}
temp_description = {'description':j['user']['description'], 'user_id' : j['user']['id_str']}
temp_cascade = {}
if 'retweeted_status' in j:
temp_cascade['cascade_id'] = j['retweeted_status']['id_str']
temp_cascade['original_created_at'] = j['retweeted_status']['created_at']
temp_cascade['created_at'] = j['created_at']
temp_cascade['retweeted'] = True
else:
temp_cascade['cascade_id'] = j['id_str']
temp_cascade['original_created_at'] = j['created_at']
temp_cascade['created_at'] = j['created_at']
temp_cascade['retweeted'] = False
temp_cascade['follower_count'] = j['user']['followers_count']
temp_cascade['status_text'] = temp_text
temp_cascade['screen_name'] = j['user']['screen_name']
temp_cascade['hashtag_entities'] = [e['text'] for e in j['entities']['hashtags']]
temp_user['screen_name'] = j['user']['screen_name']
temp_user['url'] = j['user']['profile_image_url_https']
temp_user['description'] = j['user']['description']
temp_user['followers_count'] = j['user']['followers_count']
temp_cascade['user_id'] = j['user']['id_str']
temp_user['user_id'] = j['user']['id_str']
temp_tweet['user_id'] = j['user']['id_str']
temp_user.update(getTextFeatures('name',j['user']['name']))
temp_user.update(getTextFeatures('location',j['user']['location']))
temp_user.update(getTextFeatures('description',j['user']['description']))
for key in ['statuses_count', 'listed_count', 'friends_count', 'followers_count']:
temp_user[key] = j['user'][key]
temp_user['verified'] = 1 if j['user']['verified'] else 0
temp_user['ff_ratio'] = (temp_user['followers_count'] + 1)/(temp_user['followers_count'] + temp_user['friends_count'] + 1)
n = datetime.now()
temp_user['years_on_twitter'] = (datetime(n.year, n.month, n.day) - datetime.strptime(j['user']['created_at'], '%a %b %d %H:%M:%S +0000 %Y')).days/365
temp_user['statuses_rate'] = (temp_user['statuses_count'] + 1)/(temp_user['years_on_twitter'] + .001)
temp_user['tweets_to_followers'] = (temp_user['statuses_count'] + 1)/(temp_user['followers_count'] + 1)
temp_user['retweet_count'] = j['retweet_count']
temp_user['favorite_count'] = j['favorite_count']
temp_user['favourites_count'] = j['user']['favourites_count']
temp_tweet.update(getTextFeatures('status_text',temp_text))
                # flag original tweets (neither retweets nor quotes)
                temp_tweet['n_tweets'] = 1 if 'retweeted_status' not in j and ('quoted_status_id' not in j) else 0
temp_tweet['n_retweets'] = 1 if 'retweeted_status' in j else 0
temp_tweet['n_quotes'] = 1 if 'quoted_status_id' in j else 0
temp_tweet['n_timeofday'] = hourofweekday(j['created_at'])
temp_tweet.update(getSource(j['source']))
user_list.append(temp_user)
tweet_list.append(temp_tweet)
w2v_content_list.append(temp_content)
w2v_description_list.append(temp_description)
cascade_list.append(temp_cascade)
except Exception as err:
traceback.print_tb(err.__traceback__)
if not self.quiet:
pbar.close()
# We are assuming that user data doesn't change much and if it does, we take that 'latest' as our feature
userDataframe = pd.DataFrame(user_list).fillna(0).set_index('user_id')
userDataframe = userDataframe[~userDataframe.index.duplicated(keep='last')]
tweetDataframe = pd.DataFrame(tweet_list).fillna(0).set_index('user_id')
n_retweets = tweetDataframe['n_retweets'].groupby('user_id').sum()
n_quoted = tweetDataframe['n_quotes'].groupby('user_id').sum()
tweetDataframe = tweetDataframe.groupby('user_id').mean()
tweetDataframe['n_retweets'] = n_retweets
tweetDataframe['n_quotes'] = n_quoted
self.cascadeDataframe = | pd.DataFrame(cascade_list) | pandas.DataFrame |
import csv
import os
import pandas as pd
import json
#
def search(dirname):
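    """Return the file names (without extension) found in dirname."""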
filenames = os.listdir(dirname)
itemList = []
for filename in filenames:
itemList.append(filename.split(".")[0])
return itemList
def dataReadAndProcess(itemName):
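    """Read ./Data/<itemName>.csv, write each row to ./Data/<itemName>.json
    (one JSON object per line) and return the raw rows as a DataFrame with
    date/open/high/low/close/amount/tradePrice columns."""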
    # df = pd.read_csv('./Data/'+itemName+'.csv')  # this alone would be enough, but it cannot be used with the current version; the same goes for save_csv
    # so the file is read manually and stored as a pandas.DataFrame
f = open('./Data/' + itemName + '.csv', 'r', encoding='utf-8')
rdr = csv.reader(f)
rawData = []
#
jsonfile = open('./Data/' + itemName + '.json', 'w', encoding='utf-8')
fieldnames = ("m_date", "openPrice", "highPrice", "lowPrice", "currentPrice", "volumn", "tradingValue")
reader = csv.DictReader(f, fieldnames)
for row in reader:
# json_string = json.dumps(json_object)
# print(json_string)
json.dump(row, jsonfile)
jsonfile.write('\n')
#
    f.seek(0)  # rewind: the DictReader pass above consumed the file handle
    for line in rdr:
        rawData.append(line)
    f.close()
    jsonfile.close()
column = ['date', 'open', 'high', 'low', 'close', 'amount', 'tradePrice']
df = | pd.DataFrame.from_records(rawData, columns=column) | pandas.DataFrame.from_records |
'''
Factor mimicking portfolio in China market
'''
#%%
from cProfile import label
import sys,os
sys.path.append(os.path.abspath(".."))
# %% import data
# Monthly return of stocks in China security market
import pandas as pd
month_return = pd.read_hdf('.\data\month_return.h5', key='month_return')
company_data = pd.read_hdf('.\data\last_filter_pe.h5', key='data')
trade_data = pd.read_hdf('.\data\mean_filter_trade.h5', key='data')
# %% data preprocessing
# select the A share stock
month_return = month_return[month_return['Markettype'].isin([1, 4, 16])]
# % flag the stocks whose total market value is at or above the 30th percentile in each month
def percentile(stocks) :
return stocks >= stocks.quantile(q=.3)
month_return['cap'] = month_return.groupby(['Trdmnt'])['Msmvttl'].apply(percentile)
# %% Construct proxy variable
import numpy as np
# RMW
# in this demo, the ROE(TTM) are used
# ROE(TTM) = PBV1B/PE(TTM)
company_data['ROE(TTM)'] = company_data['PBV1B']/company_data['PE1TTM']
# CMA
# % calculate the total asset
# asset = debt + equity
# debt = company_value - market_value
# equity = market_value / PB
company_data['debt'] = company_data['EV1'] - company_data['MarketValue']
company_data['equity'] = company_data['MarketValue']/company_data['PBV1A']
company_data['asset'] = company_data['debt'] + company_data['equity']
# asset growth rate
company_data['asset_growth_rate'] = company_data['asset'].groupby(['Symbol']).diff(12)/company_data['asset']
# Momentum
month_return['rolling_12'] = np.array(month_return.groupby(['Stkcd'])['Mretwd'].rolling(12).sum())
month_return['momentum'] = month_return['rolling_12'] - month_return['Mretwd']
# Turnover
trade_data['rolling_Turnover'] = np.array(trade_data['Turnover'].groupby('Symbol').rolling(12).mean())
trade_data['specific_Turnover'] = trade_data['Turnover'] / trade_data['rolling_Turnover']
# %% merge data
from pandas.tseries.offsets import *
month_return['Stkcd_merge'] = month_return['Stkcd'].astype(dtype='string')
month_return['Date_merge'] = | pd.to_datetime(month_return['Trdmnt']) | pandas.to_datetime |
import os
from typing import List, Dict
import json
import argparse
import logging
from tqdm import trange
import ftfy
import pandas as pd
import torch
from torch import nn
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from torch.optim import AdamW
from transformers import BertPreTrainedModel, BertModel, BertTokenizer, get_linear_schedule_with_warmup
from seqeval.metrics import accuracy_score
logging.basicConfig(format="%(asctime)s %(message)s", level=logging.DEBUG)
# hyper parameters. Note: they are fixed and not fine-tuned on the validation set for the purpose of the exercise
MAX_SEQUENCE_LENGTH = 32
BATCH_SIZE = 32
EPOCHS = 2
MAX_GRAD_NORM = 1.0
LEARNING_RATE = 3e-5
EPSILON = 1e-8
FEED_FORWARD_DROPOUT = 0.3
FULL_FINE_TUNING = True
def _load_data(data_file: str) -> List[Dict]:
"""
Method load data from JSONL file and drops records with noise, such as:
1. value is empty.
2. value is not contained in the sentence
:param data_file: path to JSON file
:return: List[Dict]
"""
results = []
with open(data_file, "r") as in_fp:
for json_line in in_fp:
try:
utterance_json = json.loads(json_line.strip())
value = utterance_json["value"]
parameter = utterance_json["parameter"]
utterance = utterance_json["utterance"]
if not value:
logging.warning(f"value is empty in: {utterance_json}")
continue
if not parameter:
logging.warning(f"parameter is empty in: {utterance_json}")
continue
if value not in utterance:
logging.warning(f"value: '{value}' not in utterance: {utterance}")
continue
results.append(utterance_json)
except json.decoder.JSONDecodeError:
logging.warning(f"line: {json_line.strip()} has a broken json schema")
return results
def _encode_json(tokenizer, sample_json: dict, max_sequence_length: int) -> Dict:
"""
Method enrich utterance json with encoded utterance, value and label for training later.
:param tokenizer: BERT tokenizer
:param sample_json: utterance JSON
:param max_sequence_length: max sequence length (BERT padding is performed if necessary)
:return: Dict
"""
utterance = sample_json["utterance"]
# convert non UTF-8 characters to their matching character in the utterance
utterance = ftfy.fix_text(utterance)
pair_encoded = tokenizer.encode_plus(
utterance,
sample_json["parameter"],
add_special_tokens=True,
pad_to_max_length=True,
max_length=max_sequence_length,
)
value = sample_json["value"]
# for sake of simplicity, we assume the value token after WordPiece tokenization consist of only one word
# it should be improved in the future, since it's not always the case.
value_token_id = tokenizer.encode_plus(value, add_special_tokens=False)["input_ids"][0]
pair_input_ids = pair_encoded["input_ids"]
if value_token_id not in pair_input_ids:
logging.warning(f"value {value} not in utterance as a word: {utterance}")
return {}
value_token_position = pair_input_ids.index(value_token_id)
token_type_ids = pair_encoded["token_type_ids"]
# labels is a vector of dimension 1 with the correct token index
labels = [value_token_position]
assert len(pair_input_ids) == len(token_type_ids)
assert pair_input_ids[value_token_position] == value_token_id
return dict(
**sample_json,
input_ids=pair_input_ids,
token_type_ids=token_type_ids,
labels=labels,
)
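def _demo_encode_json() -> Dict:
    # Hedged usage sketch, not part of the original pipeline: the sample utterance is made up
    # and the tokenizer checkpoint is only an assumption.
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    sample = {"utterance": "wake me up at seven", "parameter": "time", "value": "seven"}
    return _encode_json(tokenizer, sample, MAX_SEQUENCE_LENGTH)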
class BertForValueExtraction(BertPreTrainedModel):
"""
This class represents the model for value extraction in a given query.
The model is based on fine-tuned BERT (small) with a multi-layer perceptron on top of each token,
that outputs a logit. Finally, the model takes the logits out of each feed-forward of each token,
performs a softmax operation to get probabilities for each token and calculates a cross-entropy loss.
    Disclaimer: WordPiece sometimes splits the true value into more than one token, while the model
    predicts exactly one token. This should be fixed in the future.
"""
def __init__(self, config):
super().__init__(config)
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.ff = nn.Sequential(nn.Linear(config.hidden_size, 100), nn.ReLU(), nn.Dropout(FEED_FORWARD_DROPOUT),
nn.Linear(100, 50), nn.ReLU(), nn.Dropout(FEED_FORWARD_DROPOUT),
nn.Linear(50, 1))
self.init_weights()
def forward(
self, input_ids=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None,
labels=None
):
outputs = self.bert(
input_ids,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
logits = self.ff(sequence_output)
outputs = (logits,)
if labels is not None:
            # get log-probabilities out of logits (nll_loss expects log-probabilities, not probabilities)
            predicted_log_probs = torch.log_softmax(logits, 1)
            # negative log likelihood loss over the token positions
            loss = torch.nn.functional.nll_loss(predicted_log_probs, labels)
outputs = (loss,) + outputs
return outputs # (loss), scores
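def _demo_model_forward():
    # Hedged sketch of a single forward pass; the checkpoint name and example inputs are
    # assumptions for illustration, not something the original exercise specifies.
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    model = BertForValueExtraction.from_pretrained("bert-base-uncased")
    model.eval()
    encoded = tokenizer.encode_plus("wake me up at seven", "time", add_special_tokens=True,
                                    pad_to_max_length=True, max_length=MAX_SEQUENCE_LENGTH)
    input_ids = torch.tensor([encoded["input_ids"]])
    token_type_ids = torch.tensor([encoded["token_type_ids"]])
    with torch.no_grad():
        (logits,) = model(input_ids, token_type_ids=token_type_ids)
    # predicted value-token position for each sequence in the batch
    return logits.squeeze(-1).argmax(dim=1)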
def _train_model(model: BertForValueExtraction, optimizer, scheduler, train_data_loader, val_data_loader) -> List[int]:
"""
Main method to train & evaluate model.
:param model: BertForValueExtraction object
:param optimizer: optimizer
:param scheduler: scheduler
:param train_data_loader: training DataLoader object
:param val_data_loader: validation DataLoader object
:return: List[int] - validation predictions
"""
val_predictions = []
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # move the model to the same device as the batches (missing in the original, which breaks GPU runs)
    model.to(device)
    logging.info(f"Using device: {device}")
for epoch_number in trange(EPOCHS, desc="Epoch"):
# put the model into training mode.
model.train()
# reset the total loss for this epoch.
total_loss = 0
# training loop
train_true_labels, train_predictions = [], []
for step, batch in enumerate(train_data_loader):
# add batch to gpu if available
batch = tuple(t.to(device) for t in batch)
b_input_ids, b_token_type_ids, b_labels = batch
# always clear any previously calculated gradients before performing a backward pass
model.zero_grad()
# forward pass
# This will return the loss (together with the model output) because we have provided the `labels`
outputs = model(b_input_ids, token_type_ids=b_token_type_ids, labels=b_labels)
# get the loss
loss = outputs[0]
# move logits and labels to CPU
logits = outputs[1].detach().cpu().numpy()
b_labels = b_labels.to("cpu").numpy()
train_predictions.extend(logits.argmax(1))
train_true_labels.extend(b_labels)
# perform a backward pass to calculate the gradients
loss.backward()
# track train loss
total_loss += loss.item()
# clip the norm of the gradient
# this is to help prevent the "exploding gradients" problem
torch.nn.utils.clip_grad_norm_(parameters=model.parameters(), max_norm=MAX_GRAD_NORM)
# update parameters
optimizer.step()
# Update the learning rate
scheduler.step()
# calculate the average loss over the training data.
avg_train_loss = total_loss / len(train_data_loader)
logging.info(f"Average train loss on epoch {epoch_number}: {avg_train_loss}")
accuracy = accuracy_score(train_predictions, train_true_labels)[0]
logging.info(f"Train Accuracy on epoch {epoch_number + 1}: {accuracy}")
# Put the model into evaluation mode
model.eval()
# reset the validation loss for this epoch
eval_loss, eval_accuracy = 0, 0
val_predictions, true_labels = [], []
for batch in val_data_loader:
batch = tuple(t.to(device) for t in batch)
b_input_ids, b_token_type_ids, b_labels = batch
# telling the model not to compute or store gradients, saving memory and speeding up validation
with torch.no_grad():
# forward pass, calculate logit predictions
# this will return the logits rather than the loss because we have not provided labels
outputs = model(b_input_ids, token_type_ids=b_token_type_ids, labels=b_labels)
# move logits and labels to CPU
logits = outputs[1].detach().cpu().numpy()
b_labels = b_labels.to("cpu").numpy()
# calculate the accuracy for this batch of test sentences
eval_loss += outputs[0].mean().item()
val_predictions.extend(logits.argmax(1))
true_labels.extend(b_labels)
eval_loss = eval_loss / len(val_data_loader)
logging.info(f"Validation loss on epoch {epoch_number + 1}: {eval_loss}")
accuracy = accuracy_score(val_predictions, true_labels)[0]
logging.info(f"Validation Accuracy on epoch {epoch_number + 1}: {accuracy}")
logging.info("\n")
return [val_prediction.tolist()[0] for val_prediction in val_predictions]
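def _demo_training_setup(encoded_samples: List[Dict], model: BertForValueExtraction):
    # Hedged wiring sketch for _train_model; reusing the same tensors for training and
    # validation and the zero warmup are illustrative assumptions only.
    input_ids = torch.tensor([s["input_ids"] for s in encoded_samples])
    token_type_ids = torch.tensor([s["token_type_ids"] for s in encoded_samples])
    labels = torch.tensor([s["labels"] for s in encoded_samples])
    dataset = TensorDataset(input_ids, token_type_ids, labels)
    train_loader = DataLoader(dataset, sampler=RandomSampler(dataset), batch_size=BATCH_SIZE)
    val_loader = DataLoader(dataset, sampler=SequentialSampler(dataset), batch_size=BATCH_SIZE)
    optimizer = AdamW(model.parameters(), lr=LEARNING_RATE, eps=EPSILON)
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=0,
                                                num_training_steps=len(train_loader) * EPOCHS)
    return _train_model(model, optimizer, scheduler, train_loader, val_loader)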
def _output_results(output_dir: str, predictions: List[int], jsons: List[Dict], tokenizer) -> None:
"""
Output predictions to CSV file.
:param output_dir: output directory to save results
:param predictions: iterable of predictions
:param jsons: extracted list of JSONs of the data
:param tokenizer: BERT tokenizer
:return None
"""
results = []
for json_sample, prediction in zip(jsons, predictions):
tokenized_utterance = tokenizer.convert_ids_to_tokens(json_sample["input_ids"])
assert prediction < len(tokenized_utterance) # assert the predicted token index is valid
predicted_token = tokenized_utterance[prediction]
results.append([json_sample["utterance"], json_sample["value"], predicted_token])
# calculating exact match metric
true_values = [result[1] for result in results]
predicted_values = [result[2] for result in results]
exact_match = sum(1 for true_y, pred_y in zip(true_values, predicted_values)
if true_y == pred_y) / float(len(true_values))
logging.info(f"Exact match metric: {exact_match}")
# output results to CSV
    results_df = pd.DataFrame(results, columns=["utterance", "true_value", "predicted_value"])
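    # The snippet breaks off at this point; a hedged completion would simply persist the frame
    # (the output file name below is an assumption):
    results_df.to_csv(os.path.join(output_dir, "predictions.csv"), index=False)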
import numpy as np
import pandas as pd
def center(X):
for col in X.columns:
X.loc[:, col] = X.loc[:, col]-np.mean(X.loc[:, col])
return X
def add_intercept(X):
"""Add all 1's column to predictor matrix"""
X['intercept'] = [1]*X.shape[0]
def one_hot_code(df1, sens_dict):
cols = df1.columns
for c in cols:
if isinstance(df1[c][0], str):
column = df1[c]
df1 = df1.drop(c, 1)
unique_values = list(set(column))
n = len(unique_values)
if n > 2:
for i in range(n):
col_name = '{}.{}'.format(c, i)
col_i = [1 if el == unique_values[i] else 0 for el in column]
df1[col_name] = col_i
sens_dict[col_name] = sens_dict[c]
del sens_dict[c]
else:
col_name = c
col = [1 if el == unique_values[0] else 0 for el in column]
df1[col_name] = col
return df1, sens_dict
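def _demo_one_hot_code():
    # Hedged usage sketch with a toy frame and sensitivity dict (both hypothetical); note that
    # one_hot_code relies on the older positional df.drop(c, 1) pandas API used above.
    toy = pd.DataFrame({'color': ['red', 'blue', 'green'], 'x': [0.1, 0.2, 0.3]})
    coded, sens = one_hot_code(toy, {'color': 1, 'x': 0})
    return coded, sens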
# num_sens in 1:18
def clean_communities(num_sens):
"""Clean communities & crime data set."""
# Data Cleaning and Import
df = pd.read_csv('dataset/communities.csv')
df = df.fillna(0)
y = df['ViolentCrimesPerPop']
q_y = np.percentile(y, 70) ################### 70 or 20 ????????????????????????? #####################
# convert y's to binary predictions on whether the neighborhood is
# especially violent
y = [np.round((1 + np.sign(s - q_y)) / 2) for s in y]
X = df.iloc[:, 0:122]
# hot code categorical variables
sens_df = pd.read_csv('dataset/communities_protected.csv')
sens_cols = [str(c) for c in sens_df.columns if sens_df[c][0] == 1]
#print('sensitive features: {}'.format(sens_cols))
sens_dict = {c: 1 if c in sens_cols else 0 for c in df.columns}
df, sens_dict = one_hot_code(df, sens_dict)
sens_names = [key for key in sens_dict.keys() if sens_dict[key] == 1]
#print('there are {} sensitive features including derivative features'.format(len(sens_names)))
x_prime = df[sens_names[num_sens-1]].copy()
x_prime = 1*(x_prime > np.median(x_prime))
X = X.drop(sens_names[num_sens-1], axis = 1)
return X, x_prime, pd.Series(y)
# num_sens in 1:11
def clean_lawschool(num_sens):
"""Clean law school data set."""
# Data Cleaning and Import
df = pd.read_csv('dataset/lawschool.csv')
df = df.dropna()
# convert categorical column variables to 0,1
df['gender'] = df['gender'].map({'female': 1, 'male': 0})
# remove y from df
df_y = df['bar1']
df = df.drop('bar1', 1)
y = [int(a == 'P') for a in df_y]
y = pd.Series(y)
sens_df = pd.read_csv('dataset/lawschool_protected.csv')
sens_cols = [str(c) for c in sens_df.columns if sens_df[c][0] == 1]
sens_dict = {c: 1 if c in sens_cols else 0 for c in df.columns}
# one hot coding of race variable
for i in range(1, 9):
col_name = 'race{}'.format(i)
if 'race' in sens_cols:
sens_dict[col_name] = 1
else:
sens_dict[col_name] = 0
        race_code = [int(r == i) for r in df['race']]  # plain int() instead of the removed np.int alias
df[col_name] = race_code
sens_dict['race'] = 0
df = df.drop('race', 1)
sens_names = [key for key in sens_dict.keys() if sens_dict[key] == 1]
#print('there are {} sensitive features including derivative features'.format(len(sens_names)))
x_prime = df[sens_names].copy()
x_prime.age = 1*(x_prime.age > np.median(x_prime.age)) ########## OK ??????????? ##############
x_prime.fam_inc = 1*(x_prime.fam_inc > np.median(x_prime.fam_inc)) ########## OK ????????? ##############
x_prime = x_prime[sens_names[num_sens-1]]
df = df.drop(sens_names[num_sens-1], axis = 1)
df.index = range(len(df))
x_prime.index = range(len(x_prime))
return df, x_prime, pd.Series(y)
# num_sens 1:7
def clean_adult(num_sens):
    df = pd.read_csv('dataset/adult.csv')
from backlight.strategies import amount_based as module
import pytest
import pandas as pd
import numpy as np
import backlight
from backlight.labelizer.common import TernaryDirection
from backlight.strategies.common import Action
@pytest.fixture
def signal():
symbol = "usdjpy"
periods = 22
df = pd.DataFrame(
        index=pd.date_range(start="2018-06-06", freq="1min", periods=periods)
# -*- coding: utf-8 -*-
# pylint: disable-msg=W0612,E1101
import itertools
import warnings
from warnings import catch_warnings
from datetime import datetime
from pandas.types.common import (is_integer_dtype,
is_float_dtype,
is_scalar)
from pandas.compat import range, lrange, lzip, StringIO, lmap
from pandas.tslib import NaT
from numpy import nan
from numpy.random import randn
import numpy as np
import pandas as pd
from pandas import option_context
from pandas.core.indexing import _non_reducing_slice, _maybe_numeric_slice
from pandas.core.api import (DataFrame, Index, Series, Panel, isnull,
MultiIndex, Timestamp, Timedelta, UInt64Index)
from pandas.formats.printing import pprint_thing
from pandas import concat
from pandas.core.common import PerformanceWarning
from pandas.tests.indexing.common import _mklbl
import pandas.util.testing as tm
from pandas import date_range
_verbose = False
# ------------------------------------------------------------------------
# Indexing test cases
def _generate_indices(f, values=False):
""" generate the indicies
if values is True , use the axis values
is False, use the range
"""
axes = f.axes
if values:
axes = [lrange(len(a)) for a in axes]
return itertools.product(*axes)
def _get_value(f, i, values=False):
""" return the value for the location i """
    # check against values
if values:
return f.values[i]
# this is equiv of f[col][row].....
# v = f
# for a in reversed(i):
# v = v.__getitem__(a)
# return v
with catch_warnings(record=True):
return f.ix[i]
def _get_result(obj, method, key, axis):
""" return the result for this obj with this key and this axis """
if isinstance(key, dict):
key = key[axis]
    # use an artificial conversion to map the key as integers to the labels
    # so ix can work for comparisons
if method == 'indexer':
method = 'ix'
key = obj._get_axis(axis)[key]
# in case we actually want 0 index slicing
try:
xp = getattr(obj, method).__getitem__(_axify(obj, key, axis))
except:
xp = getattr(obj, method).__getitem__(key)
return xp
def _axify(obj, key, axis):
# create a tuple accessor
axes = [slice(None)] * obj.ndim
axes[axis] = key
return tuple(axes)
class TestIndexing(tm.TestCase):
_objs = set(['series', 'frame', 'panel'])
_typs = set(['ints', 'uints', 'labels', 'mixed',
'ts', 'floats', 'empty', 'ts_rev'])
def setUp(self):
self.series_ints = Series(np.random.rand(4), index=lrange(0, 8, 2))
self.frame_ints = DataFrame(np.random.randn(4, 4),
index=lrange(0, 8, 2),
columns=lrange(0, 12, 3))
self.panel_ints = Panel(np.random.rand(4, 4, 4),
items=lrange(0, 8, 2),
major_axis=lrange(0, 12, 3),
minor_axis=lrange(0, 16, 4))
self.series_uints = Series(np.random.rand(4),
index=UInt64Index(lrange(0, 8, 2)))
self.frame_uints = DataFrame(np.random.randn(4, 4),
index=UInt64Index(lrange(0, 8, 2)),
columns=UInt64Index(lrange(0, 12, 3)))
self.panel_uints = Panel(np.random.rand(4, 4, 4),
items=UInt64Index(lrange(0, 8, 2)),
major_axis=UInt64Index(lrange(0, 12, 3)),
minor_axis=UInt64Index(lrange(0, 16, 4)))
self.series_labels = Series(np.random.randn(4), index=list('abcd'))
self.frame_labels = DataFrame(np.random.randn(4, 4),
index=list('abcd'), columns=list('ABCD'))
self.panel_labels = Panel(np.random.randn(4, 4, 4),
items=list('abcd'),
major_axis=list('ABCD'),
minor_axis=list('ZYXW'))
self.series_mixed = Series(np.random.randn(4), index=[2, 4, 'null', 8])
self.frame_mixed = DataFrame(np.random.randn(4, 4),
index=[2, 4, 'null', 8])
self.panel_mixed = Panel(np.random.randn(4, 4, 4),
items=[2, 4, 'null', 8])
self.series_ts = Series(np.random.randn(4),
index=date_range('20130101', periods=4))
self.frame_ts = DataFrame(np.random.randn(4, 4),
index=date_range('20130101', periods=4))
self.panel_ts = Panel(np.random.randn(4, 4, 4),
items=date_range('20130101', periods=4))
dates_rev = (date_range('20130101', periods=4)
.sort_values(ascending=False))
self.series_ts_rev = Series(np.random.randn(4),
index=dates_rev)
self.frame_ts_rev = DataFrame(np.random.randn(4, 4),
index=dates_rev)
self.panel_ts_rev = Panel(np.random.randn(4, 4, 4),
items=dates_rev)
self.frame_empty = DataFrame({})
self.series_empty = Series({})
self.panel_empty = Panel({})
# form agglomerates
for o in self._objs:
d = dict()
for t in self._typs:
d[t] = getattr(self, '%s_%s' % (o, t), None)
setattr(self, o, d)
def check_values(self, f, func, values=False):
if f is None:
return
axes = f.axes
indicies = itertools.product(*axes)
for i in indicies:
result = getattr(f, func)[i]
            # check against values
if values:
expected = f.values[i]
else:
expected = f
for a in reversed(i):
expected = expected.__getitem__(a)
tm.assert_almost_equal(result, expected)
def check_result(self, name, method1, key1, method2, key2, typs=None,
objs=None, axes=None, fails=None):
def _eq(t, o, a, obj, k1, k2):
""" compare equal for these 2 keys """
if a is not None and a > obj.ndim - 1:
return
def _print(result, error=None):
if error is not None:
error = str(error)
v = ("%-16.16s [%-16.16s]: [typ->%-8.8s,obj->%-8.8s,"
"key1->(%-4.4s),key2->(%-4.4s),axis->%s] %s" %
(name, result, t, o, method1, method2, a, error or ''))
if _verbose:
pprint_thing(v)
try:
rs = getattr(obj, method1).__getitem__(_axify(obj, k1, a))
try:
xp = _get_result(obj, method2, k2, a)
except:
result = 'no comp'
_print(result)
return
detail = None
try:
if is_scalar(rs) and is_scalar(xp):
self.assertEqual(rs, xp)
elif xp.ndim == 1:
tm.assert_series_equal(rs, xp)
elif xp.ndim == 2:
tm.assert_frame_equal(rs, xp)
elif xp.ndim == 3:
tm.assert_panel_equal(rs, xp)
result = 'ok'
except AssertionError as e:
detail = str(e)
result = 'fail'
# reverse the checks
if fails is True:
if result == 'fail':
result = 'ok (fail)'
_print(result)
if not result.startswith('ok'):
raise AssertionError(detail)
except AssertionError:
raise
except Exception as detail:
# if we are in fails, the ok, otherwise raise it
if fails is not None:
if isinstance(detail, fails):
result = 'ok (%s)' % type(detail).__name__
_print(result)
return
result = type(detail).__name__
raise AssertionError(_print(result, error=detail))
if typs is None:
typs = self._typs
if objs is None:
objs = self._objs
if axes is not None:
if not isinstance(axes, (tuple, list)):
axes = [axes]
else:
axes = list(axes)
else:
axes = [0, 1, 2]
# check
for o in objs:
if o not in self._objs:
continue
d = getattr(self, o)
for a in axes:
for t in typs:
if t not in self._typs:
continue
obj = d[t]
if obj is not None:
obj = obj.copy()
k2 = key2
_eq(t, o, a, obj, key1, k2)
def test_ix_deprecation(self):
# GH 15114
df = DataFrame({'A': [1, 2, 3]})
with tm.assert_produces_warning(DeprecationWarning,
check_stacklevel=False):
df.ix[1, 'A']
def test_indexer_caching(self):
# GH5727
# make sure that indexers are in the _internal_names_set
n = 1000001
arrays = [lrange(n), lrange(n)]
index = MultiIndex.from_tuples(lzip(*arrays))
s = Series(np.zeros(n), index=index)
str(s)
# setitem
expected = Series(np.ones(n), index=index)
s = Series(np.zeros(n), index=index)
s[s == 0] = 1
tm.assert_series_equal(s, expected)
def test_at_and_iat_get(self):
def _check(f, func, values=False):
if f is not None:
indicies = _generate_indices(f, values)
for i in indicies:
result = getattr(f, func)[i]
expected = _get_value(f, i, values)
tm.assert_almost_equal(result, expected)
for o in self._objs:
d = getattr(self, o)
# iat
for f in [d['ints'], d['uints']]:
_check(f, 'iat', values=True)
for f in [d['labels'], d['ts'], d['floats']]:
if f is not None:
self.assertRaises(ValueError, self.check_values, f, 'iat')
# at
for f in [d['ints'], d['uints'], d['labels'],
d['ts'], d['floats']]:
_check(f, 'at')
def test_at_and_iat_set(self):
def _check(f, func, values=False):
if f is not None:
indicies = _generate_indices(f, values)
for i in indicies:
getattr(f, func)[i] = 1
expected = _get_value(f, i, values)
tm.assert_almost_equal(expected, 1)
for t in self._objs:
d = getattr(self, t)
# iat
for f in [d['ints'], d['uints']]:
_check(f, 'iat', values=True)
for f in [d['labels'], d['ts'], d['floats']]:
if f is not None:
self.assertRaises(ValueError, _check, f, 'iat')
# at
for f in [d['ints'], d['uints'], d['labels'],
d['ts'], d['floats']]:
_check(f, 'at')
def test_at_iat_coercion(self):
# as timestamp is not a tuple!
dates = date_range('1/1/2000', periods=8)
df = DataFrame(randn(8, 4), index=dates, columns=['A', 'B', 'C', 'D'])
s = df['A']
result = s.at[dates[5]]
xp = s.values[5]
self.assertEqual(result, xp)
# GH 7729
# make sure we are boxing the returns
s = Series(['2014-01-01', '2014-02-02'], dtype='datetime64[ns]')
expected = Timestamp('2014-02-02')
for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
result = r()
self.assertEqual(result, expected)
s = Series(['1 days', '2 days'], dtype='timedelta64[ns]')
expected = Timedelta('2 days')
for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
result = r()
self.assertEqual(result, expected)
def test_iat_invalid_args(self):
pass
def test_imethods_with_dups(self):
# GH6493
# iat/iloc with dups
s = Series(range(5), index=[1, 1, 2, 2, 3], dtype='int64')
result = s.iloc[2]
self.assertEqual(result, 2)
result = s.iat[2]
self.assertEqual(result, 2)
self.assertRaises(IndexError, lambda: s.iat[10])
self.assertRaises(IndexError, lambda: s.iat[-10])
result = s.iloc[[2, 3]]
expected = Series([2, 3], [2, 2], dtype='int64')
tm.assert_series_equal(result, expected)
df = s.to_frame()
result = df.iloc[2]
expected = Series(2, index=[0], name=2)
tm.assert_series_equal(result, expected)
result = df.iat[2, 0]
expected = 2
self.assertEqual(result, 2)
def test_repeated_getitem_dups(self):
# GH 5678
        # repeated getitems on a dup index returning an ndarray
df = DataFrame(
np.random.random_sample((20, 5)),
index=['ABCDE' [x % 5] for x in range(20)])
expected = df.loc['A', 0]
result = df.loc[:, 0].loc['A']
tm.assert_series_equal(result, expected)
def test_iloc_exceeds_bounds(self):
# GH6296
# iloc should allow indexers that exceed the bounds
df = DataFrame(np.random.random_sample((20, 5)), columns=list('ABCDE'))
expected = df
        # lists of positions should raise IndexError!
with tm.assertRaisesRegexp(IndexError,
'positional indexers are out-of-bounds'):
df.iloc[:, [0, 1, 2, 3, 4, 5]]
self.assertRaises(IndexError, lambda: df.iloc[[1, 30]])
self.assertRaises(IndexError, lambda: df.iloc[[1, -30]])
self.assertRaises(IndexError, lambda: df.iloc[[100]])
s = df['A']
self.assertRaises(IndexError, lambda: s.iloc[[100]])
self.assertRaises(IndexError, lambda: s.iloc[[-100]])
# still raise on a single indexer
msg = 'single positional indexer is out-of-bounds'
with tm.assertRaisesRegexp(IndexError, msg):
df.iloc[30]
self.assertRaises(IndexError, lambda: df.iloc[-30])
# GH10779
# single positive/negative indexer exceeding Series bounds should raise
# an IndexError
with tm.assertRaisesRegexp(IndexError, msg):
s.iloc[30]
self.assertRaises(IndexError, lambda: s.iloc[-30])
# slices are ok
result = df.iloc[:, 4:10] # 0 < start < len < stop
expected = df.iloc[:, 4:]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -4:-10] # stop < 0 < start < len
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4:-1] # 0 < stop < len < start (down)
expected = df.iloc[:, :4:-1]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 4:-10:-1] # stop < 0 < start < len (down)
expected = df.iloc[:, 4::-1]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -10:4] # start < 0 < stop < len
expected = df.iloc[:, :4]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4] # 0 < stop < len < start
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -10:-11:-1] # stop < start < 0 < len (down)
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:11] # 0 < len < start < stop
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
# slice bounds exceeding is ok
result = s.iloc[18:30]
expected = s.iloc[18:]
tm.assert_series_equal(result, expected)
result = s.iloc[30:]
expected = s.iloc[:0]
tm.assert_series_equal(result, expected)
result = s.iloc[30::-1]
expected = s.iloc[::-1]
tm.assert_series_equal(result, expected)
# doc example
def check(result, expected):
str(result)
result.dtypes
tm.assert_frame_equal(result, expected)
dfl = DataFrame(np.random.randn(5, 2), columns=list('AB'))
check(dfl.iloc[:, 2:3], DataFrame(index=dfl.index))
check(dfl.iloc[:, 1:3], dfl.iloc[:, [1]])
check(dfl.iloc[4:6], dfl.iloc[[4]])
self.assertRaises(IndexError, lambda: dfl.iloc[[4, 5, 6]])
self.assertRaises(IndexError, lambda: dfl.iloc[:, 4])
def test_iloc_getitem_int(self):
# integer
self.check_result('integer', 'iloc', 2, 'ix',
{0: 4, 1: 6, 2: 8}, typs=['ints', 'uints'])
self.check_result('integer', 'iloc', 2, 'indexer', 2,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_neg_int(self):
# neg integer
self.check_result('neg int', 'iloc', -1, 'ix',
{0: 6, 1: 9, 2: 12}, typs=['ints', 'uints'])
self.check_result('neg int', 'iloc', -1, 'indexer', -1,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_list_int(self):
# list of ints
self.check_result('list int', 'iloc', [0, 1, 2], 'ix',
{0: [0, 2, 4], 1: [0, 3, 6], 2: [0, 4, 8]},
typs=['ints', 'uints'])
self.check_result('list int', 'iloc', [2], 'ix',
{0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints'])
self.check_result('list int', 'iloc', [0, 1, 2], 'indexer', [0, 1, 2],
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
# array of ints (GH5006), make sure that a single indexer is returning
# the correct type
self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'ix',
{0: [0, 2, 4],
1: [0, 3, 6],
2: [0, 4, 8]}, typs=['ints', 'uints'])
self.check_result('array int', 'iloc', np.array([2]), 'ix',
{0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints'])
self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'indexer',
[0, 1, 2],
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_neg_int_can_reach_first_index(self):
# GH10547 and GH10779
# negative integers should be able to reach index 0
df = DataFrame({'A': [2, 3, 5], 'B': [7, 11, 13]})
s = df['A']
expected = df.iloc[0]
result = df.iloc[-3]
tm.assert_series_equal(result, expected)
expected = df.iloc[[0]]
result = df.iloc[[-3]]
tm.assert_frame_equal(result, expected)
expected = s.iloc[0]
result = s.iloc[-3]
self.assertEqual(result, expected)
expected = s.iloc[[0]]
result = s.iloc[[-3]]
tm.assert_series_equal(result, expected)
# check the length 1 Series case highlighted in GH10547
expected = pd.Series(['a'], index=['A'])
result = expected.iloc[[-1]]
tm.assert_series_equal(result, expected)
def test_iloc_getitem_dups(self):
# no dups in panel (bug?)
self.check_result('list int (dups)', 'iloc', [0, 1, 1, 3], 'ix',
{0: [0, 2, 2, 6], 1: [0, 3, 3, 9]},
objs=['series', 'frame'], typs=['ints', 'uints'])
# GH 6766
df1 = DataFrame([{'A': None, 'B': 1}, {'A': 2, 'B': 2}])
df2 = DataFrame([{'A': 3, 'B': 3}, {'A': 4, 'B': 4}])
df = concat([df1, df2], axis=1)
# cross-sectional indexing
result = df.iloc[0, 0]
self.assertTrue(isnull(result))
result = df.iloc[0, :]
expected = Series([np.nan, 1, 3, 3], index=['A', 'B', 'A', 'B'],
name=0)
tm.assert_series_equal(result, expected)
def test_iloc_getitem_array(self):
# array like
s = Series(index=lrange(1, 4))
self.check_result('array like', 'iloc', s.index, 'ix',
{0: [2, 4, 6], 1: [3, 6, 9], 2: [4, 8, 12]},
typs=['ints', 'uints'])
def test_iloc_getitem_bool(self):
# boolean indexers
b = [True, False, True, False, ]
self.check_result('bool', 'iloc', b, 'ix', b, typs=['ints', 'uints'])
self.check_result('bool', 'iloc', b, 'ix', b,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_slice(self):
# slices
self.check_result('slice', 'iloc', slice(1, 3), 'ix',
{0: [2, 4], 1: [3, 6], 2: [4, 8]},
typs=['ints', 'uints'])
self.check_result('slice', 'iloc', slice(1, 3), 'indexer',
slice(1, 3),
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_slice_dups(self):
df1 = DataFrame(np.random.randn(10, 4), columns=['A', 'A', 'B', 'B'])
df2 = DataFrame(np.random.randint(0, 10, size=20).reshape(10, 2),
columns=['A', 'C'])
# axis=1
df = concat([df1, df2], axis=1)
tm.assert_frame_equal(df.iloc[:, :4], df1)
tm.assert_frame_equal(df.iloc[:, 4:], df2)
df = concat([df2, df1], axis=1)
tm.assert_frame_equal(df.iloc[:, :2], df2)
tm.assert_frame_equal(df.iloc[:, 2:], df1)
exp = concat([df2, df1.iloc[:, [0]]], axis=1)
tm.assert_frame_equal(df.iloc[:, 0:3], exp)
# axis=0
df = concat([df, df], axis=0)
tm.assert_frame_equal(df.iloc[0:10, :2], df2)
tm.assert_frame_equal(df.iloc[0:10, 2:], df1)
tm.assert_frame_equal(df.iloc[10:, :2], df2)
tm.assert_frame_equal(df.iloc[10:, 2:], df1)
def test_iloc_setitem(self):
df = self.frame_ints
df.iloc[1, 1] = 1
result = df.iloc[1, 1]
self.assertEqual(result, 1)
df.iloc[:, 2:3] = 0
expected = df.iloc[:, 2:3]
result = df.iloc[:, 2:3]
tm.assert_frame_equal(result, expected)
# GH5771
s = Series(0, index=[4, 5, 6])
s.iloc[1:2] += 1
expected = Series([0, 1, 0], index=[4, 5, 6])
        tm.assert_series_equal(s, expected)
import ast
import pandas as pd
import csv
import itertools
import korbinian
import numpy as np
import os
import pickle
import re
import sys
import korbinian.utils as utils
from multiprocessing import Pool
def run_calculate_gap_densities(pathdict, s, logging):
"""Runs calculate_gap_densities using multiprocessing Pool.
Uses multiprocessing to generate a separate output file for each protein, which can be "gathered" later
using the gather_gap_densities function.
Parameters
----------
pathdict : dict
Dictionary of the key paths and files associated with that List number.
s : dict
Settings dictionary extracted from excel settings file.
logging : logging.Logger
Logger for printing to console and/or logfile.
If multiprocessing == True, messages sent to the logger, e.g. logging.info(message), will only print to console.
"""
logging.info("~~~~~~~~~~~~ starting calculate_gap_densities ~~~~~~~~~~~~")
# if multiprocessing is used, log only to the console
p_dict_logging = logging if s["use_multiprocessing"] != True else utils.Log_Only_To_Console()
# set current working directory as the data_dir/homol, where temp files will be saved before moving to zip
os.chdir(os.path.join(s["data_dir"], "homol"))
not_in_homol_db = utils.get_acc_list_from_txt(pathdict["acc_not_in_homol_db_txt"])
# create list of protein dictionaries to process
list_p = korbinian.utils.convert_summary_csv_to_input_list(s, pathdict, p_dict_logging, list_excluded_acc=not_in_homol_db)
# number of processes is the number the settings, or the number of proteins, whichever is smallest
n_processes = s["multiprocessing_cores"] if s["multiprocessing_cores"] < len(list_p) else len(list_p)
if s["use_multiprocessing"]:
with Pool(processes=n_processes) as pool:
gap_return_statement_list = pool.map(korbinian.gap.calculate_gap_densities, list_p)
# log the list of protein results (e.g. acc, "simap", True) to the actual logfile, not just the console
for message in gap_return_statement_list:
logging.info(message)
else:
for p in list_p:
korbinian.gap.calculate_gap_densities(p)
logging.info("~~~~~~~~~~~~ calculate_gap_densities is finished ~~~~~~~~~~~~")
def calculate_gap_densities(p):
"""For one protein, calculates the gap positions amongst homologues.
Based on the scripts from <NAME>.
    Some changes and annotations by Mark, including the adaptation to korbinian, and conversion to multiprocessing.
Note that Rimma's code for the slicing of JM regions has been inserted into "slice.py".
Parameters
----------
p : dict
Protein Dictionary. Contains all input settings, sequences and filepaths related to a single protein.
Protein-specific data is extracted from one row of the the list summary, e.g. List05_summary.csv, which is read as df.
p also contains the GENERAL korbinian settings and filepaths for that list (pathdict, s, logging)
Components of p :
pathdict : dict
Dictionary of the key paths and files associated with that List number.
s : dict
Settings dictionary extracted from excel settings file.
logging : logging.Logger
Logger for printing to console and/or logfile.
If multiprocessing == True, logging.info etc will only print to console.
p : protein-specific dictionary components
acc, list_of_TMDs, description, TM01_seq, etc
Returns
-------
In all cases, a tuple (str, bool, str) is returned.
if successful:
return acc, True, "0"
if not successful:
return acc, False, "specific warning or reason why protein failed"
"""
pathdict, s, logging = p["pathdict"], p["s"], p["logging"]
acc = p["acc"]
######################################################################################################################
# #
# Define some constants. Skip protein if already done. #
# #
######################################################################################################################
# Maximum number of gaps for tmds to be considered
allowed_gaps_per_tmd = s["gap_allowed_gaps_per_tmd"]
# 24 for beta barrel proteins, can be altered if only several TMDs to consider
max_number_of_tmds = s["max_number_of_tmds"]
# # iterate through each protein that has a list_of_TMDs
protein_name = p["protein_name"]
# define output file path
gapout_csv_path = "{}_gapout.csv".format(p['homol_base'])
# The next steps (the main analysis) is only executed, if previous analysis can be overwritten or no analysis has yet been done
if s["overwrite_previous_gap_analysis"] == False:
if os.path.isfile(gapout_csv_path):
message = "{} skipped, gaps already analysed."
logging.info(message)
return acc, False, message
logging.info(acc)
list_of_TMDs = ast.literal_eval(p["list_of_TMDs"])
if not os.path.isfile(p['homol_df_orig_zip']):
message = "{} skipped, {} not found.".format(acc, p['homol_df_orig_zip'])
logging.info(message)
return acc, False, message
######################################################################################################################
# #
# Filter by FASTA_gapped_identity (of full protein), etc. #
# Open homol_df_orig_pickle, which contains the full sequence of all homologues, and GappedIdentity, etc #
# #
######################################################################################################################
dfh = utils.open_df_from_pickle_zip(p['homol_df_orig_zip'], filename=os.path.basename(p['homol_df_orig_pickle']), delete_corrupt=True)
dfh.index = dfh.index.astype(int)
if dfh.empty:
message = "{} Protein skipped, file deleted as it is possibly corrupt.".format(p['homol_df_orig_zip'])
logging.info(message)
return acc, False, message
gap_homol_query_str = 'FASTA_gapped_identity > {min_ident} and ' \
'FASTA_gapped_identity < {max_ident} and ' \
'hit_contains_SW_node == True and ' \
'disallowed_words_not_in_descr == True and ' \
'X_in_match_seq == False'.format(min_ident=s["gap_min_identity_of_full_protein"], max_ident=s["gap_max_identity_of_full_protein"])
# filter based on the query string
dfh.query(gap_homol_query_str, inplace=True)
if dfh.empty:
message = "{} skipped, filtering by gap_homol_query_str did not leave any valid homologues.".format(acc)
logging.info(message)
return acc, False, message
# keep the index of dfh after filtering based on protein-wide filters
dfh_filt_index = dfh.index
# remove dfh from memory, as it shouldn't be necessary anymore
del dfh
# open the dataframe for nonTMD sliced, including all TM indices, JM indices, and JM sequences
dfnon = utils.open_df_from_pickle_zip(p['fa_cr_sliced_TMDs_zip'], filename="{}_nonTMD_sliced_df.pickle".format(protein_name), delete_corrupt=True)
# filter based on dfh above, for general homologue settings (e.g. % identity of full protein)
try:
dfnon = dfnon.loc[dfh_filt_index, :]
except KeyError:
# use a try/except loop to detect the rare case where there are no valid homologues between both dataframes
# (running a set operation for all hit_num of all proteins is likely to slow the script, with no great benefit)
message = "{} skipped, dfnon does not contain any valid homologues."
logging.info(message)
return acc, False, message
######################################################################################################################
# #
# For the Beta-Barrel Dataset, check if the boolean toggle n_term_ec is there. This should be #
# False for all BB proteins, but you can confirm it by making sure the first residue is labelled #
# as "I" for Inside. #
# #
######################################################################################################################
""" Current code in uniprot_parse
# information about location of first non-tmd (extracellular or periplasmic/cytoplasmic)
if len(location_of_non_tmds_in_feature_list) > 0:
output_dict['loc_start'] = record.features[location_of_non_tmds_in_feature_list[0]][3]
output_dict['n_term_ec'] = "Extracellular" in output_dict["loc_start"]
else:
output_dict['loc_start'] = np.nan
output_dict['n_term_ec'] = np.nan
"""
if "n_term_ec" not in p:
if "Topology" in p:
# if the first residue is labelled as "inside, I", or "membrane, M"
if p["Topology"][0] in ["I", "M"]:
p["n_term_ec"] = False
elif p["Topology"][0] == "O":
p["n_term_ec"] = True
else:
raise ValueError('p["Topology"][0] not recognized')
else:
raise ValueError('n_term_ec not available')
# in some cases, p["n_term_ec"] is np.nan. Confirm that if empty, it is True.
n_term_ec = False if p["n_term_ec"] == False else True
# create empty output dict, to contain all of the lists of gap positions
gapout_dict = {}
# for each TMD in the proteins, creates new lists which will contain gap_positions, lists are saved in a column and created again for each tmd
for tmd in list_of_TMDs:
sys.stdout.write(".")
sys.stdout.flush()
# open the dataframe containing the sequences, gap counts, etc for that TMD only
df_s1 = utils.open_df_from_pickle_zip(p['fa_cr_sliced_TMDs_zip'], filename="{}_{}_sliced_df.pickle".format(protein_name, tmd), delete_corrupt=True)
# filter based on dfh above, for general homologue settings (e.g. % identity of full protein)
df_s1 = df_s1.loc[dfh_filt_index, :]
"""
cannot filter out rows that contain no gaps, as the JM number of gaps is not yet defined!
"""
#df_s1["%s_n_gaps_q_and_m"%tmd] = df_s1["%s_SW_query_num_gaps"%tmd] + df_s1.loc["%s_SW_match_num_gaps"%tmd]
max_before_filter = df_s1["{}_SW_query_num_gaps".format(tmd)].max()
shape_before = df_s1.shape
gap_TMD_query_str = '({TMD}_SW_query_num_gaps <= {allowed_gaps_per_tmd}) & ' \
'({TMD}_SW_match_num_gaps <= {allowed_gaps_per_tmd})'.format(TMD=tmd,allowed_gaps_per_tmd=allowed_gaps_per_tmd)
# filter based on the query string
df_s1.query(gap_TMD_query_str, inplace=True)
#len_of_query = len(df_s1["%s_SW_query_seq"%tmd][1]) # Length of first query sequence, which does (usually) not contain any gaps
# Length of query TM sequence
len_of_query = len(p["%s_seq" % tmd])
len_of_query_reversed= ((1/len_of_query)+1) # Reversed length, important if TMD needs to be reversed afterwards
list_of_gaps_in_tmd = []
list_of_gaps_intracellular = []
list_of_gaps_extracellular = []
df_s1["%s_n_gaps_q_and_m" % tmd] = df_s1["%s_SW_query_num_gaps" % tmd] + df_s1["%s_SW_match_num_gaps" % tmd]
min_n_gaps_in_TMD = s["gap_min_n_gaps_in_TMD"]
max_n_gaps_in_TMD = s["gap_max_n_gaps_in_TMD"]
        # the max number of gaps is the max number for query or match, x2.
# this still has to be filtered further (query could have 4 gaps, depending on allowed_gaps_per_tmd), but it still cuts down on the number of items in the loop
max_n_gaps_in_TMD_q_plus_m = min_n_gaps_in_TMD + max_n_gaps_in_TMD
filt_string = "{mi} <= {TMD}_n_gaps_q_and_m <= {ma}".format(mi = min_n_gaps_in_TMD, TMD=tmd, ma = max_n_gaps_in_TMD_q_plus_m)
df_s1_TMD = df_s1.query(filt_string)
# if the query didn't return an empty dataframe
if df_s1_TMD.shape[0] != 0:
for hit in df_s1_TMD.index:
# '''
# Start of the main gap analysis
# Code searches for "-" in the TMD sequence and returns the index!! (not the position)
# '''
# Following if conditions only refer to gaps in the query!
# Query gaps are counted as "in between positions", for example: 4,5 refers to a gap between position 4 and 5;
# if two gaps occur one after another: only one position (between two amino acids is considered)
# Filter to make sure, that there are 1 or 2 gaps in the query sequence and up to the max allowed gaps in the match
if (df_s1.loc[hit, "%s_SW_query_num_gaps" % tmd] != 0.0) and (
df_s1.loc[hit, "%s_SW_query_num_gaps" % tmd] <= 2.0): # and (df_s1.loc[hit,"%s_SW_match_num_gaps"%tmd] <= int("%s"%allowed_gaps_per_tmd)):
# Stores the endpoints in a temp list; endpoints are used, to switch from python indices to numbers
list_of_gaps_per_hit_in_query = [m.start() for m in re.finditer("-", df_s1.loc[hit, "%s_SW_query_seq" % tmd]) if m.start()]
# if there are two gaps in the query (and 2 allowed), code checks if they are side by side (difference of 1)
# and appends the gap position, else appends both gap positions
# 0.5 is substracted in order to make them "in between" position;
# if two gaps are observed, 1.5 is substracted from the second one, since the residue positions are moved due to the first gap
if len(list_of_gaps_per_hit_in_query) == 2 and allowed_gaps_per_tmd == 2:
if list_of_gaps_per_hit_in_query[1] - list_of_gaps_per_hit_in_query[0] == 1:
list_of_gaps_in_tmd.append(list_of_gaps_per_hit_in_query[0] - 0.5)
else:
list_of_gaps_in_tmd.append(list_of_gaps_per_hit_in_query[0] - 0.5)
list_of_gaps_in_tmd.append(list_of_gaps_per_hit_in_query[1] - 1.5)
# if there is only one gap in query or only one gap is allowed, it appends the first (and only) gap from the list_of_gaps_per_hit_in_query to the list_of_TMDs
else:
if len(list_of_gaps_per_hit_in_query) == 1:
list_of_gaps_in_tmd.append(list_of_gaps_per_hit_in_query[0] - 0.5)
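                    # Worked example (hypothetical alignment): for a query TMD "LLIW-FLL" the single
                    # gap sits at string index 4, so 4 - 0.5 = 3.5 is stored, i.e. the gap lies between
                    # query residues 4 and 5. For two adjacent gaps ("LLIW--LL", indices 4 and 5) only
                    # 3.5 is kept, while for two separated gaps the second index is additionally shifted
                    # by the first gap, hence the extra -1.5.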
# Following if conditions only refer to gaps in the match!
# Query gaps are counted as deletions of positions; for example: 4 refers to a gap on position 4;
# if two gaps occur one after another, both are considered since two actual amino acids from the original query are deleted
# Since the gap positions are dependend on the query sequence, query-gap positions in the same alignment have to be considered as well
# Filter to make sure, that there are 1 or 2 gaps in the match sequence and up to the max allowed gaps in the query
if (df_s1.loc[hit, "%s_SW_query_num_gaps" % tmd] <= 2.0) and (df_s1.loc[hit, "%s_SW_match_num_gaps" % tmd] <= 2.0) \
and (df_s1.loc[hit, "%s_SW_match_num_gaps" % tmd] != 0.0):
# It's not sure that the list of hits in query was already determined, maybe there were no gaps, anyway here it is important how many
list_of_gaps_per_hit_in_query = [m.start() for m in re.finditer("-", df_s1.loc[hit, "%s_SW_query_seq" % tmd]) if m.start()]
list_of_gaps_per_hit_in_match = [m.start() for m in re.finditer("-", df_s1.loc[hit, "%s_SW_match_seq" % tmd]) if m.start()]
if len(list_of_gaps_per_hit_in_match) > 0:
for n in list(reversed(list_of_gaps_per_hit_in_match)):
substracted_value = len([m < n for m in list_of_gaps_per_hit_in_query])
list_of_gaps_in_tmd.append(abs(n - substracted_value))
# create a view that shows only the desired sequences
dfnon_j = dfnon[["seq_juxta_before_%s_in_query" % tmd, "seq_juxta_before_%s_in_match"%tmd, "seq_juxta_after_%s_in_query"%tmd, "seq_juxta_after_%s_in_match"%tmd]]
dfnon_j = dfnon_j.dropna(how="all")
for hit in dfnon_j.index:
#######
# Start of the Juxta Consideration
# In the case of n_term being located intracellular:
# there are 4 groups: 1. juxta_before_odd_TMDs + 2.juxta_after_even_TMDs 3. Juxta_before_even_TMDs + 4. Juxta_after_odd_TMDs
# 1 + 2 --> Intracellular
# 3 + 4 --> Extracellular
# If the n_term is extracellular, that it's the other way round. 1+2 --> Extracellular 3+4 --> Intracellular
### The data will already be flipped in order to align extracellular and intracellular parts, extracellular: + , intracellular: -
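            # Illustration (hypothetical 4-TMD protein with an intracellular N-terminus):
            #   juxta before TM01 / after TM02 / before TM03 / after TM04 -> intracellular
            #   juxta after TM01 / before TM02 / after TM03 / before TM04 -> extracellular
            # With an extracellular N-terminus the two groups are swapped.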
# juxta before_odd_TMDs:
if "SP01" in list_of_TMDs:
                raise ValueError("The gap analysis is not currently designed for proteins with signal peptides.")
tmd_int = int(tmd[-2:]) # Integer of TMD number
            if utils.isOdd(tmd_int) == True:  # i.e. for TM01, TM03, ...
# list of gap indices
# makes sure that the search is done in a string
if type(dfnon_j.loc[hit,"seq_juxta_before_%s_in_query"%tmd]) == str:
list_of_gaps_in_query_before_odd = [m.start()+0.5 for m in re.finditer("-",dfnon_j.loc[hit,"seq_juxta_before_%s_in_query"%tmd][::-1])if m.start()+0.5 < 31]
# if one gap is found, code checks location and appends it
if len (list_of_gaps_in_query_before_odd)==1:
if n_term_ec == False:
list_of_gaps_intracellular.append(list_of_gaps_in_query_before_odd[0])
else:
list_of_gaps_extracellular.append(list_of_gaps_in_query_before_odd[0])
                    # if more than one gap is found, code checks if the gaps are one after another in the query!
if len (list_of_gaps_in_query_before_odd)>1.0:
following_gap = 0
rev_value = list_of_gaps_in_query_before_odd[0]
for n in list_of_gaps_in_query_before_odd:
if n-following_gap == rev_value:
if n_term_ec == False:
list_of_gaps_intracellular.append(n-following_gap)
following_gap = following_gap+1
else:
list_of_gaps_extracellular.append(n-following_gap)
following_gap = following_gap+1
else:
if n_term_ec == False:
list_of_gaps_intracellular.append(n-following_gap)
following_gap = following_gap+1
rev_value = n
else:
list_of_gaps_extracellular.append(n-following_gap)
following_gap = following_gap+1
rev_value = n
if type(dfnon_j.loc[hit,"seq_juxta_before_%s_in_match"%tmd])== str:
# Makes a list of gaps of the match of the odd juxta before the TMD
list_of_gaps_in_query_before_odd = [m.start()+1 for m in re.finditer("-",dfnon_j.loc[hit,"seq_juxta_before_%s_in_query"%tmd][::-1])if m.start() < 32]
list_of_gaps_in_match_before_odd = [m.start()+1 for m in re.finditer("-",dfnon_j.loc[hit,"seq_juxta_before_%s_in_match"%tmd][::-1])if m.start() < 32]
# This step is essential, to control, if there is a gap before, in the query region
for n in list(reversed(list_of_gaps_in_match_before_odd)):
greater_values = sum(i< n for i in list_of_gaps_in_query_before_odd)
if n_term_ec== False:
list_of_gaps_intracellular.append(n-greater_values)
else:
list_of_gaps_extracellular.append(n-greater_values)
# juxta after odd TMDs:
if type(dfnon_j.loc[hit,"seq_juxta_after_%s_in_query"%tmd])== str:
list_of_gaps_in_query_after_odd = [m.start()+0.5 for m in re.finditer("-",dfnon_j.loc[hit,"seq_juxta_after_%s_in_query"%tmd])if m.start()+0.5 < 31]
# if one gap is found, code checks location and appends it
if len (list_of_gaps_in_query_after_odd)==1:
if n_term_ec == False:
list_of_gaps_extracellular.append(list_of_gaps_in_query_after_odd[0])
else:
list_of_gaps_intracellular.append(list_of_gaps_in_query_after_odd[0])
# if more than one gap is found, code checks if the gaps are one after another in the query!
if len (list_of_gaps_in_query_after_odd)>1.0:
following_gap = 0
rev_value = list_of_gaps_in_query_after_odd[0]
for n in list_of_gaps_in_query_after_odd:
if n+following_gap == rev_value:
if n_term_ec == False:
list_of_gaps_extracellular.append(n-following_gap)
following_gap = following_gap+1
else:
list_of_gaps_intracellular.append(n-following_gap)
following_gap = following_gap+1
else:
if n_term_ec == False:
list_of_gaps_extracellular.append(n-following_gap)
following_gap = following_gap+1
rev_value = n
else:
list_of_gaps_intracellular.append(n-following_gap)
following_gap = following_gap+1
rev_value = n
# # juxta after odd TMDs:
# if type(dfnon_j.loc[hit,"seq_juxta_after_TM%.2d_in_query"%tmd_int])== str:
# list_of_gaps_in_query_after_odd = [m.start()+0.5 for m in re.finditer("-",dfnon_j.loc[hit,"seq_juxta_after_TM%.2d_in_query"%tmd_int])if m.start()+0.5 < 31]
#
# #if one gap is found, code checks location and appends it
# if len (list_of_gaps_in_query_after_odd)==1:
# if n_term_ec == False:
# list_of_gaps_extracellular.append(list_of_gaps_in_query_after_odd[0])
# else:
# list_of_gaps_intracellular.append(list_of_gaps_in_query_after_odd[0])
# #if more than one gap is found, code checks if the gaps are one after another in the query!
# if len (list_of_gaps_in_query_after_odd)>1.0:
# following_gap = 0
# rev_value = list_of_gaps_in_query_after_odd[0]
# for n in list_of_gaps_in_query_after_odd:
# if n+following_gap == rev_value:
# if n_term_ec == False:
# #list_of_gaps_extracellular.append(n-following_gap)
# following_gap = following_gap+1
# else:
# list_of_gaps_intracellular.append(n-following_gap)
# following_gap = following_gap+1
# else:
# if n_term_ec == False:
# list_of_gaps_extracellular.append(n-following_gap)
# following_gap = following_gap+1
# rev_value = n
# else:
# list_of_gaps_intracellular.append(n-following_gap)
# following_gap = following_gap+1
# rev_value = n
else: # for 2,4
# juxta before even TMDs:
# makes sure that the search is done in a string
if type(dfnon_j.loc[hit,"seq_juxta_before_%s_in_query"%tmd])== str:
# list of gap indices
list_of_gaps_in_query_before_even = [m.start()+0.5 for m in re.finditer("-",dfnon_j.loc[hit,"seq_juxta_before_%s_in_query"%tmd])if m.start()+0.5 < 31]
# if one gap is found, code checks location and appends it
if len (list_of_gaps_in_query_before_even)==1:
if n_term_ec == False:
list_of_gaps_extracellular.append(list_of_gaps_in_query_before_even[0])
else:
list_of_gaps_intracellular.append(list_of_gaps_in_query_before_even[0])
# if more than one gap is found, code checks if the gaps are one after another in the query!
if len (list_of_gaps_in_query_before_even)>1.0:
following_gap = 0
rev_value = list_of_gaps_in_query_before_even[0]
for n in list_of_gaps_in_query_before_even:
if n+following_gap == rev_value:
if n_term_ec == False:
list_of_gaps_extracellular.append(n-following_gap)
following_gap = following_gap+1
else:
list_of_gaps_intracellular.append(n-following_gap)
following_gap = following_gap+1
else:
if n_term_ec == False:
list_of_gaps_extracellular.append(n-following_gap)
following_gap = following_gap+1
rev_value = n
else:
list_of_gaps_intracellular.append(n-following_gap)
following_gap = following_gap+1
rev_value = n
if type(dfnon_j.loc[hit,"seq_juxta_before_%s_in_match"%tmd])== str:
# Makes a list of gaps of the match of the odd juxta before the TMD
list_of_gaps_in_query_before_even = [m.start()+0.5 for m in re.finditer("-",dfnon_j.loc[hit,"seq_juxta_before_%s_in_query"%tmd])if m.start()+0.5 < 31]
list_of_gaps_in_match_before_even = [m.start()+1 for m in re.finditer("-",dfnon_j.loc[hit,"seq_juxta_before_%s_in_match"%tmd])if m.start() < 31]
for n in list(reversed(list_of_gaps_in_match_before_even)):
greater_values = sum(i< n for i in list_of_gaps_in_query_before_even)
if n_term_ec== False:
list_of_gaps_extracellular.append(n-greater_values)
else:
list_of_gaps_intracellular.append(n-greater_values)
# juxta after even TMDs:
if type(dfnon_j.loc[hit,"seq_juxta_after_%s_in_query"%tmd])== str:
list_of_gaps_in_query_after_even = [m.start()+0.5 for m in re.finditer("-",dfnon_j.loc[hit,"seq_juxta_after_%s_in_query"%tmd][::-1]) if m.start()+0.5 < 31]
# if one gap is found, code checks location and appends it
if len (list_of_gaps_in_query_after_even)==1:
if n_term_ec == False:
list_of_gaps_intracellular.append(list_of_gaps_in_query_after_even[0])
else:
list_of_gaps_extracellular.append(list_of_gaps_in_query_after_even[0])
# if more than one gap is found, code checks if the gaps are one after another in the query!
if len (list_of_gaps_in_query_after_even)>1.0:
following_gap = 0
rev_value = list_of_gaps_in_query_after_even[0]
for n in list_of_gaps_in_query_after_even:
if n-following_gap == rev_value:
if n_term_ec == False:
list_of_gaps_intracellular.append(n-following_gap)
following_gap = following_gap+1
else:
list_of_gaps_extracellular.append(n-following_gap)
following_gap = following_gap+1
else:
if n_term_ec == False:
list_of_gaps_intracellular.append(n-following_gap)
following_gap = following_gap+1
rev_value = n
else:
list_of_gaps_extracellular.append(n-following_gap)
following_gap = following_gap+1
rev_value = n
# Makes a list of gaps of the match of the odd juxta before the TMD
if type(dfnon_j.loc[hit,"seq_juxta_after_%s_in_match"%tmd])== str and type(dfnon_j.loc[hit,"seq_juxta_after_%s_in_query"%tmd])==str:
list_of_gaps_in_query_after_even = [m.start()+0.5 for m in re.finditer("-",dfnon_j.loc[hit,"seq_juxta_after_%s_in_query"%tmd][::-1])if m.start()+0.5 < 31]
list_of_gaps_in_match_after_even = [m.start()+1 for m in re.finditer("-",dfnon_j.loc[hit,"seq_juxta_after_%s_in_match"%tmd][::-1])if m.start() < 31]
for n in list(reversed(list_of_gaps_in_match_after_even)):
greater_values = sum(i< n for i in list_of_gaps_in_query_after_even)
if n_term_ec== False:
list_of_gaps_intracellular.append(n-greater_values)
else:
list_of_gaps_extracellular.append(n-greater_values)
# sets of lists are created, to assure, that each gapposition contributes only once to the possible gap positions
unique_list_of_gaps_in_tmd = list(set(list_of_gaps_in_tmd))
unique_list_of_gaps_intracellular = list(set(list_of_gaps_intracellular))
unique_list_of_gaps_extracellular = list(set(list_of_gaps_extracellular))
# Saves the calculated lists into cells in the columns
gapout_dict["%s_occurring_gaps"%tmd]=str(unique_list_of_gaps_in_tmd)
gapout_dict["%s_amount_possible_gap_positions"%tmd]=len(unique_list_of_gaps_in_tmd)
gapout_dict['juxta_%s_intracellular_possible_gap_positions'%tmd] = str(unique_list_of_gaps_intracellular)
gapout_dict['juxta_%s_extracellular_possible_gap_positions'%tmd] = str(unique_list_of_gaps_extracellular)
gapout_dict['juxta_%s_intracellular_num_gaps'%tmd] = len(unique_list_of_gaps_intracellular)
gapout_dict['juxta_%s_exracellular_num_gaps'%tmd] = len(unique_list_of_gaps_extracellular)
pd.Series(gapout_dict).to_csv(gapout_csv_path)
# sys.stdout.write("{} gapout_dict is not saved.".format(tmd))
# sys.stdout.flush()
return acc, True, "0"
# # At the end, sets analysed to true, this is important to not overwrite
# gapout_dict["gaps_analysed"] = "True"
# # save to csv
# df.to_csv(pathdict["list_csv"], sep=",", quoting=csv.QUOTE_NONNUMERIC)
def gather_gap_densities(pathdict, s, logging):
"""Gathers the gap density data saved in the gapout_csv for each protein, and processes data for figure creation.
currently NOT compatible with multiprocessing
1) excludes proteins whose homologues could not be downloaded
2) opens gapout_csv files for each protein, appends to df_gap
3) in the original protein list, extracts the juxtamembrane region from each TMD
Parameters
----------
pathdict : dict
Dictionary of the key paths and files associated with that List number.
s : dict
Settings dictionary extracted from excel settings file.
logging : logging.Logger
Logger for printing to console and/or logfile.
If multiprocessing == True, logging.info etc will only print to console.
Returns
-------
"""
logging.info("~~~~~~~~~~~~ starting gather_gap_densities ~~~~~~~~~~~~")
######################################################################################################################
# #
# Open list of proteins. Exclude proteins whose homologues could not be downloaded. #
# #
######################################################################################################################
df = pd.read_csv(pathdict["list_csv"], sep=",", quoting=csv.QUOTE_NONNUMERIC, index_col=0)
# get list of accessions that could not be downloaded, and can immediately be excluded
not_in_homol_db = utils.get_acc_list_from_txt(pathdict["acc_not_in_homol_db_txt"])
acc_kept = set(df.index) - set(not_in_homol_db)
# filter to remove proteins not in the homologue database
df = df.loc[acc_kept, :]
# remove any proteins from list that do not have a list of TMDs
df = df.loc[df.list_of_TMDs.notnull()]
######################################################################################################################
# #
# Combine the gap data from all proteins into a single dataframe #
# #
######################################################################################################################
# create an empty dataframe for gathering the various output files
df_gap = pd.DataFrame()
# iterate over the dataframe for proteins with an existing list_of_TMDs. acc = uniprot accession.
for acc in df.index:
protein_name = df.loc[acc, 'protein_name']
sys.stdout.write(" {}".format(protein_name))
sys.stdout.flush()
# define gap output file path
gapout_csv_path = "{}_gapout.csv".format(df.loc[acc,'homol_base'])
if not os.path.exists(gapout_csv_path):
logging.info("{} {} Protein skipped. File does not exist".format(acc, gapout_csv_path))
continue
# open csv as pandas dataframe (note, it was originally a series, and contains only one column and an index)
gapout_df = pd.read_csv(gapout_csv_path, index_col=0)
gapout_df.columns = ["value"]
gapout_df.loc["acc", "value"] = acc
gapout_df.loc["list_of_TMDs", "value"] = df.loc[acc, "list_of_TMDs"]
df_gap = pd.concat([df_gap,gapout_df], axis=1)
# transpose dataframe df_gap
df_gap = df_gap.T
df_gap.set_index("acc", inplace=True)
df_gap.to_csv(pathdict["list_gap_summary_csv"], sep=",", quoting=csv.QUOTE_NONNUMERIC)
logging.info("~~~~~~~~~~~~ finished gather_gap_densities ~~~~~~~~~~~~")
# # test if the dataframe has already been created, otherwise re-open from uniprot csv file
# if os.path.isfile(pathdict["dfout10_uniprot_gaps"]):
# df = pd.read_csv(pathdict["dfout10_uniprot_gaps"], sep=",", quoting=csv.QUOTE_NONNUMERIC, index_col=[0])
# logging.info('df loaded from %s' % pathdict["dfout10_uniprot_gaps"])
# else:
# raise FileNotFoundError(
# 'No gap analysis has been done yet. %s is not found. Please run calculate calculate_gap_densities' % pathdict[
# "dfout10_uniprot_gaps"])
#df = pd.read_csv(pathdict["list_csv"], sep = ",", quoting = csv.QUOTE_NONNUMERIC, index_col = 0)
######################################################################################################################
# #
# Get juxtamembrane regions from all proteins. Code is modified from slice_TMD_1_prot_from_homol, #
# where all homologues were sliced for a particular protein #
# #
######################################################################################################################
df["list_of_TMDs"] = df.list_of_TMDs.apply(ast.literal_eval)
df["len_list_of_TMDs"] = df["list_of_TMDs"].apply(lambda x : len(x))
index_longest_list_TMDs = df["len_list_of_TMDs"].idxmax()
longest_list_TMDs = df.loc[index_longest_list_TMDs, "list_of_TMDs"]
df["is_multipass"] = df.list_of_TMDs.apply(lambda x: "TM02" in x)
# np.where syntax: np.where(boolean_query, value_if_query_true, value_if_query_false)
# @RJ, If TM01_start_in_SW_alignment is not an integer above 0, replaces with np.nan?
df['start_juxta_before_TM01'] = np.where(df['TM01_start'] > 0, 0, np.nan)
# if the TM01_start_in_SW_alignment is 0, there is no JM region N-terminal to the TMD, therefore replace end_juxta_before_TM01 with np.nan, otherwise use TM01_start_in_SW_alignment
df['end_juxta_before_TM01'] = np.where(df['TM01_start'] == 0, np.nan, df['TM01_start'])
# the start of the juxtamembrane region after TM01 is the end of TM01
df['start_juxta_after_TM01'] = df['TM01_end']
# divide into single-pass (sp) and multipass (mp) dataframes
sp = df.loc[df["is_multipass"] == False]
mp = df.loc[df["is_multipass"]]
if not mp.empty:
# for multipass proteins, the end after TM01 is the middle between TM01 and TM02
mp_end_juxta_after_TM01 = mp["TM01_end"] + ((mp["TM02_start"] - mp["TM01_end"]) / 2)
else:
# if there is no multipass in the dataset, return an empty series, which will be ignored
mp_end_juxta_after_TM01 = pd.Series()
if not sp.empty:
# for single-pass proteins, the end after TM01 is the end of the sequence
sp_end_juxta_after_TM01 = sp['seqlen']
else:
# if there is no singlepass in the dataset, return an empty series, which will be ignored
sp_end_juxta_after_TM01 = pd.Series()
# join the mp and sp together, and add to the main dataframe as integers
df['end_juxta_after_TM01'] = | pd.concat([mp_end_juxta_after_TM01, sp_end_juxta_after_TM01]) | pandas.concat |
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
from numbers import Number
import os
from datetime import datetime
class ThermocoupleStatistics:
@classmethod
def from_folders(cls,
path: "str",
unit1_name: "str" = 'unit 1',
unit2_name: "str" = 'unit 2',
time_col: "str" = "datetime",
old_time_col: "str" = "time",
ftype: "str" = ".csv"
)->("ThermocoupleStatistics", "list"):
"""
from_folders class method
=========================
PURPOSE
=======
        This method provides a quick way to convert all of TCReader's generated thermocouple data (in csv format)
        into a pandas.DataFrame object using only the folder containing these data.
        Because the dual version of TCReader is intended to be used with exactly 2 TCs simultaneously, both
        TC unit names are required, or else the analysis will not be performed.
        TODO- could be generalized to handle any number of TC inputs
        Args:
            path: (*str)- relative or direct path to the desired measurement datasets
            unit1_name: (*str)- name of the first TC/CK-unit
            unit2_name: (*str)- name of the second TC/CK-unit
            time_col: (*str)- name of the new date + time column in a YYYY-MM-DD HH:MM:ss format
            old_time_col: (*str)- name of the current time column (can be adjusted if the output csvs are changed)
            ftype: (*str)- file type in which the TCReader-generated data is stored (default csv - won't work with others)
Returns:
ThermocoupleStatistics object
"""
devices_params = []
unit_df1 = []
unit_df2 = []
for file in os.listdir(path):
if file.endswith(ftype):
device_params = {}
get_data = file.split("-")
unit = get_data[0]
_date = file.split(" ")[-1].split(".")[0]
try:
device_params["ck_unit"] = get_data[0]
device_params["date"] = _date
if len(get_data) == 6:
device_params["port"] = get_data[1]
device_params["tc_name"] = get_data[2]
except Exception as e:
print(f"An error has occurred:\n{e}")
devices_params.append(device_params)
if unit == unit1_name:
_path = os.path.join(path, file)
df1 = pd.read_csv(_path, engine = "c")
df1[time_col] = df1[old_time_col].apply(lambda row: row + f" {_date}")
df1[time_col] = pd.to_datetime(df1[time_col])
unit_df1.append(df1)
elif unit == unit2_name:
_path = os.path.join(path, file)
df2 = pd.read_csv(_path, engine = "c")
df2[time_col] = df2[old_time_col].apply(lambda row: row + f" {_date}")
df2[time_col] = pd.to_datetime(df2[time_col])
unit_df2.append(df2)
tc_df1 = pd.concat(unit_df1) # first df
tc_df2 = pd.concat(unit_df2) # Second df
return cls(tc_df1,tc_df2, *devices_params)
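    # Example usage (hedged sketch; the folder path below is a placeholder and the
    # unit names must match the actual TCReader output files):
    #     tcs = ThermocoupleStatistics.from_folders("measurements/2020-09-30",
    #                                               unit1_name="unit 1",
    #                                               unit2_name="unit 2")
    #     tcs.concat_channels(test_period=20, heat_time=60)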
def __init__(self, tc1_df, tc2_df, *device_params):
self.tc1_df = tc1_df
self.tc2_df = tc2_df
self._merged_df = pd.DataFrame()
self._params = device_params
self._test_setpoints = [50, 100, 200, 300, 400, 500, 600, 700, 800, 900, 1000, 1100, 1150]
# try to get tc names
try:
self._tc_names = {unit["tc_name"] for unit in self._params}
except KeyError:
self._tc_names = set(['TC1','TC2'])
def concat_channels(self,
test_period: "int",
heat_time: "int" = 60,
col_names: "list" = ['temperature1', 'datetime1', 'temperature2', 'datetime2'],
date_sort: "bool" = False,
**pdkwargs
):
"""
PURPOSE
=======
        Concatenate both thermocouple channels according to time into a new dataframe.
        Additionally, add cumulative time sum columns in seconds.
New dataframe is named merged_df and is a property of the ThermocoupleStatistics instance.
Args:
test_period: (*int)- the tested temperature period in minutes
heat_time: (*int)- the heating period (ramp period) for each temperature preset
col_names: (*list)- list of the new column names arranged in a format of:
['temp_tc1', 'datetime_tc1','temp_tc2', 'datetime_tc2'].
Can change column names but should maintain this kind of naming convention to avoid confusion.
date_sort: (*bool)- sort merge_df according to date and time (default -False)
Return:
None
"""
# Creating a merged dataframe
self._merged_df = pd.DataFrame(**pdkwargs)
self._merged_df = pd.concat([self.tc1_df, self.tc2_df], axis=1)
# fixing the dataframe
self._merged_df.pop('time')
self._merged_df.pop('event')
self._merged_df.reset_index(drop=True, inplace=True)
self._merged_df.columns = col_names
# sort according the first datetime column
if date_sort:
self._merged_df = self._merged_df.sort_values(col_names[1])
test_period = test_period * 60 # convert to seconds
heat_time = heat_time * 60 # convert to seconds
total_setpoint_time = heat_time + test_period
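        # e.g. (illustrative): test_period=20 min and heat_time=60 min -> the cumulative timer resets every (20 + 60) * 60 = 4800 s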
datetime1 = col_names[1]
datetime2 = col_names[3]
time_offset1 = time_offset2 = self._merged_df[datetime1].loc[0]
time_cumsum1 = []
time_cumsum2 = []
for index, row in self._merged_df.iterrows():
sec_counter1 = (row[datetime1] - time_offset1).seconds
sec_counter2 = (row[datetime2] - time_offset2).seconds
time_cumsum1.append(sec_counter1)
time_cumsum2.append(sec_counter2)
if sec_counter1 >= total_setpoint_time:
time_offset1 = row[datetime1]
if sec_counter2 >= total_setpoint_time:
time_offset2 = row[datetime2]
self._merged_df["time1_cumsum"] = time_cumsum1
self._merged_df["time2_cumsum"] = time_cumsum2
def test_statistics(self,
temp_column: "str",
temp_range: "float" = 5.0,
mean_std: "bool" = True,
max_min: "bool" =False,
add_stats: "dict" = {}) -> "list":
"""
PURPOSE
=======
Calculate simple test statistics- finding maximum, minimum values and mean/stdev.
Additional statistics could be easily added into this method using numpy and the add_stats dict.
Args:
temp_column: (*str)- name of the tested temperature column
mean_std: (*bool)- add mean and standard deviation calculations of each temperature bin (default -True)
max_min: (*bool)- add maximum and minimum measurements of each temperature bin (default -False)
temp_range: (*float)- temperature range for the bins used to evaluate the statistics (default- 5.0)
add_stats: (*dict)- additional statistics dictionary. The dict format has to be {"function_name": function object}.
For instance- {"median": np.median} (no closing braces).
Calling it has to be in the format: add_stats["median"](array_like object).
Returns:
            test_stats: (*dict)- dict in the {'mean': [], 'std': [], 'min': [], 'max': []} format
"""
_iterindex = 0
temp_bins = [[]]
test_stats = {'mean': [], 'std': [], 'min': [], 'max': []}
temp0 = self._merged_df[temp_column].iloc[0] # get first temp meas in the dataframe
for index, row in self._merged_df.iterrows():
if (row[temp_column] <= (temp0 + temp_range)) and (row[temp_column] > (temp0 - temp_range)):
temp_bins[_iterindex].append(row[temp_column])
            else:
                # start a new temperature bin and keep the measurement that triggered it
                temp0 = row[temp_column]
                _iterindex += 1
                temp_bins.append([row[temp_column]])
        # iterate over all temperature bins (independent of mean_std, so that
        # max_min and add_stats are still evaluated when mean_std is False)
        for tempRange in temp_bins:
            tempRange = np.array(tempRange)
            if mean_std:
                temp_mean = tempRange.mean()
                temp_std = tempRange.std()
                # fill dictionary
                test_stats['mean'].append(temp_mean)
                test_stats['std'].append(temp_std)
            if max_min:
                temp_min = tempRange.min()
                temp_max = tempRange.max()
                # fill dictionary
                test_stats['min'].append(temp_min)
                test_stats['max'].append(temp_max)
            # apply any user-supplied statistics, creating their result lists on first use
            try:
                for key in add_stats.keys():
                    res = add_stats[key](tempRange)
                    test_stats.setdefault(key, []).append(res)
            except Exception:
                # ignore malformed add_stats entries (e.g. not a dict or not callable)
                pass
return test_stats
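    # Example (illustrative): extra statistics can be passed as numpy callables, e.g.
    #     tcs.test_statistics(temp_column="temperature1",
    #                         add_stats={"median": np.median, "ptp": np.ptp})
    # where "tcs" is a ThermocoupleStatistics instance (hypothetical variable name).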
def filterTest_data_only(self, test_period: "int",
inplace: "bool" = False,
time_col:"str" = "time1_cumsum"):
"""
PURPOSE
=======
Filter the TC generated data, stored in the merged dataframe to only relevant test data.
EXPLANATION
===========
For the TC calibration lab, the current (30/09/2020) setup is a temperature ramp to
        a setpoint and then a dwell period of a fixed time (test_period).
Args:
test_period: (*int)- the tested temperature period in minutes
inplace: (*bool)- replace the current merged_df dataframe with the filtered one
time_col: (*str)- name of the time column
Return:
fil_df- (*DataFrame)- the filtered data
"""
test_period *= 60 # convert into seconds
fil_df = self._merged_df[(self._merged_df[time_col] <= test_period) & (self._merged_df[time_col] > 0)]
if inplace:
try:
self._merged_df = fil_df
except Exception as e:
print(f"{type(e)}:{e}\nDataFrame was not updated")
return fil_df
def filterTest_by_date(self,
start_date: "str",
end_date: "str",
time_column: "str" = "datetime1",
inplace: "bool"= False
) -> "pandas.core.frame.DataFrame":
        try:
            df = self._merged_df[(self._merged_df[time_column] < end_date) & (self._merged_df[time_column] > start_date)]
        except Exception as e:
            # the comparison fails if the time column is not yet a datetime dtype;
            # convert it in place and retry the filter
            print(e.args)
            self._merged_df[time_column] = pd.to_datetime(self._merged_df[time_column])
            df = self._merged_df[(self._merged_df[time_column] < end_date) & (self._merged_df[time_column] > start_date)]
if inplace:
self._merged_df = df
return df
@property
def merged_df(self):
return self._merged_df
@property
def unit_names(self):
self._units = {unit["ck_unit"] for unit in self._params}
return self._units
@property
def tc_names(self):
return self._tc_names
@tc_names.setter
def tc_names(self, names):
self._tc_names = set(f"_{name}" for name in names)
@property
def test_setpoints(self):
"""
test_setpoints: (*array_type)- the calibration temperature presets in a list
"""
return self._test_setpoints
@test_setpoints.setter
def test_setpoints(self, setpoints):
if isinstance(setpoints, (list, tuple)):
if all(isinstance(x, (Number)) for x in setpoints):
self._test_setpoints = setpoints
else:
raise ValueError("The Test Presets file contains none numeric values.\n"
"Please change the values or the default set points will be used")
else:
raise AttributeError("Not an array type object ")
def cal_summary(self,
temp1_col: "int" = 0,
temp2_col: "int" = 2,
temp_range: "float" = 20.0,
suffixes: "tuple" = ('_TC1', '_TC2'),
add_statistics: "dict" = {},
to_csv: "bool" = False,
mean_std = True,
max_min = True,
save_dir: "str" = os.getcwd(),
file_name: "str" = "test_statistics.csv",
time_strf_format = "%Y-%m-%d %H-%M"
)->"DataFrame":
"""
PURPOSE
=======
Get a summary of the test statistics for both thermocouples
Optional- save data to a csv file.
Args:
temp1_col: (*int) - index of the columns.values list for the first TC temperature measurements
temp2_col: (*int) - index of the columns.values list for the second TC temperature measurements
temp_range: (*float)- temperature range for the bins used to evaluate the statistics (default- 20.0)
suffixes: (*tuple)- suffixes of the merged dataframe for each thermocouple.
add_statistics: (*dict)- additional statistics dictionary. The dict format has to be {"function_name": function object}.
For instance- {"median": np.median} (no closing braces)
to_csv: (*bool)- save calibration summary into csv
Returns:
summary dataframe
"""
tc_stats = []
col_names = self._merged_df.columns.values
tc_temps = [col_names[temp1_col],col_names[temp2_col]]
for temp in tc_temps:
tc_stat = self.test_statistics(temp_column=temp,
temp_range=temp_range,
add_stats=add_statistics,
mean_std = mean_std,
max_min=max_min
)
tc_stat_df = | pd.DataFrame(tc_stat, index=self.test_setpoints) | pandas.DataFrame |
import os
import time
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.legend_handler import HandlerTuple
import datetime
import utils
import thesismain
MESSAGES_GENERATED = 'generated'
MESSAGES_PROCESSED = 'processed'
class Plot:
def __init__(self, config_name, network_name, simulation_time, verbose_logs, run_simulation, plot_all):
self.plot_counts = {}
self.time_string = time.strftime('%Y-%m-%dT%H.%M.%S')
self.config_name = config_name
self.network_name = network_name
self.simulation_time = simulation_time
self.path = os.path.join('out', '%s_%s' % (self.time_string, self.config_name))
self.verbose_logs = verbose_logs
self.plot_all = plot_all
if run_simulation:
utils.run_simulation(config_name)
utils.export_to_csv(config_name)
if plot_all:
if not os.path.exists(self.path):
os.mkdir(self.path)
# utils.save_simulation_state(self.path)
self.csv = utils.parse_omnetpp_csv(config_name)
self.run = self.csv.run.str.startswith(config_name)
self.modules = self.csv.module.str.startswith(network_name, na=False)
self.all_messages, self.all_nodes = self.prepare_all_messages()
self.all_nodes_info = {}
# get node information
nodes_info = self.create_message_csv()
for node in self.all_nodes:
node_row = network_name+'.'+node
self.all_nodes_info[node] = (nodes_info['processingType'][node_row], nodes_info['processingScale'][node_row])
def __del__(self):
if os.path.exists(self.path):
print('Plotted to %s\n' % self.path)
def save_to_file(self, group, name):
if not os.path.exists(self.path):
os.mkdir(self.path)
if group in self.plot_counts:
self.plot_counts[group] += 1
else:
self.plot_counts[group] = 0
plt.savefig('%s/%s_%d_%s' % (self.path, group, self.plot_counts[group], name))
plt.clf()
def prepare_all_messages(self):
generated = pd.DataFrame(columns=['msgID', 'time'])
# read and combine all generated messages
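        # (each per-node CSV is assumed to contain at least 'msgID' and 'time' columns,
        #  with 'time' given in milliseconds)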
for f in utils.glob_csv_files(self.config_name, MESSAGES_GENERATED):
c = pd.read_csv(f)
            generated = pd.concat([generated, c])
generated["time"] = pd.to_numeric(generated["time"])
# convert to seconds
generated['time'] = generated['time'] / 1000
generated.sort_values(by=['time', 'msgID'], inplace=True)
# make sure we don't lose lines
generated_count = generated.msgID.shape
# get max diff and set as bin size
for f in utils.glob_csv_files(self.config_name, MESSAGES_PROCESSED):
node_id = int(f.split('_')[1][:-4])
node = 'node[%s]' % node_id
node_suffix = '_%s' % node
node_time_column = 'time%s' % node_suffix
c = pd.read_csv(f)
generated = generated.merge(c, on='msgID', how='left', suffixes=[None, node_suffix])
generated.rename(columns={node_time_column: node}, inplace=True)
generated[node] = pd.to_numeric(generated[node])
# convert to seconds
generated[node] = generated[node] / 1000
if generated.msgID.shape != generated_count:
raise Exception('Messages in %s=%s do not match generated=%s' % (node, generated.msgID.shape, generated_count))
# get all nodes
cols = list(filter(lambda x: x.startswith('node'), list(generated)))
# TODO: only needed for big files, 5000 FFA run
# utils.save_to_csv(generated, 'path', 'msgs')
return generated, cols
def egalitarian_score(self, save_plot=True, label=None):
TIME_LIMIT = 5
msgs_df = self.all_messages.copy()
# determine if node was in sync
for node in self.all_nodes:
msgs_df[node + '_sync'] = (msgs_df[node].subtract(msgs_df['time']) < TIME_LIMIT).astype(int)
# calculate how many nodes were in sync
nodes_sync = [x+'_sync' for x in self.all_nodes]
msgs_df['totals'] = msgs_df[nodes_sync].sum(axis=1)
# calculate normalized value of how many nodes were out of sync
msgs_df['egalitarian_score'] = (len(self.all_nodes) - msgs_df['totals']) / len(self.all_nodes)
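        # e.g. (illustrative): with 10 nodes of which 8 received a message within TIME_LIMIT,
        # that message contributes an egalitarian score of (10 - 8) / 10 = 0.2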
if self.plot_all:
utils.save_to_csv(msgs_df, self.path, 'egalitarian_score')
df_plot = msgs_df[msgs_df['time'] < (self.simulation_time - TIME_LIMIT)]
plt.plot(df_plot['time'], df_plot['egalitarian_score'], label=label, linewidth=0.9, markevery=3.5)
# calculate mean of egalitarian score: leave away last interval bc results can be wrong due to messages not yet delivered before simulation ends
mean = df_plot['egalitarian_score'].mean()
print('Egalitarian score: %.2f' % mean)
if save_plot:
# set y-axis from 0 to max
axes = plt.gca()
# axes.set_ylim([0, 0.2])
plt.title('Egalitarian Score=%.2f' % mean)
plt.ylabel('nodes')
plt.xlabel('time (s)')
plt.legend(bbox_to_anchor=(1, 1), loc="upper left")
plt.subplots_adjust(right=0.81)
self.save_to_file('egalitarian_score', 'all')
return mean
def plot_throughput(self):
msgs_df = self.all_messages.copy()
def plot_throughput(color_groups, plot_average_only):
# determine how many nodes were in sync and average
total_count = None
total_in_sync_nodes = None
total_division = None
# plot nodes
for node in self.all_nodes:
color, label = self.get_line_color(node, color_groups)
count, division = np.histogram(msgs_df[node], bins=range(self.simulation_time))
if not plot_average_only:
plt.plot(division, np.concatenate(([0], count)), label=label, color=color, linewidth=0.5)
# sum up for throughput average
if total_count is not None:
total_count += count
total_in_sync_nodes += (count > 0).astype(int)
else:
total_count = count
total_in_sync_nodes = (count > 0).astype(int)
total_division = division
# determine average
total_count = total_count / total_in_sync_nodes
# plot average
plt.plot(total_division, np.concatenate(([0], total_count)), label='avg', linewidth=1)
if plot_average_only:
# print mean value of average
print('Throughput avg mean: %.2f' % np.mean(total_count))
axes = plt.gca()
axes.set_ylim([0, 250])
plt.title('Throughput')
plt.ylabel('messages')
plt.xlabel('time (s)')
plt.legend(bbox_to_anchor=(1, 1), loc="upper left")
plt.subplots_adjust(right=0.81)
self.save_to_file('throughput', 'all')
plot_throughput(False, False)
plot_throughput(True, False)
plot_throughput(True, True)
def plot_generation_rate(self):
generated = pd.DataFrame(columns=['msgID', 'time'])
# read and combine all generated messages
for f in utils.glob_csv_files(self.config_name, MESSAGES_GENERATED):
node = 'node[%s]' % int(f.split('_')[1][:-4])
gen_node = pd.read_csv(f)
gen_node['time'] = pd.to_numeric(gen_node["time"], downcast='float')
gen_node['time'] = gen_node['time'] / 1000
# total
            generated = pd.concat([generated, gen_node])
count, division = np.histogram(gen_node['time'], bins=range(self.simulation_time))
plt.plot(division, np.concatenate(([0], count)), label=node, linewidth=0.5)
count, division = np.histogram(generated['time'], bins=range(self.simulation_time))
plt.plot(division, np.concatenate(([0], count)), label='total', linestyle='--')
plt.title('Generated messages')
plt.ylabel('messages')
plt.xlabel('time (s)')
plt.legend(bbox_to_anchor=(1, 1), loc="upper left")
plt.subplots_adjust(right=0.81)
self.save_to_file('generated_messages', 'all')
def plot_available_processing(self):
stats = self.csv.name == 'AvailableProcessingRate'
available_processing = self.csv[self.run & self.modules & stats]
# ignore totals for now
# total = np.zeros(available_processing.iloc[0].vectime.shape)
for row in available_processing.itertuples():
# make sure to only add from nodes that did not go out of sync
# if row.vecvalue.shape == available_processing.iloc[0].vectime.shape:
# total += row.vecvalue
node = row.module.split('.')[1]
plt.plot(row.vectime, row.vecvalue, label=node, linewidth=0.5)
# plt.plot(available_processing.iloc[0].vectime, total, label="total", linestyle='--')
plt.title('Available processing rate')
plt.ylabel('messages')
plt.xlabel('time (s)')
plt.legend(bbox_to_anchor=(1, 1), loc="upper left")
plt.subplots_adjust(right=0.81)
self.save_to_file('available_processing', 'all')
def plot_inbox_solidification_buffer(self):
stats = self.csv.name == 'TotalInbox'
inbox_lengths = self.csv[self.run & self.modules & stats]
for row in inbox_lengths.itertuples():
node = row.module.split('.')[1]
plt.plot(row.vectime, row.vecvalue, label=node, linewidth=0.5)
plt.title('Inbox+Solidification buffer length')
plt.ylabel('messages')
plt.xlabel('time (s)')
plt.legend()
self.save_to_file('inbox_solidification', 'all')
def thesismain_micro_inbox_solidification_buffer(self, annotation=None):
thesismain.init_plot((16, 4))
stats = self.csv.name == 'TotalInbox'
inbox_lengths = self.csv[self.run & self.modules & stats]
for row in inbox_lengths.itertuples():
node = row.module.split('.')[1]
plt.plot(row.vectime, row.vecvalue, label=node, linewidth=0.9, markevery=0.5)
# find a specific point in a node's graph
# if node == 'node[4]':
# print(np.where(row.vecvalue == 250)[0][-1])
# print(row.vectime[np.where(row.vecvalue == 250)[0][-1]])
if annotation == 'Scenario2':
plt.annotate('out-of-sync', xy=(54, 250), xycoords='data',
xytext=(75, 190), textcoords='data',
arrowprops=dict(arrowstyle="->"),
horizontalalignment='right', verticalalignment='top',
)
plt.annotate('out-of-sync', xy=(120, 250), xycoords='data',
xytext=(135, 190), textcoords='data',
arrowprops=dict(arrowstyle="->"),
horizontalalignment='right', verticalalignment='top',
)
plt.ylim(bottom=-5, top=260)
plt.ylabel('messages')
plt.xlabel('time (s)')
plt.legend(bbox_to_anchor=(-0.17, 1.1, 1.2, .102), loc='lower left', ncol=10, mode="expand")
thesismain.save_plot('plots/%s_%s_%s' % ('micro', 'inbox', self.config_name))
def thesismain_micro_latency(self):
msgs_df = self.all_messages.copy()
difference_suffix = '_difference'
# calculate latency for every message on every node
for node in self.all_nodes:
msgs_df[node + difference_suffix] = msgs_df[node].subtract(msgs_df['time'])
cdfs = {}
nodes_in_sync = []
msgs_count = msgs_df.shape[0]
for node in self.all_nodes:
cdf_df = self.calculate_cdf(msgs_df, node)
cdfs[node] = cdf_df
msgs_node_missing = msgs_df[node].isnull().sum()
if msgs_node_missing < msgs_count * 0.05:
nodes_in_sync.append(node)
thesismain.init_plot()
# plot
for node in self.all_nodes:
cdf_df = cdfs[node]
plt.plot(cdf_df[node + difference_suffix], cdf_df['cdf'], label=node, linewidth=0.9, markevery=0.2)
plt.ylim(bottom=-0.02, top=1.02)
plt.xlim(left=-0.2, right=6.2)
plt.ylabel('cdf')
plt.xlabel('time (s)')
thesismain.save_plot('plots/%s_%s_%s' % ('micro', 'latency', self.config_name))
def thesismain_micro_available_processing(self, annotation=None):
thesismain.init_plot()
stats = self.csv.name == 'AvailableProcessingRate'
available_processing = self.csv[self.run & self.modules & stats]
for row in available_processing.itertuples():
node = row.module.split('.')[1]
# find max's index and max value
# print(np.where(row.vecvalue == row.vecvalue.max())[0][0], row.vecvalue.max())
plt.plot(row.vectime, row.vecvalue, label=node, linewidth=0.9, markevery=0.1)
# -> 89 845.0
if annotation == 'Scenario2':
plt.ylim(bottom=-5, top=310)
plt.annotate('845 MPS', xy=(89, 302), xycoords='data',
xytext=(129, 299), textcoords='data',
arrowprops=dict(arrowstyle="->"),
horizontalalignment='right', verticalalignment='top',
)
else:
plt.ylim(bottom=-5)
plt.ylabel('throughput (MPS)')
plt.xlabel('time (s)')
thesismain.save_plot('plots/%s_%s_%s' % ('micro', 'available-processing', self.config_name))
def thesismain_micro_throughput(self, annotation=None):
thesismain.init_plot()
msgs_df = self.all_messages.copy()
# determine how many nodes were in sync and average
total_count = None
total_in_sync_nodes = None
total_division = None
# plot nodes
for node in self.all_nodes:
label = node
count, division = np.histogram(msgs_df[node], bins=range(self.simulation_time))
plt.plot(division, np.concatenate(([0], count)), linewidth=0.9, markevery=0.1)
# find a specific point in a node's graph
# if node == 'node[6]':
# print(np.where(count == 0))
# sum up for throughput average
if total_count is not None:
total_count += count
total_in_sync_nodes += (count > 0).astype(int)
else:
total_count = count
total_in_sync_nodes = (count > 0).astype(int)
total_division = division
# determine average
total_count = total_count / total_in_sync_nodes
# plot average
plt.plot(total_division, np.concatenate(([0], total_count)), label='mean', linewidth=1.3, markevery=0.1)
# print mean value of average
print('Throughput avg mean: %.2f' % np.mean(total_count))
if annotation == 'Scenario1':
plt.annotate('out-of-sync', xy=(43, 0), xycoords='data',
xytext=(85, 25), textcoords='data',
arrowprops=dict(arrowstyle="->"),
horizontalalignment='right', verticalalignment='top',
)
elif annotation == 'Scenario2':
plt.annotate('out-of-sync', xy=(54, 0), xycoords='data',
xytext=(96, 25), textcoords='data',
arrowprops=dict(arrowstyle="->"),
horizontalalignment='right', verticalalignment='top',
)
plt.annotate('out-of-sync', xy=(120, 0), xycoords='data',
xytext=(166, 25), textcoords='data',
arrowprops=dict(arrowstyle="->"),
horizontalalignment='right', verticalalignment='top',
)
plt.ylim(bottom=-5, top=160)
plt.ylabel('throughput (MPS)')
plt.xlabel('time (s)')
# plt.legend(bbox_to_anchor=(-0.12, 1.1, 1.14, .102), loc='lower left', ncol=5, mode="expand")
plt.legend()
thesismain.save_plot('plots/%s_%s_%s' % ('micro', 'throughput', self.config_name))
def plot_solidification_buffer(self):
stats = self.csv.name == 'SolidificationBuffer'
buffer = self.csv[self.run & self.modules & stats]
for row in buffer.itertuples():
node = row.module.split('.')[1]
plt.plot(row.vectime, row.vecvalue, label=node, linewidth=0.5)
stats = self.csv.name == 'OutstandingMessageRequests'
requests = self.csv[self.run & self.modules & stats]
for row in requests.itertuples():
node = row.module.split('.')[1]
plt.plot(row.vectime, row.vecvalue, label=node, linestyle="--", linewidth=0.5)
plt.title('Solidification buffer / Outstanding requests')
plt.ylabel('messages')
plt.xlabel('time (s)')
plt.legend(bbox_to_anchor=(1, 1), loc="upper left")
plt.subplots_adjust(right=0.81)
self.save_to_file('solidification_buffer', 'all')
def create_message_csv(self):
# create result dataset
name = 'generatedMessages'
result = self.csv[self.run & self.modules & (self.csv.name == name)][['module']].copy().set_index('module')
# add all scalar values
processing_type_id = self.csv[self.run & self.modules & (self.csv.name == 'processingRateFile')].set_index('module')['value']
processing_type_id.replace(to_replace=0.0, value='constant', inplace=True)
processing_type_id.replace(to_replace=1.0, value='aws', inplace=True)
processing_type_id.replace(to_replace=2.0, value='azure', inplace=True)
processing_type_id.replace(to_replace=3.0, value='dimension_data', inplace=True)
result['processingType'] = processing_type_id
self.add_scalar_value(result, 'processingScale')
self.add_scalar_value(result, 'generationRate')
self.add_scalar_value(result, 'generatedMessages')
self.add_scalar_value(result, 'droppedMessages')
self.add_scalar_value(result, 'processedMessages')
self.add_scalar_value(result, 'sentMessages')
self.add_scalar_value(result, 'sentMessageRequests')
self.add_scalar_value(result, 'sentMessageRequestResponses')
self.add_scalar_value(result, 'receivedMessages')
self.add_scalar_value(result, 'receivedMessageRequests')
self.add_scalar_value(result, 'receivedMessageRequestResponses')
if self.plot_all:
utils.save_to_csv(result, self.path, 'messages')
return result
def add_scalar_value(self, result, name):
data = self.csv[self.run & self.modules & (self.csv.name == name)].set_index('module')
result[name] = data['value']
def plot_latency(self):
msgs_df = self.all_messages.copy()
difference_suffix = '_difference'
# calculate latency for every message on every node
for node in self.all_nodes:
msgs_df[node+difference_suffix] = msgs_df[node].subtract(msgs_df['time'])
cdfs = {}
nodes_in_sync = []
msgs_count = msgs_df.shape[0]
for node in self.all_nodes:
cdf_df = self.calculate_cdf(msgs_df, node)
cdfs[node] = cdf_df
msgs_node_missing = msgs_df[node].isnull().sum()
if msgs_node_missing < msgs_count * 0.05:
nodes_in_sync.append(node)
def plot_latency_percentile(percentiles):
nodes_difference_columns = [x + difference_suffix for x in nodes_in_sync]
for percentile in percentiles:
percentile_name = 'percentile_' + str(percentile)
percentile_name_difference = percentile_name + '_difference'
percentile_df = pd.DataFrame(columns=[percentile_name_difference])
percentile_df[percentile_name_difference] = msgs_df[nodes_difference_columns].quantile(percentile, axis=1)
cdf_df = self.calculate_cdf(percentile_df, percentile_name)
plt.plot(cdf_df[percentile_name_difference], cdf_df['cdf'], label=str(percentile), linewidth=0.7)
# print mean value of CDF
np_arr = cdf_df['cdf'].to_numpy()
m_index = np.where(np_arr >= 0.5)[0][0]
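                # the first index at which the CDF reaches 0.5 corresponds to the median latency of this percentile series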
print('Percentile CDF %.2f mean: %.2f' % (percentile, cdf_df[percentile_name_difference][m_index]))
plt.title('Latencies percentile of in-sync nodes')
            plt.ylabel('cdf')
            plt.xlabel('latency (s)')
plt.legend(bbox_to_anchor=(1, 1), loc="upper left")
plt.subplots_adjust(right=0.81)
self.save_to_file('latencies', 'percentiles')
def plot_latency_cdf(color_groups, sync_nodes_only=False):
for node in self.all_nodes:
if not sync_nodes_only or (sync_nodes_only and node in nodes_in_sync):
color, label = self.get_line_color(node, color_groups)
cdf_df = cdfs[node]
plt.plot(cdf_df[node+difference_suffix], cdf_df['cdf'], label=label, color=color, linewidth=0.7)
file_name = 'latencies'
if sync_nodes_only:
file_name = file_name + '_sync_only'
plt.title(file_name)
plt.ylabel('cdf')
plt.xlabel('time')
plt.legend(bbox_to_anchor=(1, 1), loc="upper left")
plt.subplots_adjust(right=0.81)
self.save_to_file(file_name, 'all')
def plot_latency_over_time(color_groups):
# plot latency over time
for node in self.all_nodes:
color, label = self.get_line_color(node, color_groups)
plt.plot(msgs_df['time'], msgs_df[node+difference_suffix], label=label, color=color, linewidth=0.5)
plt.title('Latencies over time')
plt.ylabel('latency (s)')
plt.xlabel('time')
plt.legend(bbox_to_anchor=(1, 1), loc="upper left")
plt.subplots_adjust(right=0.81)
self.save_to_file('latencies', 'over_time')
plot_latency_cdf(False)
plot_latency_cdf(True)
plot_latency_cdf(False, sync_nodes_only=True)
plot_latency_cdf(True, sync_nodes_only=True)
plot_latency_percentile([0.5, 0.9, 0.95])
plot_latency_over_time(False)
plot_latency_over_time(True)
def get_line_color(self, node, color_groups):
if not color_groups:
return None, node
processing_scale = self.all_nodes_info[node][1]
if 0 <= processing_scale < 1:
return 'red', '0<=x<1'
elif 1 <= processing_scale < 1.2:
return 'orange', '1<=x<1.2'
elif 1.2 <= processing_scale < 1.5:
return 'blue', '1.2<=x<1.5'
elif 1.5 <= processing_scale:
return 'green', '1.5<=x'
def calculate_cdf(self, df, node):
node_difference = node+'_difference'
stats_df = df.groupby([node_difference])[node_difference].agg('count').pipe(pd.DataFrame).rename(columns={node_difference: 'frequency'})
stats_df['pdf'] = stats_df['frequency'] / sum(stats_df['frequency'])
stats_df['cdf'] = stats_df['pdf'].cumsum()
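        # e.g. (illustrative): latency values [1, 1, 2] give frequency [2, 1], pdf [0.667, 0.333], cdf [0.667, 1.0]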
stats_df = stats_df.reset_index()
if not os.path.exists(self.path):
os.mkdir(self.path)
utils.save_to_csv(stats_df, self.path, 'latency_' + node)
return stats_df
class PlotBasic(Plot):
def __init__(self, config_name, network_name, simulation_time, verbose_logs=True, run_simulation=True, plot_all=True):
super().__init__(config_name, network_name, simulation_time, verbose_logs, run_simulation, plot_all)
if plot_all:
if verbose_logs:
self.plot_buffers()
self.plot_available_processing()
self.plot_solidification_buffer()
self.plot_inbox_solidification_buffer()
self.plot_throughput()
self.plot_latency()
self.plot_generation_rate()
self.egalitarian_score()
def plot_buffers(self):
stats = self.csv.name == 'InboxLength'
inbox_lengths = self.csv[self.run & self.modules & stats]
for row in inbox_lengths.itertuples():
node = row.module.split('.')[1]
plt.plot(row.vectime, row.vecvalue, label=node, linewidth=0.5)
plt.title('Inbox length')
plt.ylabel('messages')
plt.xlabel('time (s)')
plt.legend()
self.save_to_file('buffers', 'all')
class PlotV1(Plot):
def __init__(self, config_name, network_name, simulation_time, verbose_logs=True, run_simulation=True, plot_all=True):
super().__init__(config_name, network_name, simulation_time, verbose_logs, run_simulation, plot_all)
if plot_all:
if verbose_logs:
self.plot_buffers()
self.plot_available_processing()
self.plot_solidification_buffer()
self.plot_inbox_solidification_buffer()
self.plot_sending_rate()
self.plot_throughput()
self.plot_latency()
self.plot_generation_rate()
self.egalitarian_score()
def create_message_csv(self):
# create result dataset
name = 'generatedMessages'
result = self.csv[self.run & self.modules & (self.csv.name == name)][['module']].copy().set_index('module')
# add all scalar values
processing_type_id = \
self.csv[self.run & self.modules & (self.csv.name == 'processingRateFile')].set_index('module')['value']
processing_type_id.replace(to_replace=0.0, value='constant', inplace=True)
processing_type_id.replace(to_replace=1.0, value='aws', inplace=True)
processing_type_id.replace(to_replace=2.0, value='azure', inplace=True)
processing_type_id.replace(to_replace=3.0, value='dimension_data', inplace=True)
result['processingType'] = processing_type_id
self.add_scalar_value(result, 'processingScale')
self.add_scalar_value(result, 'generationRate')
self.add_scalar_value(result, 'generatedMessages')
self.add_scalar_value(result, 'droppedMessages')
self.add_scalar_value(result, 'processedMessages')
self.add_scalar_value(result, 'sentMessages')
self.add_scalar_value(result, 'sentMessageRequests')
self.add_scalar_value(result, 'sentMessageRequestResponses')
self.add_scalar_value(result, 'receivedMessages')
self.add_scalar_value(result, 'receivedMessageRequests')
self.add_scalar_value(result, 'receivedMessageRequestResponses')
self.add_scalar_value(result, 'sentHealthMessages')
self.add_scalar_value(result, 'receivedHealthMessages')
# TODO: there are some issues with the matrix, therefore ignore for now
# nodes = list(result.index.values)
# for n in nodes:
# self.add_scalar_value(result, 'droppedMessages-%s' % n)
if self.plot_all:
utils.save_to_csv(result, self.path, 'messages')
return result
def plot_buffers(self, to_node=None):
group = 'buffers'
totals = {}
# plot inboxes
stats = self.csv.name == 'InboxLength'
inbox_lengths = self.csv[self.run & self.modules & stats]
for row in inbox_lengths.itertuples():
node = row.module.split('.')[1]
totals[node] = row.vecvalue
plt.plot(row.vectime, row.vecvalue, label='In ' + node, linewidth=0.5)
plt.title('Inboxes')
plt.ylabel('messages')
plt.xlabel('time (s)')
plt.legend(bbox_to_anchor=(1, 1), loc="upper left")
plt.subplots_adjust(right=0.79)
self.save_to_file(group, 'inboxes')
# plot outboxes
stats = self.csv.name.str.startswith('OutboxLength')
outbox_lengths = self.csv[self.run & self.modules & stats]
for row in outbox_lengths.itertuples():
node = row.module.split('.')[1]
outbox = row.name.split('.')[1]
if to_node is None or to_node == outbox:
plt.plot(row.vectime, row.vecvalue, label=('O %s>%s' % (node, outbox)), linewidth=0.5)
plt.title('Outboxes')
plt.ylabel('messages')
plt.xlabel('time (s)')
plt.legend(bbox_to_anchor=(1, 1), loc="upper left")
plt.subplots_adjust(right=0.70)
if to_node is None:
self.save_to_file(group, 'outboxes')
else:
self.save_to_file(group, 'outboxes' + to_node)
# TODO: figure out a way to interpolate or do histogram to be able to sum them together
# # plot totals
# for node, total in totals.items():
# plt.plot(range(len(total)), total, label='InOut ' + node)
#
# plt.title('Buffer totals')
# plt.ylabel('messages')
# plt.xlabel('time (s)')
# plt.legend()
# self.save_to_file(group, 'totals')
def plot_sending_rate(self):
stats = self.csv.name == 'Health'
health = self.csv[self.run & self.modules & stats]
fig, ax1 = plt.subplots()
ax1.set_xlabel('time (s)')
ax1.set_ylabel('health')
ax1.tick_params(axis='y')
for row in health.itertuples():
ax1.plot(row.vectime, row.vecvalue, label='health ' + row.module.split('.')[1], linestyle='--', linewidth=0.5)
# instantiate a second axes that shares the same x-axis
ax2 = ax1.twinx()
ax2.set_ylabel('sending rate (M/s)')
ax2.tick_params(axis='y')
stats = self.csv.name.str.startswith('SendingRate')
sending_rates = self.csv[self.run & self.modules & stats]
for row in sending_rates.itertuples():
node = row.module.split('.')[1]
to = row.name.split('.')[1]
ax2.plot(row.vectime, row.vecvalue, label=('R %s>%s' % (node, to)), linewidth=0.5)
plt.title('Health vs. sending rate')
ax1.legend()
ax2.legend(bbox_to_anchor=(1, 1), loc="upper left")
fig.tight_layout() # otherwise the right y-label is slightly clipped
plt.subplots_adjust(right=0.70)
self.save_to_file('sending_rates', 'all')
def plot_allowed_receiving_rate(self, print_node=None, malicious_node=None, annotation=None):
thesismain.init_plot()
# plot inboxes
stats = self.csv.name == 'AllowedReceivingRate'
allowed_receiving_rate = self.csv[self.run & self.modules & stats]
print_node_values = None
for row in allowed_receiving_rate.itertuples():
node = row.module.split('.')[1]
if print_node is None or print_node == node:
print_node_values = row.vecvalue
plt.plot(row.vectime, row.vecvalue, label='allowed', linewidth=1.7, markevery=0.2)
# plot actual receiving rate
stats = self.csv.name.str.startswith('ReceivingRate')
actual_receiving_rates = self.csv[self.run & self.modules & stats]
for row in actual_receiving_rates.itertuples():
receiver = row.module.split('.')[1]
sender = row.name.split('.')[1]
if print_node is None or print_node == receiver:
# find a specific point in a node's graph
# if sender == 'node[7]':
# print(len(row.vectime))
# print(row.vecvalue[-1])
# if malicious_node == sender:
# for count, val in enumerate(row.vecvalue):
# if len(print_node_values) > count:
# if val > print_node_values[count]:
# print('%d: %0.2f %0.2f <--' % (count, print_node_values[count], val))
# else:
# print('%d: %0.2f %0.2f' % (count, print_node_values[count], val))
plt.plot(row.vectime, row.vecvalue, label=('%s' % (sender)), linewidth=0.9)
# plt.ylim(bottom=-5, top=260)
if annotation == 'ExceedSendingRateAttack':
plt.annotate('dropped', xy=(20, 110.66), xycoords='data',
xytext=(27, 60), textcoords='data',
arrowprops=dict(arrowstyle="->"),
horizontalalignment='right', verticalalignment='top',
)
elif annotation == 'SubceedSendingRateAttack':
plt.annotate('dropped', xy=(48, 10), xycoords='data',
xytext=(60, 40), textcoords='data',
arrowprops=dict(arrowstyle="->"),
horizontalalignment='right', verticalalignment='top',
)
plt.ylabel('rate (MPS)')
plt.xlabel('time (s)')
# plt.legend(bbox_to_anchor=(-0.12, 1.1, 1.14, .102), loc='lower left', ncol=5, mode="expand")
thesismain.save_plot('plots/%s_%s_%s' % ('attack', 'receiving_rate', self.config_name))
def plot_outboxes(self, print_node=None, annotation=None):
thesismain.init_plot()
# plot outboxes
stats = self.csv.name.str.startswith('OutboxLength')
outbox_lengths = self.csv[self.run & self.modules & stats]
for row in outbox_lengths.itertuples():
node = row.module.split('.')[1]
outbox = row.name.split('.')[1]
if print_node is None or node == print_node:
plt.plot(row.vectime, row.vecvalue, label=outbox, linewidth=0.9, markevery=0.1)
if outbox == 'node[1]':
print(len(row.vectime))
print(row.vecvalue[-1])
plt.ylim(top=260)
        axes = plt.gca()
axes.set_yticks([0, 100, 200, 250])
if annotation == 'LowHealthAttack':
plt.annotate('dropped', xy=(17, 250), xycoords='data',
xytext=(25, 200), textcoords='data',
arrowprops=dict(arrowstyle="->"),
horizontalalignment='right', verticalalignment='top',
)
plt.ylabel('messages')
plt.xlabel('time (s)')
plt.legend(bbox_to_anchor=(-0.12, 1.1, 1.14, .102), loc='lower left', ncol=5, mode="expand")
thesismain.save_plot('plots/%s_%s_%s' % ('attack', 'outboxes', self.config_name))
class Serial20(Plot):
def __init__(self, config_name, network_name, simulation_time):
super().__init__(config_name, network_name, simulation_time)
def plot_issue(self):
# calculate non-disseminated messages at 10s and 60s
times = [10000, 60000]
time_frame = 5000
generated = pd.DataFrame(columns=['msgID', 'time'])
# read and combine all generated messages
for f in utils.glob_csv_files(self.config_name, MESSAGES_GENERATED):
c = pd.read_csv(f)
            generated = pd.concat([generated, c])
generated["time"] = pd.to_numeric(generated["time"])
totals = None
# make sure we don't lose lines
generated_count = generated.msgID.shape
print(generated)
for f in utils.glob_csv_files(self.config_name, MESSAGES_PROCESSED):
            node = 'node[%s]' % int(f.split('_')[1][:-4])
c = pd.read_csv(f)
merged = generated.merge(c, on='msgID')
if merged.msgID.shape != generated_count:
raise Exception(
'Messages in %s=%s do not match generated=%s' % (node, merged.msgID.shape, generated_count))
# calculate diff
merged['difference'] = merged.time_y.subtract(merged.time_x)
for time in times:
non_disseminated = merged[ (merged['time_x'] <= (time-time_frame)) & (merged['time_y'] > time) ]
print('\n--------------------')
print('%s: time:%d - time_frame:%d' % (node, time, time_frame))
print(non_disseminated)
print(non_disseminated['msgID'].count())
return
lines1 = []
lines2 = []
names = []
fig, ax1 = plt.subplots()
stats = self.csv.name == 'AvailableProcessingRate'
available_processing = self.csv[self.run & self.modules & stats]
for row in available_processing.itertuples():
node = row.module.split('.')[1]
names.append(node)
markevery = 0.1
if node == 'node[1]':
marker = 'v'
markevery = (0.05, 0.1)
elif node == 'node[2]':
marker = '.'
else:
marker = '^'
l, = ax1.plot(row.vectime, row.vecvalue, label=node, linestyle='dashed', marker=marker, markevery=markevery)
lines1.append(l)
# instantiate a second axes that shares the same x-axis
ax2 = ax1.twinx()
stats = self.csv.name == 'InboxLength'
inbox_lengths = self.csv[self.run & self.modules & stats]
for row in inbox_lengths.itertuples():
node = row.module.split('.')[1]
markevery = 0.1
if node == 'node[1]':
marker = 'v'
markevery = (0.05, 0.1)
elif node == 'node[2]':
marker = '.'
else:
marker = '^'
l, = ax2.plot(row.vectime, row.vecvalue, label=node, marker=marker, markevery=markevery)
lines2.append(l)
ax1.axvline(60, alpha=0.8, linestyle='dotted', color='gray')
ax1.set_xlabel('time (s)')
ax1.set_ylabel('processing rate (messages)')
ax1.tick_params(axis='y')
ax1.set_ylim(bottom=0)
ax2.set_ylabel('inbox buffer (messages)')
ax2.tick_params(axis='y')
ax2.set_ylim(bottom=0)
# need to use ax1 for the legend bc otherwise 'tight_layout' clips it away
# ax1.legend([(lines1[i], lines2[i]) for i in range(len(lines1))], names, handler_map={tuple: HandlerTuple(ndivide=None)},
# bbox_to_anchor=(0., 1.02, 1., .102), loc='lower left', ncol=3, mode="expand")
ax1.legend(lines2, names,
bbox_to_anchor=(0., 1.02, 1., .102), loc='lower left', ncol=3, mode="expand")
fig.set_dpi(300)
fig.set_size_inches(6.4, 2.15)
fig.tight_layout() # otherwise the right y-label is slightly clipped
# plt.title('Issue')
# ax1.legend()
        self.save_to_file('processing_vs_inbox_example', 'all')
def plot_available_processing(self):
node1 = None
node2 = None
others = None
fig, (ax1, ax2) = plt.subplots(2)
stats = self.csv.name == 'AvailableProcessingRate'
available_processing = self.csv[self.run & self.modules & stats]
for row in available_processing.itertuples():
node = row.module.split('.')[1]
if node == 'node[2]':
node1, = ax1.plot(row.vectime, row.vecvalue, label=node, color='green', marker='.', markevery=0.1)
elif node == 'node[4]':
node2, = ax1.plot(row.vectime, row.vecvalue, label=node, color='purple', marker='x', markevery=0.1)
else:
others, = ax1.plot(row.vectime, row.vecvalue, label='others', color='gray')
ax1.set_ylabel('messages')
ax1.set_title('Available processing rate')
ax1.set_ylim(bottom=0)
ax1.set_xticklabels([]) # remove x labels
# need to use ax1 for the legend bc otherwise 'tight_layout' clips it away
ax1.legend([node1, node2, others], ['node[2]', 'node[4]', 'others'], bbox_to_anchor=(0., 1.17, 1., .102), loc='lower left', ncol=3, mode="expand")
# plot generated messages
generated = pd.DataFrame(columns=['msgID', 'time'])
# read and combine all generated messages
for f in utils.glob_csv_files(self.config_name, MESSAGES_GENERATED):
            node = 'node[%s]' % int(f.split('_')[1][:-4])
gen_node = pd.read_csv(f)
gen_node['time'] = pd.to_numeric(gen_node["time"], downcast='float')
gen_node['time'] = gen_node['time'] / 1000
# total
            generated = pd.concat([generated, gen_node])
count, division = np.histogram(gen_node['time'], bins=range(self.simulation_time))
# print(node, count)
# if count[0] == 0:
# ax2.plot(division, np.concatenate(([0], count)))
# else:
# ax2.plot(division, np.concatenate(([0], count)), label=node)
count, division = np.histogram(generated['time'], bins=range(self.simulation_time))
ax2.plot(division, np.concatenate(([0], count)), label='total', linestyle=':')
ax2.set_title('Total generated messages')
ax2.set_ylabel('messages')
ax2.set_xlabel('time (s)')
fig.set_dpi(300)
fig.set_size_inches(6.4, 4)
fig.tight_layout() # otherwise the right y-label is slightly clipped
self.save_to_file('available_processing', 'all')
def plot_inboxes(self):
node1 = None
node2 = None
others = None
fig, ax1 = plt.subplots()
stats = self.csv.name == 'InboxLength'
inbox_lengths = self.csv[self.run & self.modules & stats]
for row in inbox_lengths.itertuples():
node = row.module.split('.')[1]
if node == 'node[2]':
node1, = ax1.plot(row.vectime, row.vecvalue, label=node, color='green', marker='.', markevery=0.1)
elif node == 'node[4]':
node2, = ax1.plot(row.vectime, row.vecvalue, label=node, color='purple', marker='x', markevery=0.1)
else:
others, = ax1.plot(row.vectime, row.vecvalue, label='others', color='gray')
# plot inboxes for V1
config_name, network_name = 'Scenario1V1', 'Scenario1V1'
self.csv = utils.parse_omnetpp_csv(config_name)
self.run = self.csv.run.str.startswith(config_name)
self.modules = self.csv.module.str.startswith(network_name, na=False)
stats = self.csv.name == 'InboxLength'
inbox_lengths = self.csv[self.run & self.modules & stats]
for row in inbox_lengths.itertuples():
node = row.module.split('.')[1]
if node == 'node[2]':
ax1.plot(row.vectime, row.vecvalue, label=node, color='green', marker='.', markevery=0.1, linestyle=(0, (5, 3)))
elif node == 'node[4]':
ax1.plot(row.vectime, row.vecvalue, label=node, color='purple', marker='x', markevery=0.1, linestyle=(0, (5, 3)))
else:
ax1.plot(row.vectime, row.vecvalue, label='others', color='gray')
ax1.set_ylabel('inbox length (messages)')
ax1.set_xlabel('time (s)')
ax1.legend([node1, node2, others], ['node[2]', 'node[4]', 'others'], bbox_to_anchor=(0., 1.02, 1., .102), loc='lower left', ncol=3, mode="expand")
fig.set_dpi(300)
fig.set_size_inches(6.4, 3.3)
fig.tight_layout() # otherwise the right y-label is slightly clipped
self.save_to_file('inboxes', 'all')
def plot_throughput(self):
self.config_name = 'Scenario1'
node1 = None
node2 = None
others = None
fig, (ax1, ax2) = plt.subplots(2)
# read and combine all generated messages
for f in utils.glob_csv_files(self.config_name, MESSAGES_PROCESSED):
            node = 'node[%s]' % int(f.split('_')[1][:-4])
pro_node = pd.read_csv(f)
pro_node['time'] = pd.to_numeric(pro_node["time"], downcast='float')
pro_node['time'] = pro_node['time'] / 1000
if node == 'node[2]':
count, division = np.histogram(pro_node['time'], bins=range(self.simulation_time))
node1, = ax1.plot(division, np.concatenate(([0], count)), label=node, color='green', marker='.', markevery=0.1)
elif node == 'node[4]':
count, division = np.histogram(pro_node['time'], bins=range(self.simulation_time))
node2, = ax1.plot(division, np.concatenate(([0], count)), label=node, color='purple', marker='x', markevery=0.1)
else:
count, division = np.histogram(pro_node['time'], bins=range(self.simulation_time))
others, = ax1.plot(division, np.concatenate(([0], count)), label=node, color='gray')
ax1.set_ylabel('messages')
ax1.set_title('Throughput without Healthor')
ax1.set_xticklabels([]) # remove x labels
ax1.legend([node1, node2, others], ['node[2]', 'node[4]', 'others'], bbox_to_anchor=(0., 1.17, 1., .102), loc='lower left', ncol=3, mode="expand")
# plot throughput for V1
self.config_name = 'Scenario1V1'
# read and combine all generated messages
for f in utils.glob_csv_files(self.config_name, MESSAGES_PROCESSED):
            node = 'node[%s]' % int(f.split('_')[1][:-4])
pro_node = pd.read_csv(f)
pro_node['time'] = | pd.to_numeric(pro_node["time"], downcast='float') | pandas.to_numeric |
# Copyright 2021 NREL
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
from copy import deepcopy as dcopy
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from pandas.core.base import DataError
from scipy import interpolate
from time import perf_counter as timerpc
from flasc import utilities as fsut
from floris.utilities import wrap_360, wrap_180
def _run_fi_serial(df_subset, fi, include_unc=False,
unc_pmfs=None, unc_options=None, verbose=False):
"""Evaluate the FLORIS solutions for a set of wind directions,
wind speeds and turbulence intensities in serial (non-
parallelized) mode.
Args:
df_subset ([pd.DataFrame]): Dataframe containing the columns
'wd', 'ws' and 'ti'. The FLORIS power predictions will be
calculated for each row/set of ambient conditions.
fi ([floris]): FLORIS object for the farm of interest.
verbose (bool, optional): Print information to terminal, used
for debugging. Defaults to False.
Returns:
df_out ([pd.DataFrame]): Identical to the inserted dataframe,
df_subset, but now with additional columns containing the
predicted power production for each turbine, as pow_000, ...
pow_00N.
"""
nturbs = len(fi.layout_x)
df_out = df_subset.sort_values(by=["wd", "ws"])
use_model_params = ('model_params_dict' in df_subset.columns)
use_yaw = ('yaw_000' in df_subset.columns)
if (use_model_params | include_unc):
raise NotImplementedError("Functionality not yet implemented since moving to floris v3.0.")
# Specify dataframe columns
pow_cols = ["pow_{:03d}".format(ti) for ti in range(nturbs)]
ws_cols = ["ws_{:03d}".format(ti) for ti in range(nturbs)]
wd_cols = ["wd_{:03d}".format(ti) for ti in range(nturbs)]
ti_cols = ["ti_{:03d}".format(ti) for ti in range(nturbs)]
yaw_rel = np.zeros((df_out.shape[0], nturbs))
if use_yaw:
yaw_cols = ['yaw_%03d' % ti for ti in range(nturbs)]
wd = np.array(df_out['wd'], dtype=float)
yaw_rel = wrap_180(
(
np.array(df_out[yaw_cols], dtype=float) -
np.stack((wd,) * nturbs, axis=0).T
)
)
if np.any(np.abs(yaw_rel) > 30.0):
raise DataError('Yaw should be defined in domain [0, 360) deg.')
if 'ti' not in df_out.columns:
df_out['ti'] = np.min(fi.floris.farm.turbulence_intensity)
# Perform grid-style calculation, if possible
n_unq = (
df_out["ws"].nunique() *
df_out["wd"].nunique() *
df_out["ti"].nunique()
)
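    # e.g. (illustrative): 72 unique wind directions x 10 wind speeds x 1 turbulence
    # intensity = 720 rows means every (wd, ws, ti) combination appears exactly once,
    # so FLORIS can be evaluated in a single batched grid calculation instead of row by row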
if n_unq == df_out.shape[0]:
# Reformat things to grid style calculation
wd_array = np.sort(df_out["wd"].unique())
ws_array = np.sort(df_out["ws"].unique())
ti = df_out["ti"].unique()[0]
# Specify interpolant to map data appropriately
X, Y = np.meshgrid(wd_array, ws_array, indexing='ij')
if use_yaw:
# Map the yaw angles in the appropriate format
F = interpolate.NearestNDInterpolator(
df_out[["wd", "ws"]],
yaw_rel
)
yaw_angles = F(X, Y)
else:
yaw_angles = np.zeros((len(wd_array), len(ws_array), nturbs))
# Calculate the FLORIS solutions in grid-style
fi.reinitialize(
wind_directions=wd_array,
wind_speeds=ws_array,
turbulence_intensity=ti,
)
fi.calculate_wake(yaw_angles=yaw_angles)
turbine_powers = fi.get_turbine_powers(
# include_unc=include_unc,
# unc_pmfs=unc_pmfs,
# unc_options=unc_options
)
# Format the found solutions back to the dataframe format
Fp = interpolate.NearestNDInterpolator(
np.vstack([X.flatten(), Y.flatten()]).T,
np.reshape(turbine_powers, (-1, nturbs))
)
Fws = interpolate.NearestNDInterpolator(
np.vstack([X.flatten(), Y.flatten()]).T,
np.reshape(
np.mean(fi.floris.flow_field.u, axis=(3, 4)),
(-1, nturbs)
)
)
Fti = interpolate.NearestNDInterpolator(
np.vstack([X.flatten(), Y.flatten()]).T,
np.reshape(
fi.floris.flow_field.turbulence_intensity_field[:, :, :, 0, 0],
(-1, nturbs)
)
)
# Finally save solutions to the dataframe
df_out.loc[df_out.index, pow_cols] = Fp(df_out[["wd", "ws"]]) / 1000.0
df_out.loc[df_out.index, wd_cols] = np.tile(df_out["wd"], (nturbs, 1)).T
df_out.loc[df_out.index, ws_cols] = Fws(df_out[["wd", "ws"]])
df_out.loc[df_out.index, ti_cols] = Fti(df_out[["wd", "ws"]])
else:
# If cannot process in grid-style format, process one by one (SLOW)
for iii, idx in enumerate(df_out.index):
if (
verbose and
((np.remainder(idx, 100) == 0) or idx == df_out.shape[0]-1)
):
print(' Progress: finished %.1f percent (%d/%d cases).'
% (100.*idx/df_out.shape[0], idx, df_out.shape[0]))
# # Update model parameters, if present in dataframe
# if use_model_params:
# params = df_out.loc[idx, 'model_params_dict']
# fi.set_model_parameters(params=params, verbose=False)
fi.reinitialize(
wind_speeds=[df_out.loc[idx, 'ws']],
wind_directions=[df_out.loc[idx, 'wd']],
turbulence_intensity=df_out.loc[idx, 'ti']
)
fi.calculate_wake(np.expand_dims(yaw_rel[iii, :], axis=[0, 1]))
turbine_powers = np.squeeze(
fi.get_turbine_powers(
# include_unc=include_unc,
# unc_pmfs=unc_pmfs,
# unc_options=unc_options
)
)
df_out.loc[idx, pow_cols] = turbine_powers / 1000.
df_out.loc[idx, wd_cols] = np.repeat(
df_out.loc[idx, 'wd'],
nturbs # Assumed to be uniform
)
df_out.loc[idx, ws_cols] = np.squeeze(
np.mean(fi.floris.flow_field.u, axis=(3, 4))
)
df_out.loc[idx, ti_cols] = np.squeeze(
fi.floris.flow_field.turbulence_intensity_field
)
return df_out
def calc_floris(df, fi, num_workers, job_worker_ratio=5, include_unc=False,
unc_pmfs=None, unc_options=None, use_mpi=False):
"""Calculate the FLORIS predictions for a particular wind direction, wind speed
and turbulence intensity set. This function calculates the exact solutions.
Args:
df ([pd.DataFrame]): Dataframe with at least the columns 'time', 'wd'
and 'ws'. Can optionally also have the column 'ti' and 'time'.
If the dataframe has columns 'yaw_000' through 'yaw_<nturbs>', then it
will calculate the floris solutions for those yaw angles too.
If the dataframe has column 'model_params_dict', then it will change
the floris model parameters for every run with the values therein.
fi ([FlorisInterface]): Floris object for the wind farm of interest
Returns:
[type]: [description]
"""
nturbs = len(fi.layout_x)
# Create placeholders
df[['pow_%03d' % ti for ti in range(nturbs)]] = np.nan
# Copy yaw angles, if possible
yaw_cols = ['yaw_%03d' % ti for ti in range(nturbs)]
yaw_cols = [c for c in yaw_cols if c in df.columns]
if len(yaw_cols) > 0:
if np.any(df[yaw_cols] < 0.):
raise DataError('Yaw should be defined in domain [0, 360) deg.')
# Split dataframe into subset dataframes for parallelization, if necessary
if num_workers > 1:
df_list = []
# See if we can simply split the problem up into a grid of conditions
num_jobs = num_workers * job_worker_ratio
n_unq = df["ws"].nunique() * df["wd"].nunique() * df["ti"].nunique()
if n_unq == df.shape[0]:
# Data is a grid of atmospheric conditions. Can divide and exploit
# the benefit of grid processing in floris v3.0.
Nconds_per_ti = df["ws"].nunique() * df["wd"].nunique()
Njobs_per_ti = int(np.floor(num_jobs / df["ti"].nunique()))
dN = int(np.ceil(Nconds_per_ti / Njobs_per_ti))
for ti in df["ti"].unique():
df_subset = df[df["ti"] == ti]
for ij in range(Njobs_per_ti):
df_list.append(df_subset.iloc[(ij*dN):((ij+1)*dN)])
else:
# If cannot be formatted to grid style, split blindly
dN = int(np.ceil(df.shape[0] / num_jobs))
for ij in range(num_jobs):
df_list.append(df.iloc[(ij*dN):((ij+1)*dN)])
# Calculate solutions
start_time = timerpc()
if num_workers <= 1:
print("Calculating floris solutions (non-parallelized)")
df_out = _run_fi_serial(
df_subset=df,
fi=fi,
include_unc=include_unc,
unc_pmfs=unc_pmfs,
unc_options=unc_options,
verbose=True
)
else:
print('Calculating with num_workers = %d and job_worker_ratio = %d'
% (num_workers, job_worker_ratio))
print('Each thread contains about %d FLORIS evaluations.' % dN)
# Define a tuple of arguments
multiargs = []
for df_mp in df_list:
df_mp = df_mp.reset_index(drop=True)
multiargs.append(
(df_mp, dcopy(fi), include_unc, unc_pmfs, unc_options, False)
)
if use_mpi:
# Use an MPI implementation, useful for HPC
from mpi4py.futures import MPIPoolExecutor as pool_executor
else:
# Use Pythons internal multiprocessing functionality
from multiprocessing import Pool as pool_executor
with pool_executor(num_workers) as pool:
df_list = pool.starmap(_run_fi_serial, multiargs)
df_out = pd.concat(df_list).reset_index(drop=True)
if 'index' in df_out.columns:
df_out = df_out.drop(columns='index')
t = timerpc() - start_time
print('Finished calculating the FLORIS solutions for the dataframe.')
print('Total wall time: %.3f s.' % t)
print('Mean wall time / function evaluation: %.3f s.' % (t/df.shape[0]))
return df_out
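# Example usage (hedged sketch; "df_scada" and "fi" are placeholder names for a SCADA
# dataframe with 'wd', 'ws' (and optionally 'ti') columns and a FlorisInterface object):
#     df_fi = calc_floris(df_scada, fi, num_workers=4)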
def interpolate_floris_from_df_approx(
df,
df_approx,
method='linear',
verbose=True
):
# Format dataframe and get number of turbines
df = df.reset_index(drop=('time' in df.columns))
nturbs = fsut.get_num_turbines(df_approx)
# Check if turbulence intensity is provided in the dataframe 'df'
if 'ti' not in df.columns:
if df_approx["ti"].nunique() > 3:
raise ValueError("You must include a 'ti' column in your df.")
ti_ref = np.median(df_approx["ti"])
print("No 'ti' column found in dataframe. Assuming {}".format(ti_ref))
df["ti"] = ti_ref
# Define which variables we must map from df_approx to df
varnames = ['pow']
if 'ws_000' in df_approx.columns:
varnames.append('ws')
if 'wd_000' in df_approx.columns:
varnames.append('wd')
if 'ti_000' in df_approx.columns:
varnames.append('ti')
# Map individual data entries to full DataFrame
if verbose:
print("Mapping the precalculated solutions " +
"from FLORIS to the dataframe...")
print(" Creating a gridded interpolant with " +
"interpolation method '%s'." % method)
# Make a copy from wd=0.0 deg to wd=360.0 deg for wrapping
if not (df_approx["wd"] == 360.0).any():
df_subset = df_approx[df_approx["wd"] == 0.0].copy()
df_subset["wd"] = 360.0
df_approx = pd.concat([df_approx, df_subset], axis=0).reset_index(drop=True)
# Copy TI to lower and upper bound
df_ti_lb = df_approx.loc[df_approx["ti"] == df_approx['ti'].min()].copy()
df_ti_ub = df_approx.loc[df_approx["ti"] == df_approx['ti'].max()].copy()
df_ti_lb["ti"] = 0.0
df_ti_ub["ti"] = 1.0
df_approx = pd.concat(
[df_approx, df_ti_lb, df_ti_ub],
axis=0
).reset_index(drop=True)
# Copy WS to lower and upper bound
df_ws_lb = df_approx.loc[df_approx["ws"] == df_approx['ws'].min()].copy()
df_ws_ub = df_approx.loc[df_approx["ws"] == df_approx['ws'].max()].copy()
df_ws_lb["ws"] = 0.0
df_ws_ub["ws"] = 99.0
df_approx = pd.concat(
[df_approx, df_ws_lb, df_ws_ub],
axis=0
).reset_index(drop=True)
# Convert df_approx dataframe into a regular grid
wd_array_approx = np.sort(df_approx["wd"].unique())
ws_array_approx = np.sort(df_approx["ws"].unique())
ti_array_approx = np.sort(df_approx["ti"].unique())
xg, yg, zg = np.meshgrid(
wd_array_approx,
ws_array_approx,
ti_array_approx,
indexing='ij',
)
grid_dict = dict()
for varname in varnames:
colnames = ['{:s}_{:03d}'.format(varname, ti) for ti in range(nturbs)]
f = interpolate.NearestNDInterpolator(
df_approx[["wd", "ws", "ti"]],
df_approx[colnames]
)
grid_dict["{:s}".format(varname)] = f(xg, yg, zg)
    # Prepare a minimal output dataframe
cols_to_copy = ["wd", "ws", "ti"]
if "time" in df.columns:
cols_to_copy.append("time")
df_out = df[cols_to_copy].copy()
# Use interpolant to determine values for all turbines and variables
for varname in varnames:
if verbose:
print(' Interpolating ' + varname + ' for all turbines...')
colnames = ['{:s}_{:03d}'.format(varname, ti) for ti in range(nturbs)]
f = interpolate.RegularGridInterpolator(
points=(wd_array_approx, ws_array_approx, ti_array_approx),
values=grid_dict[varname],
method=method,
bounds_error=False,
)
df_out.loc[df_out.index, colnames] = f(df[['wd', 'ws', 'ti']])
return df_out
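# Illustrative usage sketch (not part of the original module): map a precomputed
# FLORIS table onto a long timeseries of measured conditions. `df_approx` is assumed
# to come from calc_floris_approx_table() below; `df_scada` is a placeholder name for
# a dataframe with 'wd', 'ws' and 'ti' columns.
def _example_interpolate(df_scada, df_approx):
    df_fi = interpolate_floris_from_df_approx(df_scada, df_approx, method="linear")
    # df_fi now contains the interpolated 'pow_000' ... 'pow_<nturbs>' columns
    return df_fi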
def calc_floris_approx_table(
fi,
wd_array=np.arange(0.0, 360.0, 1.0),
ws_array=np.arange(0.001, 26.001, 1.0),
ti_array=None,
):
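    """Precalculate a table of FLORIS solutions on a regular grid of wind
    directions, wind speeds and turbulence intensities. The resulting dataframe
    can be fed to interpolate_floris_from_df_approx() to quickly estimate
    turbine powers for arbitrary (wd, ws, ti) combinations.
    Args:
        fi ([FlorisInterface]): Floris object for the wind farm of interest.
        wd_array (np.array): Wind directions to evaluate [deg].
        ws_array (np.array): Wind speeds to evaluate [m/s].
        ti_array (np.array, optional): Turbulence intensities to evaluate. If
            None, the current value in the FLORIS object is used.
    Returns:
        df_approx ([pd.DataFrame]): Dataframe with one row per (wd, ws, ti)
            combination and the corresponding turbine powers 'pow_000' through
            'pow_<nturbs>'.
    """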
# if ti_array is None, use the current value in the FLORIS object
if ti_array is None:
ti = fi.floris.flow_field.turbulence_intensity
ti_array = np.array([ti], dtype=float)
fi = fi.copy() # Create independent copy that we can manipulate
num_turbines = len(fi.layout_x)
# Format input arrays
wd_array = np.sort(wd_array)
ws_array = np.sort(ws_array)
ti_array = np.sort(ti_array)
wd_mesh, ws_mesh = np.meshgrid(wd_array, ws_array, indexing='ij')
N_approx = len(wd_array) * len(ws_array) * len(ti_array)
print(
'Generating a df_approx table of FLORIS solutions ' +
'covering a total of {:d} cases.'.format(N_approx)
)
# Create solutions, one set per turbulence intensity
df_list = []
for turb_intensity in ti_array:
# Calculate solutions
fi.reinitialize(
wind_directions=wd_array,
wind_speeds=ws_array,
turbulence_intensity=turb_intensity,
)
fi.calculate_wake()
turbine_powers = fi.get_turbine_powers()
# Create a dictionary to save solutions in
solutions_dict = {"wd": wd_mesh.flatten(), "ws": ws_mesh.flatten()}
solutions_dict["ti"] = turb_intensity * np.ones(len(wd_array) * len(ws_array))
for turbi in range(num_turbines):
solutions_dict["pow_{:03d}".format(turbi)] = \
turbine_powers[:, :, turbi].flatten()
        df_list.append(pd.DataFrame(solutions_dict))
    # Combine the solutions for all turbulence intensities into one lookup table
    df_approx = pd.concat(df_list, axis=0).reset_index(drop=True)
    return df_approx
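# Illustrative usage sketch (not part of the original module). It assumes `fi` is an
# already-initialized FlorisInterface; the grid resolutions below are placeholders and
# should be chosen to balance precompute time against interpolation accuracy.
def _example_build_approx_table(fi):
    df_approx = calc_floris_approx_table(
        fi,
        wd_array=np.arange(0.0, 360.0, 3.0),
        ws_array=np.arange(1.0, 26.0, 1.0),
        ti_array=np.array([0.06]),
    )
    return df_approx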
# import pandas and numpy, and load the nls data
import pandas as pd
pd.set_option('display.width', 80)
pd.set_option('display.max_columns', 7)
pd.set_option('display.max_rows', 100)
pd.options.display.float_format = '{:,.0f}'.format
nls97 = pd.read_csv("data/nls97f.csv")
nls97.set_index("personid", inplace=True)
nls97add = pd.read_csv("data/nls97add.csv")
# look at some of the nls data
nls97.head()
nls97.shape
nls97add.head()
nls97add.shape
# check for unique ids
nls97.originalid.nunique()==nls97.shape[0]
nls97add.originalid.nunique()==nls97add.shape[0]
# create some mismatched ids
nls97 = nls97.sort_values('originalid')
nls97add = nls97add.sort_values('originalid')
nls97.iloc[0:2, -1] = nls97.iloc[0:2, -1] + 10000
nls97.originalid.head(2)
nls97add.iloc[0:2, 0] = nls97add.iloc[0:2, 0] + 20000
nls97add.originalid.head(2)
# use join to do a left join
nlsnew = nls97.join(nls97add.set_index(['originalid']))
nlsnew.loc[nlsnew.originalid>9999, ['originalid','gender','birthyear','motherage','parentincome']]
# do a left join with merge
nlsnew = pd.merge(nls97, nls97add, on=['originalid'], how="left")
nlsnew.loc[nlsnew.originalid>9999, ['originalid','gender','birthyear','motherage','parentincome']]
# do a right join
nlsnew = pd.merge(nls97, nls97add, on=['originalid'], how="right")
nlsnew.loc[nlsnew.originalid>9999, ['originalid','gender','birthyear','motherage','parentincome']]
# do an inner join
nlsnew = pd.merge(nls97, nls97add, on=['originalid'], how="inner")
nlsnew.loc[nlsnew.originalid>9999, ['originalid','gender','birthyear','motherage','parentincome']]
# do an outer join
nlsnew = pd.merge(nls97, nls97add, on=['originalid'], how="outer")
nlsnew.loc[nlsnew.originalid>9999, ['originalid','gender','birthyear','motherage','parentincome']]
# create a function to check id mismatches
def checkmerge(dfleft, dfright, idvar):
dfleft['inleft'] = "Y"
dfright['inright'] = "Y"
dfboth = pd.merge(dfleft[[idvar,'inleft']],\
dfright[[idvar,'inright']], on=[idvar], how="outer")
dfboth.fillna('N', inplace=True)
    print(pd.crosstab(dfboth.inleft, dfboth.inright))
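# call checkmerge on copies so that the temporary inleft/inright flag columns do not
# get added to nls97 and nls97add themselves (illustrative call)
checkmerge(nls97.copy(), nls97add.copy(), "originalid")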
import pandas as pd
import numpy as np
import datetime as dt
from tqdm import tqdm
from ..utils.parralel import *
def getDailyVol(close, span0=100, days=1):
# daily vol reindexed to close
df0 = close.index.searchsorted(close.index - pd.Timedelta(days=days))
df0 = df0[df0 > 0]
df0 = pd.Series(close.index[df0 - 1], index=close.index[close.shape[0] - df0.shape[0]:])
df0 = close.loc[df0.index] / close.loc[df0.values].values - 1 # daily returns
df0 = df0.ewm(span=span0).std()
return df0.dropna()
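# Illustrative usage sketch (not part of this module's original code): estimate daily
# volatility from a close-price series with a DatetimeIndex. The synthetic prices
# below are placeholders for real market data.
def _example_daily_vol():
    idx = pd.date_range("2020-01-01", periods=500, freq="B")
    close = pd.Series(100 * np.exp(np.random.normal(0, 0.01, len(idx)).cumsum()), index=idx)
    return getDailyVol(close, span0=100, days=1)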
def getTEvents_mid(gRaw, h):
tEvents, sPos, sNeg = [], 0, 0
diff = np.log(gRaw).diff().dropna()
for i in tqdm(diff.index[1:]):
pos, neg = float(sPos+diff.loc[i]), float(sNeg+diff.loc[i])
sPos, sNeg=max(0., pos), min(0., neg)
if sNeg<-h: sNeg=0;tEvents.append(i)
elif sPos>h: sPos=0;tEvents.append(i)
return pd.DatetimeIndex(tEvents)
def getTEvents(gRaw, h):
if ('ask' not in gRaw) and ('bid' not in gRaw):
return getTEvents_mid(gRaw, h)
tEvents, sPos, sNeg = [], 0, 0
diff = np.log(gRaw).diff() # bid vs bid and ask vs ask
diff_short = np.log(gRaw.ask / gRaw.bid.shift(1)) # returns from selling @bid(T-1) and buying @ask(T+0)
diff_long = np.log(gRaw.bid / gRaw.ask.shift(1)) # returns from buying @ask(T-1) and selling @bid(T+0)
for i in tqdm(diff.index[1:]):
pos, neg = sPos + diff_long.loc[i], sNeg + diff_short[i]
sPos, sNeg = max(0., sPos + diff.ask.loc[i]), min(0., sNeg + diff.bid.loc[i])
if pos > h:
sPos = 0;
tEvents.append(i);
elif neg < -h:
sNeg = 0;
tEvents.append(i)
return pd.DatetimeIndex(tEvents)
def addVerticalBarrier(tEvents, close, numDays=1):
""" Generates timeindex of events where the vertical barrier was reached within numDays
:param tEvents: events when upper or lower barrier was reached
:param close: dataframe/series of closing prices
:param numDays: max number of days to hold the position
:return: sorted pillars
"""
t1 = close.index.searchsorted(tEvents + pd.Timedelta(days=numDays))
t1 = t1[t1 < close.shape[0]] #removing times that are beyond those in consideration
    t1 = pd.Series(close.index[t1], index=tEvents[:t1.shape[0]])
    return t1
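# Illustrative usage sketch (not part of this module's original code): sample CUSUM
# events from a price series and attach a one-day vertical barrier. `close` is assumed
# to be a pd.Series of prices indexed by timestamp (no bid/ask columns).
def _example_event_sampling(close):
    vol = getDailyVol(close)                    # daily volatility estimate
    t_events = getTEvents(close, h=vol.mean())  # CUSUM filter on mid prices
    t1 = addVerticalBarrier(t_events, close, numDays=1)
    return t_events, t1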
"""Routines for manipulating a Pandas dataframe.
These are a collection of useful tools for mainpulating data series as
represented by a Pandas dataframe.
"""
import datetime as dt
import re
from functools import partial
import pandas as pd
import numpy as np
from .common import SeaIceTimeseriesInvalidArgument
from .common import DEFAULT_QUANTILES
import seaice.nasateam as nt
def filter_failed_qa(df):
"""Return frame with np.nan set for all rows that have failed_qa set to true"""
df = df.copy()
if 'failed_qa' in df.columns:
df['failed_qa'] = df['failed_qa'].replace(np.nan, False)
df['failed_qa'] = df['failed_qa'].astype('bool')
df.loc[df.failed_qa, (df.columns.difference(['failed_qa', 'filename']))] = np.nan
df.loc[df.failed_qa, ['filename']] = ''
return df
def collapse_hemisphere_index(df):
"""Returns frame with 'hemisphere' index level removed from index and added
as a data column"""
updated_frame = df.copy()
return updated_frame.reset_index(level='hemisphere', drop=False)
def filter_hemisphere(df, hemisphere):
"""Return data frame filtering all rows where hemisphere not equal to input value."""
if hemisphere in nt.VALID_HEMISPHERES:
return df[df['hemisphere'] == hemisphere]
raise SeaIceTimeseriesInvalidArgument('Must provide a valid hemisphere for filtering')
def filter_before(df, date_):
"""Return df excluding rows where index dates are before input date_"""
try:
return df[df.index >= date_]
except (AttributeError, TypeError):
return df
def filter_after(df, date_):
"""Return df excluding rows where index dates after input date_"""
try:
return df[df.index <= date_]
except (AttributeError, TypeError):
return df
def filter_columns(df_in, columns=[]):
"""Return a subset of columns from the input DataFrame"""
df = df_in.copy()
if columns:
df = df_in[columns]
return df
def interpolate_df(df_in, limit, columns=[]):
"""Interpolate the data columns in the input dataframe
Arguments:
--------
df_in: input dataframe to interpolate
limit: number of nan values to interpolate across.
columns: a list of columns to interpolate. If empty then every column
but nt.METADATA_COLUMNS are interpolated. [Those are columns
with metadata like 'hemisphere', 'filename', and
'source_dataset']
"""
if not columns:
columns = list(set(df_in.columns) - set(nt.METADATA_COLUMNS))
columns.sort()
df = pd.DataFrame()
for col in columns:
df[col] = df_in[col].interpolate(limit=limit)
return df
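# Illustrative usage sketch (not part of this module's original code): fill gaps of up
# to three days in a single data column while leaving metadata columns untouched. The
# column name below is a placeholder.
def _example_interpolate_df(df):
    return interpolate_df(df, limit=3, columns=['total_extent_km2'])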
def _series_name(date_index):
start = date_index[0].year
end = date_index[-1].year
if start == end:
return str(start)
else:
return '{start}-{end}'.format(start=start, end=end)
def _stacked_clim(series, clim_years):
"""Return a stacked climatology
Arguments
-------
series : input daily series with a DateTimeIndex.
clim_years: (begin, end) tuple of years to use to compute statistics.
The input series is reordered into 366 day of year(rows) by
clim_years(columns) and returned as a Dataframe
"""
start_date = dt.date(clim_years[0], 1, 1)
periods = 366
list_of_clim_years = list(np.arange(clim_years[0], clim_years[1]+1))
stacked_clim = _reorder_daily_series_by_years(series, start_date, periods=periods,
years=list_of_clim_years)
stacked_clim.index = stacked_clim.index + 1
stacked_clim.index.name = 'day of year'
return stacked_clim
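# Illustrative usage sketch (not part of this module's original code): reshape a daily
# series into a 366-day-of-year by climatology-year frame and compute per-day quantiles.
# The climatology window below is a placeholder.
def _example_climatology_quantiles(series):
    clim = _stacked_clim(series, clim_years=(1981, 2010))
    return clim.quantile([0.25, 0.5, 0.75], axis=1)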
def _reorder_daily_series_by_years(series, start, end=None, periods=None, years=[]):
"""Gather timeseries data into "aligned" years.
series: Daily series with datetime index with period of
1-Day
start : string or datetime-like, left bound for generating dates
end: string or datetime-like, right bound for generating dates
periods: number of days of data to select.
    years: years to align series subsets. Defaults to the left bound's year.
This function is used to "align" days beyond a starting epoch across
different years. The index is a zero based index representing the number
of days past the left-bounds. So the 0th index value is the start date,
1st value is one day beyond that.
"""
default_index = pd.date_range(start=start, end=end, periods=periods, freq='D')
periods = len(default_index)
stacked_array = pd.DataFrame(index=np.arange(len(default_index)))
stacked_array.index.name = default_index[0].to_pydatetime().strftime('%Y-%m-%d')
if not years:
years = [default_index[0].year]
for year in years:
shift_years = year - default_index[0].year
        shift_start = default_index[0] + pd.DateOffset(years=shift_years)
#!/usr/bin/env python3
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
import dash_table
import plotly.graph_objs as go
import pandas as pd
import numpy as np
import os
import math
from scipy.stats import mannwhitneyu, ttest_ind
from nutris import nutris
BASEPATH = "/data"
app = dash.Dash(__name__)
app.config['suppress_callback_exceptions']=True
def combine_all_data():
print("getting new data")
    survey_df = pd.DataFrame()
import pandas as pd
from pandas.api.types import is_numeric_dtype, is_object_dtype
import numpy as np
import seaborn as sns
import squarify
import matplotlib.pyplot as plt
import matplotlib.ticker as tkr
import matplotlib.cm
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
from statsmodels.stats.weightstats import ztest
from statsmodels.stats.proportion import proportions_ztest
from scipy import stats
from IPython.display import display_html
import os
import sys
from prettierplot.plotter import PrettierPlot
from prettierplot import style
def eda_cat_target_cat_feat(self, feature, level_count_cap=50, color_map="viridis", legend_labels=None,
chart_scale=15):
"""
Documentation:
---
Description:
Creates exploratory data visualizations and statistical summaries for a category feature
in the context of a categorical target.
---
Parameters:
feature : str
Feature to visualize.
level_count_cap : int, default=50
Maximum number of unique levels in feature. If the number of levels exceeds the
cap, then no visualization panel is produced.
color_map : str specifying built-in matplotlib colormap, default="viridis"
Color map applied to plots.
legend_labels : list, default=None
Class labels displayed in plot legend.
chart_scale : int or float, default=15
Controls size and proportions of chart and chart elements. Higher value creates
larger plots and increases visual elements proportionally.
"""
# if number of unique levels in feature is less than specified level_count_cap
if (len(np.unique(self.data[self.data[feature].notnull()][feature].values)) < level_count_cap):
### data summaries
## feature summary
# create empty DataFrame
uni_summ_df = pd.DataFrame(columns=[feature, "Count", "Proportion"])
# capture unique values and count of those unique values
unique_vals, unique_counts = np.unique(
self.data[self.data[feature].notnull()][feature], return_counts=True
)
# append each unique value, count and proportion to DataFrame
for i, j in zip(unique_vals, unique_counts):
uni_summ_df = uni_summ_df.append(
{
feature: i,
"Count": j,
"Proportion": j / np.sum(unique_counts) * 100,
},
ignore_index=True,
)
# sort DataFrame by "Proportion", descending
uni_summ_df = uni_summ_df.sort_values(by=["Proportion"], ascending=False)
# set values to int dtype where applicable to optimize
uni_summ_df["Count"] = uni_summ_df["Count"].astype("int64")
if is_numeric_dtype(uni_summ_df[feature]):
uni_summ_df[feature] = uni_summ_df[feature].astype("int64")
## feature vs. target summary
# combine feature column and target
bi_df = pd.concat([self.data[feature], self.target], axis=1)
# remove any rows with nulls
bi_df = bi_df[bi_df[feature].notnull()]
# groupby category feature and count the occurrences of target classes
# for each level in category
bi_summ_df = (
bi_df.groupby([feature] + [self.target.name])
.size()
.reset_index()
.pivot(columns=self.target.name, index=feature, values=0)
)
# overwrite DataFrame index with actual class labels if provided
bi_summ_df.columns = pd.Index(legend_labels) if legend_labels is not None else pd.Index([i for i in bi_summ_df.columns.tolist()])
bi_summ_df.reset_index(inplace=True)
# fill nan's with zero
fill_columns = bi_summ_df.iloc[:,2:].columns
bi_summ_df[fill_columns] = bi_summ_df[fill_columns].fillna(0)
# set values to int dtype where applicable to optimize displayed DataFrame
for column in bi_summ_df.columns:
try:
                bi_summ_df[column] = bi_summ_df[column].astype(int)
except ValueError:
bi_summ_df[column] = bi_summ_df[column]
## proportion by category summary
# combine feature column and target
prop_df = pd.concat([self.data[feature], self.target], axis=1)
# remove any rows with nulls
prop_df = prop_df[prop_df[feature].notnull()]
# calculate percent of 100 by class label
prop_df = prop_df.groupby([feature, self.target.name]).agg({self.target.name : {"count"}})
prop_df = prop_df.groupby(level=0).apply(lambda x: 100 * x / float(x.sum()))
prop_df = prop_df.reset_index()
multiIndex = prop_df.columns
singleIndex = [i[0] for i in multiIndex.tolist()]
singleIndex[-1] = "Count"
prop_df.columns = singleIndex
prop_df = prop_df.reset_index(drop=True)
prop_df = pd.pivot_table(prop_df, values=["Count"], columns=[feature], index=[self.target.name], aggfunc={"Count": np.mean})
prop_df = prop_df.reset_index(drop=True)
multiIndex = prop_df.columns
singleIndex = []
for column in multiIndex.tolist():
try:
singleIndex.append(int(column[1]))
except ValueError:
singleIndex.append(column[1])
prop_df.columns = singleIndex
prop_df = prop_df.reset_index(drop=True)
# insert column to DataFrame with actual class labels if provided, otherwise use raw class labels in target
prop_df.insert(loc=0, column="Class", value=legend_labels if legend_labels is not None else np.unique(self.target))
# fill nan's with zero
fill_columns = prop_df.iloc[:,:].columns
prop_df[fill_columns] = prop_df[fill_columns].fillna(0)
# if there are only two class labels, perform z-test/t-test
if len(np.unique(bi_df[bi_df[feature].notnull()][feature])) == 2:
# total observations
total_obs1 = bi_df[(bi_df[feature] == np.unique(bi_df[feature])[0])][
feature
].shape[0]
total_obs2 = bi_df[(bi_df[feature] == np.unique(bi_df[feature])[1])][
feature
].shape[0]
# total positive observations
pos_obs1 = bi_df[
(bi_df[feature] == np.unique(bi_df[feature])[0])
& (bi_df[self.target.name] == 1)
][feature].shape[0]
pos_obs2 = bi_df[
(bi_df[feature] == np.unique(bi_df[feature])[1])
& (bi_df[self.target.name] == 1)
][feature].shape[0]
# perform z-test, return z-statistic and p-value
z, p_val = proportions_ztest(
count=(pos_obs1, pos_obs2), nobs=(total_obs1, total_obs2)
)
# add z-statistic and p-value to DataFrame
stat_test_df = pd.DataFrame(
data=[{"z-test statistic": z, "p-value": p_val}],
columns=["z-test statistic", "p-value"],
index=[feature],
).round(4)
# display summary tables
self.df_side_by_side(
dfs=(uni_summ_df, bi_summ_df, prop_df, stat_test_df),
names=["Feature summary", "Feature vs. target summary", "Target proportion", "Statistical test",],
)
if "percent_positive" in bi_summ_df:
bi_summ_df = bi_summ_df.drop(["percent_positive"], axis=1)
else:
# display summary tables
self.df_side_by_side(
dfs=(uni_summ_df, bi_summ_df, prop_df),
names=["Feature summary", "Feature vs. target summary", "Target proportion"],
)
if "percent_positive" in bi_summ_df:
bi_summ_df = bi_summ_df.drop(["percent_positive"], axis=1)
### visualizations
# set label rotation angle
len_unique_val = len(unique_vals)
avg_len_unique_val = sum(map(len, str(unique_vals))) / len(unique_vals)
if len_unique_val <= 4 and avg_len_unique_val <= 12:
rotation = 0
elif len_unique_val >= 5 and len_unique_val <= 8 and avg_len_unique_val <= 8:
rotation = 0
elif len_unique_val >= 9 and len_unique_val <= 14 and avg_len_unique_val <= 4:
rotation = 0
else:
rotation = 90
# create prettierplot object
p = PrettierPlot(chart_scale=chart_scale, plot_orientation="wide_narrow")
# add canvas to prettierplot object
ax = p.make_canvas(title="Category counts\n* {}".format(feature), position=131, title_scale=0.82)
# add treemap to canvas
p.tree_map(
counts=uni_summ_df["Count"].values,
labels=uni_summ_df[feature].values,
colors=style.color_gen(name=color_map, num=len(uni_summ_df[feature].values)),
alpha=0.8,
ax=ax,
)
# add canvas to prettierplot object
ax = p.make_canvas(title="Category counts by target\n* {}".format(feature), position=132)
# add faceted categorical plot to canvas
p.facet_cat(
df=bi_summ_df,
feature=feature,
label_rotate=rotation,
color_map=color_map,
bbox=(1.0, 1.15),
alpha=0.8,
legend_labels=legend_labels,
x_units=None,
ax=ax,
)
# add canvas to prettierplot object
ax = p.make_canvas(title="Target proportion by category\n* {}".format(feature), position=133)
# add stacked bar chart to canvas
p.stacked_bar_h(
df=prop_df.drop("Class", axis=1),
bbox=(1.0, 1.15),
legend_labels=legend_labels,
color_map=color_map,
alpha=0.8,
ax=ax,
)
plt.show()
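# Illustrative usage sketch (not part of the original module). It assumes these
# functions are bound as methods to an EDA-style object exposing .data (a DataFrame)
# and .target (a Series), as the signatures imply; the feature and legend labels are
# placeholders.
def _example_cat_target_cat_feat(eda):
    eda.eda_cat_target_cat_feat(
        feature="embarked",
        legend_labels=["died", "survived"],
        chart_scale=15,
    )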
def eda_cat_target_num_feat(self, feature, color_map="viridis", outliers_out_of_scope=None, legend_labels=None,
chart_scale=15):
"""
Documentation:
---
Description:
Creates exploratory data visualizations and statistical summaries for a number
feature in the context of a categorical target.
---
Parameters:
feature : str
Feature to visualize.
color_map : str specifying built-in matplotlib colormap, default="viridis"
Color map applied to plots.
outliers_out_of_scope : boolean, float or int, default=None
Truncates the x-axis upper limit so that outliers are out of scope of the visualization.
The x-axis upper limit is reset to the maximum non-outlier value.
To identify outliers, the IQR is calculated, and values that are below the first quartile
minus the IQR, or above the third quarterile plus the IQR are designated as outliers. If True
is passed as a value, the IQR that is subtracted/added is multiplied by 5. If a float or int is
            passed, the IQR is multiplied by that value. Higher values require values to be
            more extreme before they are identified as outliers.
legend_labels : list, default=None
Class labels displayed in plot legend.
chart_scale : int or float, default=15
Controls size and proportions of chart and chart elements. Higher value creates larger plots
and increases visual elements proportionally.
"""
### data summaries
## bivariate roll_up table
# combine feature column and target
bi_df = pd.concat([self.data[feature], self.target], axis=1)
# remove any rows with nulls
bi_df = bi_df[bi_df[feature].notnull()]
# bivariate summary statistics
bi_summ_stats_df = pd.DataFrame(
columns=["Class", "Count", "Proportion", "Mean", "StdDev"]
)
# for each unique class label
for labl in np.unique(self.target):
# get feature values associated with single class label
feature_slice = bi_df[bi_df[self.target.name] == labl][feature]
# append summary statistics for feature values associated with class label
bi_summ_stats_df = bi_summ_stats_df.append(
{
"Class": labl,
"Count": len(feature_slice),
"Proportion": len(feature_slice) / len(bi_df[feature]) * 100,
"Mean": np.mean(feature_slice),
"StdDev": np.std(feature_slice),
},
ignore_index=True,
)
# apply custom legend labels, or set dtype to int if column values are numeric
if legend_labels is not None:
bi_summ_stats_df["Class"] = legend_labels
elif is_numeric_dtype(bi_summ_stats_df["Class"]):
bi_summ_stats_df["Class"] = bi_summ_stats_df["Class"].astype(np.int)
## Feature summary
describe_df = pd.DataFrame(bi_df[feature].describe()).reset_index()
# add missing percentage
describe_df = describe_df.append(
{
"index": "missing",
feature: np.round(self.data.shape[0] - bi_df[feature].shape[0], 5),
},
ignore_index=True,
)
# add skew
describe_df = describe_df.append(
{
"index": "skew",
feature: np.round(stats.skew(bi_df[feature].values, nan_policy="omit"), 5),
},
ignore_index=True,
)
# add kurtosis
describe_df = describe_df.append(
{
"index": "kurtosis",
feature: stats.kurtosis(bi_df[feature].values, nan_policy="omit"),
},
ignore_index=True,
)
describe_df = describe_df.rename(columns={"index": ""})
# execute z-test or t-test
if len(np.unique(self.target)) == 2:
s1 = bi_df[
(bi_df[self.target.name] == bi_df[self.target.name].unique()[0])
][feature]
s2 = bi_df[
(bi_df[self.target.name] == bi_df[self.target.name].unique()[1])
][feature]
if len(s1) > 30 and len(s2) > 30:
# perform z-test, return z-statistic and p-value
z, p_val = ztest(s1, s2)
# add z-statistic and p-value to DataFrame
stat_test_df = pd.DataFrame(
data=[{"z-test statistic": z, "p-value": p_val}],
columns=["z-test statistic", "p-value"],
index=[feature],
).round(4)
else:
# perform t-test, return t-score and p-value
t, p_val = stats.ttest_ind(s1, s2)
# add t-statistic and p-value to DataFrame
stat_test_df = pd.DataFrame(
data=[{"t-test statistic": t, "p-value": p_val}],
columns=["t-test statistic", "p-value"],
index=[feature],
).round(4)
# display summary tables
self.df_side_by_side(
dfs=(describe_df, bi_summ_stats_df, stat_test_df),
names=["Feature summary", "Feature vs. target summary", "Statistical test"],
)
else:
# display summary tables
self.df_side_by_side(
dfs=(describe_df, bi_summ_stats_df),
names=["Feature summary", "Feature vs. target summary"],
)
### visualizations
# create prettierplot object
p = PrettierPlot(chart_scale=chart_scale, plot_orientation="wide_standard")
# if boolean is passed to outliers_out_of_scope
if isinstance(outliers_out_of_scope, bool):
# if outliers_out_of_scope = True
if outliers_out_of_scope:
# identify outliers using IQR method and an IQR step of 5
outliers = self.outlier_IQR(self.data[feature], iqr_step=5)
# reset x-axis minimum and maximum
x_axis_min = self.data[feature].drop(index=outliers).min()
x_axis_max = self.data[feature].drop(index=outliers).max()
# if outliers_out_of_scope is a float or int
elif isinstance(outliers_out_of_scope, float) or isinstance(outliers_out_of_scope, int):
# identify outliers using IQR method and an IQR step equal to the float/int passed
outliers = self.outlier_IQR(self.data[feature], iqr_step=outliers_out_of_scope)
# reset x-axis minimum and maximum
x_axis_min = self.data[feature].drop(index=outliers).min()
x_axis_max = self.data[feature].drop(index=outliers).max()
# add canvas to prettierplot object
ax = p.make_canvas(
title="Feature distribution\n* {}".format(feature),
title_scale=0.85,
position=221,
)
## dynamically determine precision of x-units
# capture min and max feature values
dist_min = bi_df[feature].values.min()
dist_max = bi_df[feature].values.max()
# determine x-units precision based on min and max values in feature
if -3 < dist_min < 3 and -3 < dist_max < 3 and dist_max/dist_min < 10:
x_units = "fff"
elif -30 < dist_min < 30 and -30 < dist_max < 30 and dist_max/dist_min < 3:
x_units = "fff"
elif -5 < dist_min < 5 and -5 < dist_max < 5 and dist_max/dist_min < 10:
x_units = "ff"
elif -90 < dist_min < 90 and -90 < dist_max < 90 and dist_max/dist_min < 5:
x_units = "ff"
else:
x_units = "f"
# add distribution plot to canvas
p.dist_plot(
bi_df[feature].values,
color=style.style_grey,
y_units="f",
x_units=x_units,
ax=ax,
)
# optionally reset x-axis limits
if outliers_out_of_scope is not None:
plt.xlim(x_axis_min, x_axis_max)
# add canvas to prettierplot object
ax = p.make_canvas(
title="Probability plot\n* {}".format(feature),
title_scale=0.85,
position=222,
)
# add QQ / probability plot to canvas
p.prob_plot(
x=bi_df[feature].values,
plot=ax,
)
# add canvas to prettierplot object
ax = p.make_canvas(
title="Distribution by class\n* {}".format(feature),
title_scale=0.85,
position=223,
)
## dynamically determine precision of x-units
# capture min and max feature values
dist_min = bi_df[feature].values.min()
dist_max = bi_df[feature].values.max()
# determine x-units precision based on min and max values in feature
if -3 < dist_min < 3 and -3 < dist_max < 3 and dist_max/dist_min < 10:
x_units = "fff"
elif -30 < dist_min < 30 and -30 < dist_max < 30 and dist_max/dist_min < 3:
x_units = "fff"
elif -5 < dist_min < 5 and -5 < dist_max < 5 and dist_max/dist_min < 10:
x_units = "ff"
elif -90 < dist_min < 90 and -90 < dist_max < 90 and dist_max/dist_min < 5:
x_units = "ff"
else:
x_units = "f"
# generate color list
color_list = style.color_gen(name=color_map, num=len(np.unique(self.target)))
# add one distribution plot to canvas for each category class
for ix, labl in enumerate(np.unique(bi_df[self.target.name].values)):
p.dist_plot(
bi_df[bi_df[self.target.name] == labl][feature].values,
color=color_list[ix],
y_units="f",
x_units=x_units,
legend_labels=legend_labels if legend_labels is not None else np.arange(len(np.unique(self.target))),
alpha=0.4,
bbox=(1.0, 1.0),
ax=ax,
)
# optionally reset x-axis limits
if outliers_out_of_scope is not None:
plt.xlim(x_axis_min, x_axis_max)
# add canvas to prettierplot object
ax = p.make_canvas(
title="Boxplot by class\n* {}".format(feature),
title_scale=0.85,
position=224,
)
## dynamically determine precision of x-units
# capture min and max feature values
dist_min = bi_df[feature].values.min()
dist_max = bi_df[feature].values.max()
# determine x-units precision based on min and max values in feature
if -3 < dist_min < 3 and -3 < dist_max < 3 and dist_max/dist_min < 10:
x_units = "fff"
elif -30 < dist_min < 30 and -30 < dist_max < 30 and dist_max/dist_min < 3:
x_units = "fff"
elif -5 < dist_min < 5 and -5 < dist_max < 5 and dist_max/dist_min < 10:
x_units = "ff"
elif -90 < dist_min < 90 and -90 < dist_max < 90 and dist_max/dist_min < 5:
x_units = "ff"
else:
x_units = "f"
# add horizontal box plot to canvas
p.box_plot_h(
x=feature,
y=self.target.name,
data=bi_df,
alpha=0.7,
x_units=x_units,
legend_labels=legend_labels,
bbox=(1.2, 1.0),
suppress_outliers=True,
ax=ax
)
# optionally reset x-axis limits
if outliers_out_of_scope is not None:
plt.xlim(x_axis_min-(x_axis_min * 0.1), x_axis_max)
# apply position adjustment to subplots
plt.subplots_adjust(bottom=-0.1)
plt.show()
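# Illustrative usage sketch (not part of the original module). `eda` is again assumed
# to expose .data and .target; passing a number for outliers_out_of_scope scales the
# IQR step used to trim the x-axis, as described in the docstring above.
def _example_cat_target_num_feat(eda):
    eda.eda_cat_target_num_feat(
        feature="age",
        outliers_out_of_scope=3,
        legend_labels=["died", "survived"],
    )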
def eda_num_target_num_feat(self, feature, color_map="viridis", chart_scale=15):
"""
Documentation:
---
Description:
Produces exploratory data visualizations and statistical summaries for a numeric
feature in the context of a numeric target.
---
Parameters:
feature : str
Feature to visualize.
color_map : str specifying built-in matplotlib colormap, default="viridis"
Color map applied to plots.
chart_scale : int or float, default=15
Controls size and proportions of chart and chart elements. Higher value creates
larger plots and increases visual elements proportionally.
"""
### data summaries
## feature summary
# combine feature column and target
bi_df = pd.concat([self.data[feature], self.target], axis=1)
# remove any rows with nulls
bi_df = bi_df[bi_df[feature].notnull()]
# cast target as float
bi_df[self.target.name] = bi_df[self.target.name].astype(float)
# create summary statistic table
describe_df = pd.DataFrame(bi_df[feature].describe()).reset_index()
# add skew and kurtosis to describe_df
describe_df = describe_df.append(
{
"index": "skew",
feature: stats.skew(bi_df[feature].values, nan_policy="omit"),
},
ignore_index=True,
)
describe_df = describe_df.append(
{
"index": "kurtosis",
feature: stats.kurtosis(bi_df[feature].values, nan_policy="omit"),
},
ignore_index=True,
)
describe_df = describe_df.rename(columns={"index": ""})
# display summary tables
display(describe_df)
### visualizations
# create prettierplot object
p = PrettierPlot(chart_scale=chart_scale, plot_orientation="wide_narrow")
# add canvas to prettierplot object
ax = p.make_canvas(
title="Feature distribution\n* {}".format(feature), position=131, title_scale=1.2
)
# determine x-units precision based on magnitude of max value
if -1 <= np.nanmax(bi_df[feature].values) <= 1:
x_units = "fff"
elif -10 <= np.nanmax(bi_df[feature].values) <= 10:
x_units = "ff"
else:
x_units = "f"
# determine y-units precision based on magnitude of max value
if -1 <= np.nanmax(bi_df[feature].values) <= 1:
y_units = "fff"
elif -10 <= np.nanmax(bi_df[feature].values) <= 10:
y_units = "ff"
else:
y_units = "f"
# x rotation
if -10000 < np.nanmax(bi_df[feature].values) < 10000:
x_rotate = 0
else:
x_rotate = 45
# add distribution plot to canvas
p.dist_plot(
bi_df[feature].values,
color=style.style_grey,
y_units=y_units,
x_rotate=x_rotate,
ax=ax,
)
# add canvas to prettierplot object
ax = p.make_canvas(title="Probability plot\n* {}".format(feature), position=132)
# add QQ / probability plot to canvas
p.prob_plot(x=bi_df[feature].values, plot=ax)
# add canvas to prettierplot object
ax = p.make_canvas(
title="Regression plot - feature vs. target\n* {}".format(feature),
position=133,
title_scale=1.5
)
# add regression plot to canvas
p.reg_plot(
x=feature,
y=self.target.name,
data=bi_df,
x_jitter=0.1,
x_rotate=x_rotate,
x_units=x_units,
y_units=y_units,
ax=ax,
)
plt.show()
def eda_num_target_cat_feat(self, feature, level_count_cap=50, color_map="viridis", chart_scale=15):
"""
Documentation:
---
Description:
Produces exploratory data visualizations and statistical summaries for a category
feature in the context of a numeric target.
---
Parameters:
feature : str
Feature to visualize.
level_count_cap : int, default=50
Maximum number of unique levels in feature. If the number of levels exceeds the
cap then the feature is skipped.
color_map : str specifying built-in matplotlib colormap, default="viridis"
Color map applied to plots.
chart_scale : int or float, default=15
Controls size and proportions of chart and chart elements. Higher value creates
larger plots and increases visual elements proportionally.
"""
# if number of unique levels in feature is less than specified level_count_cap
if (len(np.unique(self.data[self.data[feature].notnull()][feature].values)) < level_count_cap):
### data summaries
## feature summary
# create empty DataFrame
uni_summ_df = pd.DataFrame(columns=[feature, "Count", "Proportion"])
# capture unique values and count of those unique values
unique_vals, unique_counts = np.unique(
self.data[self.data[feature].notnull()][feature], return_counts=True
)
# append each unique value, count and proportion to DataFrame
for i, j in zip(unique_vals, unique_counts):
uni_summ_df = uni_summ_df.append(
{feature: i, "Count": j, "Proportion": j / np.sum(unique_counts) * 100},
ignore_index=True,
)
# sort DataFrame by "Proportion", descending
uni_summ_df = uni_summ_df.sort_values(by=["Proportion"], ascending=False)
# set values to int dtype where applicable to optimize
if is_numeric_dtype(uni_summ_df[feature]):
uni_summ_df[feature] = uni_summ_df[feature].astype("int64")
uni_summ_df["Count"] = uni_summ_df["Count"].astype("int64")
## feature vs. target summary
# combine feature column and target
bi_df = pd.concat([self.data[feature], self.target], axis=1)
# remove any rows with nulls
bi_df = bi_df[bi_df[feature].notnull()]
# cast target as float
bi_df[self.target.name] = bi_df[self.target.name].astype(float)
# create pivot table of target summary statistics, grouping by category feature
bi_summ_piv_df = pd.pivot_table(
bi_df, index=feature, aggfunc={self.target.name: [np.nanmin, np.nanmax, np.nanmean, np.nanmedian, np.nanstd]}
)
multi_index = bi_summ_piv_df.columns
single_index = pd.Index([i[1] for i in multi_index.tolist()])
bi_summ_piv_df.columns = single_index
bi_summ_piv_df.reset_index(inplace=True)
bi_summ_piv_df = bi_summ_piv_df.rename(columns={
"nanmin":"Min",
"nanmax":"Max",
"nanmean":"Mean",
"nanmedian":"Median",
"nanstd":"StdDev",
}
)
# fill nan's with zero
fill_columns = bi_summ_piv_df.iloc[:,1:].columns
bi_summ_piv_df[fill_columns] = bi_summ_piv_df[fill_columns].fillna(0)
# reorder column
bi_summ_piv_df = bi_summ_piv_df[[feature,"Mean","Median","StdDev","Min","Max"]]
# convert to int
        if is_numeric_dtype(bi_summ_piv_df[feature]):
            bi_summ_piv_df[feature] = bi_summ_piv_df[feature].astype("int64")
# Copyright 2018 QuantRocket LLC - All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# To run: python3 -m unittest discover -s tests/ -p test_*.py -t . -v
import os
import unittest
from unittest.mock import patch
import glob
import pandas as pd
from moonshot import Moonshot
from moonshot.cache import TMP_DIR
class BacktestTestCase(unittest.TestCase):
def tearDown(self):
"""
Remove cached files.
"""
for file in glob.glob("{0}/moonshot*.pkl".format(TMP_DIR)):
os.remove(file)
def test_complain_if_prices_to_signals_not_implemented(self):
"""
Tests error handling when prices_to_signals hasn't been implemented.
"""
def mock_get_prices(*args, **kwargs):
dt_idx = pd.DatetimeIndex(["2018-05-01","2018-05-02","2018-05-03"])
fields = ["Close","Volume"]
idx = pd.MultiIndex.from_product([fields, dt_idx], names=["Field", "Date"])
prices = pd.DataFrame(
{
"FI12345": [
# Close
9,
11,
10.50,
# Volume
5000,
16000,
8800
],
"FI23456": [
# Close
12,
11,
8.50,
# Volume
15000,
14000,
28800
],
},
index=idx
)
return prices
def mock_download_master_file(f, *args, **kwargs):
master_fields = ["Timezone", "Symbol", "SecType", "Currency", "PriceMagnifier", "Multiplier"]
securities = pd.DataFrame(
{
"FI12345": [
"America/New_York",
"ABC",
"STK",
"USD",
None,
None
],
"FI23456": [
"America/New_York",
"DEF",
"STK",
"USD",
None,
None,
]
},
index=master_fields
)
securities.columns.name = "Sid"
securities.T.to_csv(f, index=True, header=True)
f.seek(0)
with patch("moonshot.strategies.base.get_prices", new=mock_get_prices):
with patch("moonshot.strategies.base.download_master_file", new=mock_download_master_file):
with self.assertRaises(NotImplementedError) as cm:
Moonshot().backtest()
self.assertIn("strategies must implement prices_to_signals", repr(cm.exception))
def test_basic_long_only_strategy(self):
"""
Tests that the resulting DataFrames are correct after running a basic
long-only strategy that largely relies on the default methods.
"""
class BuyBelow10(Moonshot):
"""
A basic test strategy that buys below 10.
"""
def prices_to_signals(self, prices):
signals = prices.loc["Close"] < 10
return signals.astype(int)
def mock_get_prices(*args, **kwargs):
dt_idx = pd.DatetimeIndex(["2018-05-01","2018-05-02","2018-05-03", "2018-05-04"])
fields = ["Close","Volume"]
idx = pd.MultiIndex.from_product([fields, dt_idx], names=["Field", "Date"])
prices = pd.DataFrame(
{
"FI12345": [
# Close
9,
11,
10.50,
9.99,
# Volume
5000,
16000,
8800,
9900
],
"FI23456": [
# Close
9.89,
11,
8.50,
10.50,
# Volume
15000,
14000,
28800,
17000
],
},
index=idx
)
return prices
def mock_download_master_file(f, *args, **kwargs):
master_fields = ["Timezone", "Symbol", "SecType", "Currency", "PriceMagnifier", "Multiplier"]
securities = pd.DataFrame(
{
"FI12345": [
"America/New_York",
"ABC",
"STK",
"USD",
None,
None
],
"FI23456": [
"America/New_York",
"DEF",
"STK",
"USD",
None,
None,
]
},
index=master_fields
)
securities.columns.name = "Sid"
securities.T.to_csv(f, index=True, header=True)
f.seek(0)
with patch("moonshot.strategies.base.get_prices", new=mock_get_prices):
with patch("moonshot.strategies.base.download_master_file", new=mock_download_master_file):
results = BuyBelow10().backtest()
self.assertSetEqual(
set(results.index.get_level_values("Field")),
{'Commission',
'AbsExposure',
'Signal',
'Return',
'Slippage',
'NetExposure',
'TotalHoldings',
'Turnover',
'AbsWeight',
'Weight'}
)
# replace nan with "nan" to allow equality comparisons
results = results.round(7)
results = results.where(results.notnull(), "nan")
signals = results.loc["Signal"].reset_index()
signals.loc[:, "Date"] = signals.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
signals.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
"FI12345": [1.0,
0.0,
0.0,
1.0],
"FI23456": [1.0,
0.0,
1.0,
0.0]}
)
weights = results.loc["Weight"].reset_index()
weights.loc[:, "Date"] = weights.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
weights.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
"FI12345": [0.5,
0.0,
0.0,
1.0],
"FI23456": [0.5,
0.0,
1.0,
0.0]}
)
abs_weights = results.loc["AbsWeight"].reset_index()
abs_weights.loc[:, "Date"] = abs_weights.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
abs_weights.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
"FI12345": [0.5,
0.0,
0.0,
1.0],
"FI23456": [0.5,
0.0,
1.0,
0.0]}
)
net_positions = results.loc["NetExposure"].reset_index()
net_positions.loc[:, "Date"] = net_positions.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
net_positions.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
"FI12345": ["nan",
0.5,
0.0,
0.0],
"FI23456": ["nan",
0.5,
0.0,
1.0]}
)
abs_positions = results.loc["AbsExposure"].reset_index()
abs_positions.loc[:, "Date"] = abs_positions.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
abs_positions.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
"FI12345": ["nan",
0.5,
0.0,
0.0],
"FI23456": ["nan",
0.5,
0.0,
1.0]}
)
total_holdings = results.loc["TotalHoldings"].reset_index()
total_holdings.loc[:, "Date"] = total_holdings.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
total_holdings.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
"FI12345": [0,
1.0,
0,
0],
"FI23456": [0,
1.0,
0,
1.0]}
)
turnover = results.loc["Turnover"].reset_index()
turnover.loc[:, "Date"] = turnover.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
turnover.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
"FI12345": ["nan",
0.5,
0.5,
0.0],
"FI23456": ["nan",
0.5,
0.5,
1.0]}
)
commissions = results.loc["Commission"].reset_index()
commissions.loc[:, "Date"] = commissions.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
commissions.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
"FI12345": [0.0,
0.0,
0.0,
0.0],
"FI23456": [0.0,
0.0,
0.0,
0.0]}
)
slippage = results.loc["Slippage"].reset_index()
slippage.loc[:, "Date"] = slippage.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
slippage.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
"FI12345": [0.0,
0.0,
0.0,
0.0],
"FI23456": [0.0,
0.0,
0.0,
0.0]}
)
returns = results.loc["Return"]
returns = returns.reset_index()
returns.loc[:, "Date"] = returns.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
returns.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
"FI12345": [0.0,
0.0,
-0.0227273, # (10.50 - 11)/11 * 0.5
-0.0],
"FI23456": [0.0,
0.0,
-0.1136364, # (8.50 - 11)/11 * 0.5
0.0]}
)
def test_basic_long_short_strategy(self):
"""
Tests that the resulting DataFrames are correct after running a basic
long-short strategy that largely relies on the default methods.
"""
class BuyBelow10ShortAbove10(Moonshot):
"""
A basic test strategy that buys below 10 and shorts above 10.
"""
def prices_to_signals(self, prices):
long_signals = prices.loc["Close"] <= 10
short_signals = prices.loc["Close"] > 10
signals = long_signals.astype(int).where(long_signals, -short_signals.astype(int))
return signals
def mock_get_prices(*args, **kwargs):
dt_idx = pd.DatetimeIndex(["2018-05-01","2018-05-02","2018-05-03", "2018-05-04"])
fields = ["Close","Volume"]
idx = pd.MultiIndex.from_product([fields, dt_idx], names=["Field", "Date"])
prices = pd.DataFrame(
{
"FI12345": [
# Close
9,
11,
10.50,
9.99,
# Volume
5000,
16000,
8800,
9900
],
"FI23456": [
# Close
9.89,
11,
8.50,
10.50,
# Volume
15000,
14000,
28800,
17000
],
},
index=idx
)
return prices
def mock_download_master_file(f, *args, **kwargs):
master_fields = ["Timezone", "Symbol", "SecType", "Currency", "PriceMagnifier", "Multiplier"]
securities = pd.DataFrame(
{
"FI12345": [
"America/New_York",
"ABC",
"STK",
"USD",
None,
None
],
"FI23456": [
"America/New_York",
"DEF",
"STK",
"USD",
None,
None,
]
},
index=master_fields
)
securities.columns.name = "Sid"
securities.T.to_csv(f, index=True, header=True)
f.seek(0)
with patch("moonshot.strategies.base.get_prices", new=mock_get_prices):
with patch("moonshot.strategies.base.download_master_file", new=mock_download_master_file):
results = BuyBelow10ShortAbove10().backtest()
self.assertSetEqual(
set(results.index.get_level_values("Field")),
{'Commission',
'AbsExposure',
'Signal',
'Return',
'Slippage',
'NetExposure',
'TotalHoldings',
'Turnover',
'AbsWeight',
'Weight'}
)
# replace nan with "nan" to allow equality comparisons
results = results.round(7)
results = results.where(results.notnull(), "nan")
signals = results.loc["Signal"].reset_index()
signals.loc[:, "Date"] = signals.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
signals.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
"FI12345": [1.0,
-1.0,
-1.0,
1.0],
"FI23456": [1.0,
-1.0,
1.0,
-1.0]}
)
weights = results.loc["Weight"].reset_index()
weights.loc[:, "Date"] = weights.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
weights.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
"FI12345": [0.5,
-0.5,
-0.5,
0.5],
"FI23456": [0.5,
-0.5,
0.5,
-0.5]}
)
abs_weights = results.loc["AbsWeight"].reset_index()
abs_weights.loc[:, "Date"] = abs_weights.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
abs_weights.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
"FI12345": [0.5,
0.5,
0.5,
0.5],
"FI23456": [0.5,
0.5,
0.5,
0.5]}
)
net_positions = results.loc["NetExposure"].reset_index()
net_positions.loc[:, "Date"] = net_positions.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
net_positions.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
"FI12345": ["nan",
0.5,
-0.5,
-0.5],
"FI23456": ["nan",
0.5,
-0.5,
0.5]}
)
abs_positions = results.loc["AbsExposure"].reset_index()
abs_positions.loc[:, "Date"] = abs_positions.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
abs_positions.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
"FI12345": ["nan",
0.5,
0.5,
0.5],
"FI23456": ["nan",
0.5,
0.5,
0.5]}
)
total_holdings = results.loc["TotalHoldings"].reset_index()
total_holdings.loc[:, "Date"] = total_holdings.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
total_holdings.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
"FI12345": [0,
1.0,
1.0,
1.0],
"FI23456": [0,
1.0,
1.0,
1.0]}
)
turnover = results.loc["Turnover"].reset_index()
turnover.loc[:, "Date"] = turnover.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
turnover.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
"FI12345": ["nan",
0.5,
1.0,
0.0],
"FI23456": ["nan",
0.5,
1.0,
1.0]}
)
commissions = results.loc["Commission"].reset_index()
commissions.loc[:, "Date"] = commissions.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
commissions.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
"FI12345": [0.0,
0.0,
0.0,
0.0],
"FI23456": [0.0,
0.0,
0.0,
0.0]}
)
slippage = results.loc["Slippage"].reset_index()
slippage.loc[:, "Date"] = slippage.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
slippage.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
"FI12345": [0.0,
0.0,
0.0,
0.0],
"FI23456": [0.0,
0.0,
0.0,
0.0]}
)
returns = results.loc["Return"]
returns = returns.reset_index()
returns.loc[:, "Date"] = returns.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
returns.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
"FI12345": [0.0,
0.0,
-0.0227273, # (10.50 - 11)/11 * 0.5
0.0242857], # (9.99 - 10.50)/10.50 * -0.5
"FI23456": [0.0,
0.0,
-0.1136364, # (8.50 - 11)/11 * 0.5
-0.1176471] # (10.50 - 8.50)/8.50 * -0.5
}
)
def test_long_short_strategy_override_methods(self):
"""
Tests that the resulting DataFrames are correct after running a
long-short strategy that overrides the major backtesting methods.
"""
class BuyBelow10ShortAbove10Overnight(Moonshot):
"""
A basic test strategy that buys below 10 and shorts above 10 and holds overnight.
"""
def prices_to_signals(self, prices):
long_signals = prices.loc["Open"] <= 10
short_signals = prices.loc["Open"] > 10
signals = long_signals.astype(int).where(long_signals, -short_signals.astype(int))
return signals
def signals_to_target_weights(self, signals, prices):
weights = self.allocate_fixed_weights(signals, 0.25)
return weights
def target_weights_to_positions(self, weights, prices):
# enter on close same day
positions = weights.copy()
return positions
def positions_to_gross_returns(self, positions, prices):
# hold on close till next day open
closes = prices.loc["Close"]
opens = prices.loc["Open"]
pct_changes = (opens - closes.shift()) / closes.shift()
gross_returns = pct_changes * positions.shift()
return gross_returns
def mock_get_prices(*args, **kwargs):
            dt_idx = pd.DatetimeIndex(["2018-05-01","2018-05-02","2018-05-03", "2018-05-04"])
# -*- coding: utf-8 -*-
import csv
import os
import platform
import codecs
import re
import sys
from datetime import datetime
import pytest
import numpy as np
from pandas._libs.lib import Timestamp
import pandas as pd
import pandas.util.testing as tm
from pandas import DataFrame, Series, Index, MultiIndex
from pandas import compat
from pandas.compat import (StringIO, BytesIO, PY3,
range, lrange, u)
from pandas.errors import DtypeWarning, EmptyDataError, ParserError
from pandas.io.common import URLError
from pandas.io.parsers import TextFileReader, TextParser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
# Parsers support only length-1 decimals
msg = 'Only length-1 decimal markers supported'
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(data), decimal='')
def test_bad_stream_exception(self):
# Issue 13652:
# This test validates that both python engine
# and C engine will raise UnicodeDecodeError instead of
# c engine raising ParserError and swallowing exception
# that caused read to fail.
handle = open(self.csv_shiftjs, "rb")
codec = codecs.lookup("utf-8")
utf8 = codecs.lookup('utf-8')
# stream must be binary UTF8
stream = codecs.StreamRecoder(
handle, utf8.encode, utf8.decode, codec.streamreader,
codec.streamwriter)
if compat.PY3:
msg = "'utf-8' codec can't decode byte"
else:
msg = "'utf8' codec can't decode byte"
with tm.assert_raises_regex(UnicodeDecodeError, msg):
self.read_csv(stream)
stream.close()
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
self.read_csv(fname, index_col=0, parse_dates=True)
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
assert isinstance(result, Series)
        tm.assert_series_equal(result, expected)
from ..utils import constants, utils
import numpy as np
import pandas as pd
from collections import Counter
import xlsxwriter
"""
Create a heatmap that depicts the temporal pattern for any dataset that includes a datetime
column. The heatmap does not include a spatial dimension. Also, the module was created for HVI
targeting. This means that when using it for HVI targeting you should filter the input dataset
spatially before running. See the readme file for more information. There are two functions:
one that creates a heatmap with month of year and one that creates a heatmap with week of year.
------------------------
Example
from RADGIS.preprocessing import pol_heatmap
pol_heatmap.heatmap(dataframe, temporalBinType, save_location, timestamp_column=constants.DATETIME)
"""
# Helper functions
# Fills in missing days.
def _addDays(df):
all_days = pd.date_range(df.just_date.min(), df.just_date.max(), freq="D")
po = all_days
po = po.map(lambda t: t.strftime('%Y-%m-%d'))
all_days = set(po)
dfDaySet = set(df.just_date.tolist())
difference = set(map(str,all_days)) - dfDaySet
if len(difference) >= 1:
counter = -1
for d in difference:
# Add this as the default. It's a placeholder and will be updated later outside of this function.
df.loc[counter] = [0,"01", "Monday", d]
counter -= 1
else:
pass
# Fills in missing hours.
def _add_Hours(x):
fullHourSet = set(range(0, 23+1))
hourSet = set(x.HourOfDay)
difference = fullHourSet - hourSet
return list(difference)
# Main function that formats the data.
def heatmap(dataframe, temporalBinType, save_location, timestamp_column=constants.DATETIME):
'''
Return a xlsx file saved to disk.
:param dataframe: pandas DataFrame or TrajDataFrame
DataFrame or TrajDataFrame to be plotted
    :param temporalBinType: determines the temporal extent of the heatmap. Options are:
"MonthOfYear"
"WeekOfYear"
:param save_location: path and name with file extention (i.e., save_location=r"D:\Projects\20191211_TemporalChart_POL\file_name.xlsx")
:param timestamp_column: DataFrame or TrajDataFrame column that contains the datetime information.
Default is constants.DATETIME, which applies if TrajDataFrame and the original tdf datetime column is used.
'''
df = dataframe.copy()
# Add the columns and formatting that will be used to create the heatmap.
# A conditional statement based on the user's temporalBinType input.
if temporalBinType == "MonthOfYear":
df[temporalBinType] = df[timestamp_column].dt.year.astype(str) + '-' + df[timestamp_column].dt.month.astype(str).apply(lambda x: "0" + x if len(x) == 1 else x)
elif temporalBinType == "WeekOfYear":
df[temporalBinType] = df[timestamp_column].dt.year.astype(str) + '-' + df[timestamp_column].dt.strftime('%U').apply(lambda x: "0" + x if len(x) == 1 else x)
# Create a column for the day of week name and for the hour of day. Add a leading 0 to the hour of day if it's a single digit.
df["DayOfWeek"] = df[timestamp_column].dt.day_name()
df['HourOfDay'] = df[timestamp_column].dt.hour.astype(str).apply(lambda x: "0" + x if len(x) == 1 else x)
# Count the events by binning them into the selected temporalBinType, day of week name, and hour of day.
df["TotalHrByYearMonthDayNameHour"] = df.groupby([temporalBinType, "DayOfWeek", "HourOfDay"])[timestamp_column].transform(lambda x: len(x.dt.date.unique()))
# Add the date column, which will be used to fill in the missing days using the _addDays helper function.
df['just_date'] = df[timestamp_column].dt.date
df["just_date"] = df.just_date.map(lambda t: t.strftime('%Y-%m-%d'))
# Only keep the required columns. This is cleaner and decreases the size of the dataset that is processed.
df = df.filter(items=["TotalHrByYearMonthDayNameHour", "HourOfDay", "DayOfWeek", "just_date"])
    # Fill in missing days that are within the min/max date range.
_addDays(df)
df.reset_index(drop=True,inplace=True)
# Redo these two columns now that the missing dates have been filled in since there could be new rows
# that represent newly added days with the placeholder values inserted from the _addDays function. Also, the just_date
# column is redone because it needs to be a pandas timestamp.
df["just_date"] = pd.to_datetime(df.just_date)
df["DayOfWeek"] = df.just_date.dt.day_name()
    # Rebuild this column: it was dropped in the earlier filter because it has to be
    # recomputed now that the missing days have been added.
if temporalBinType == "MonthOfYear":
df[temporalBinType] = df['just_date'].dt.year.astype(str) + '-' + df['just_date'].dt.month.astype(str).apply(lambda x: "0" + x if len(x) == 1 else x)
elif temporalBinType == "WeekOfYear":
df[temporalBinType] = df['just_date'].dt.year.astype(str) + '-' + df['just_date'].dt.strftime('%U').apply(lambda x: "0" + x if len(x) == 1 else x)
df["HourOfDay"] = df["HourOfDay"].astype(int)
# Create a groupby object that is passed to the _add_Hours helper function that fills in missing hours.
day_Group = df.groupby(["just_date"], sort=False)
# Fill in missing hours.
results = day_Group.apply(_add_Hours)
results = pd.DataFrame(results)
# This takes the index, which is the date, and makes it into a column.
results["just_date"]=results.index.get_level_values("just_date")
# Rename the column that is comprised of the missing hours. Now called HourOfDay.
results.rename(columns={0:"HourOfDay"}, inplace=True)
    # Explode each date's list of missing hours so that every missing hour becomes its own row under that date.
sxy = results.apply(lambda x: pd.Series(x["HourOfDay"]), axis=1).stack()
# Name the series.
sxy.name = "HourOfDay"
# Turn the series into a dataframe.
    results = pd.DataFrame(sxy)
# -*- coding: utf-8 -*-
from collections import OrderedDict
from datetime import date, datetime, timedelta
import numpy as np
import pytest
from pandas.compat import product, range
import pandas as pd
from pandas import (
Categorical, DataFrame, Grouper, Index, MultiIndex, Series, concat,
date_range)
from pandas.api.types import CategoricalDtype as CDT
from pandas.core.reshape.pivot import crosstab, pivot_table
import pandas.util.testing as tm
@pytest.fixture(params=[True, False])
def dropna(request):
return request.param
class TestPivotTable(object):
def setup_method(self, method):
self.data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two',
'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull',
'dull', 'shiny', 'shiny', 'dull',
'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
def test_pivot_table(self):
index = ['A', 'B']
columns = 'C'
table = pivot_table(self.data, values='D',
index=index, columns=columns)
table2 = self.data.pivot_table(
values='D', index=index, columns=columns)
tm.assert_frame_equal(table, table2)
# this works
pivot_table(self.data, values='D', index=index)
if len(index) > 1:
assert table.index.names == tuple(index)
else:
assert table.index.name == index[0]
if len(columns) > 1:
assert table.columns.names == columns
else:
assert table.columns.name == columns[0]
expected = self.data.groupby(
index + [columns])['D'].agg(np.mean).unstack()
tm.assert_frame_equal(table, expected)
def test_pivot_table_nocols(self):
df = DataFrame({'rows': ['a', 'b', 'c'],
'cols': ['x', 'y', 'z'],
'values': [1, 2, 3]})
rs = df.pivot_table(columns='cols', aggfunc=np.sum)
xp = df.pivot_table(index='cols', aggfunc=np.sum).T
tm.assert_frame_equal(rs, xp)
rs = df.pivot_table(columns='cols', aggfunc={'values': 'mean'})
xp = df.pivot_table(index='cols', aggfunc={'values': 'mean'}).T
tm.assert_frame_equal(rs, xp)
def test_pivot_table_dropna(self):
df = DataFrame({'amount': {0: 60000, 1: 100000, 2: 50000, 3: 30000},
'customer': {0: 'A', 1: 'A', 2: 'B', 3: 'C'},
'month': {0: 201307, 1: 201309, 2: 201308, 3: 201310},
'product': {0: 'a', 1: 'b', 2: 'c', 3: 'd'},
'quantity': {0: 2000000, 1: 500000,
2: 1000000, 3: 1000000}})
pv_col = df.pivot_table('quantity', 'month', [
'customer', 'product'], dropna=False)
pv_ind = df.pivot_table(
'quantity', ['customer', 'product'], 'month', dropna=False)
m = MultiIndex.from_tuples([('A', 'a'), ('A', 'b'), ('A', 'c'),
('A', 'd'), ('B', 'a'), ('B', 'b'),
('B', 'c'), ('B', 'd'), ('C', 'a'),
('C', 'b'), ('C', 'c'), ('C', 'd')],
names=['customer', 'product'])
tm.assert_index_equal(pv_col.columns, m)
tm.assert_index_equal(pv_ind.index, m)
def test_pivot_table_categorical(self):
cat1 = Categorical(["a", "a", "b", "b"],
categories=["a", "b", "z"], ordered=True)
cat2 = Categorical(["c", "d", "c", "d"],
categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]})
result = pd.pivot_table(df, values='values', index=['A', 'B'],
dropna=True)
exp_index = pd.MultiIndex.from_arrays(
[cat1, cat2],
names=['A', 'B'])
expected = DataFrame(
{'values': [1, 2, 3, 4]},
index=exp_index)
tm.assert_frame_equal(result, expected)
def test_pivot_table_dropna_categoricals(self, dropna):
# GH 15193
categories = ['a', 'b', 'c', 'd']
df = DataFrame({'A': ['a', 'a', 'a', 'b', 'b', 'b', 'c', 'c', 'c'],
'B': [1, 2, 3, 1, 2, 3, 1, 2, 3],
'C': range(0, 9)})
df['A'] = df['A'].astype(CDT(categories, ordered=False))
result = df.pivot_table(index='B', columns='A', values='C',
dropna=dropna)
expected_columns = Series(['a', 'b', 'c'], name='A')
expected_columns = expected_columns.astype(
CDT(categories, ordered=False))
expected_index = Series([1, 2, 3], name='B')
expected = DataFrame([[0, 3, 6],
[1, 4, 7],
[2, 5, 8]],
index=expected_index,
columns=expected_columns,)
if not dropna:
# add back the non observed to compare
expected = expected.reindex(
columns=Categorical(categories)).astype('float')
tm.assert_frame_equal(result, expected)
def test_pivot_with_non_observable_dropna(self, dropna):
# gh-21133
df = pd.DataFrame(
{'A': pd.Categorical([np.nan, 'low', 'high', 'low', 'high'],
categories=['low', 'high'],
ordered=True),
'B': range(5)})
result = df.pivot_table(index='A', values='B', dropna=dropna)
expected = pd.DataFrame(
{'B': [2, 3]},
index=pd.Index(
pd.Categorical.from_codes([0, 1],
categories=['low', 'high'],
ordered=True),
name='A'))
tm.assert_frame_equal(result, expected)
# gh-21378
df = pd.DataFrame(
{'A': pd.Categorical(['left', 'low', 'high', 'low', 'high'],
categories=['low', 'high', 'left'],
ordered=True),
'B': range(5)})
result = df.pivot_table(index='A', values='B', dropna=dropna)
expected = pd.DataFrame(
{'B': [2, 3, 0]},
index=pd.Index(
pd.Categorical.from_codes([0, 1, 2],
categories=['low', 'high', 'left'],
ordered=True),
name='A'))
tm.assert_frame_equal(result, expected)
def test_pass_array(self):
result = self.data.pivot_table(
'D', index=self.data.A, columns=self.data.C)
expected = self.data.pivot_table('D', index='A', columns='C')
tm.assert_frame_equal(result, expected)
def test_pass_function(self):
result = self.data.pivot_table('D', index=lambda x: x // 5,
columns=self.data.C)
expected = self.data.pivot_table('D', index=self.data.index // 5,
columns='C')
tm.assert_frame_equal(result, expected)
def test_pivot_table_multiple(self):
index = ['A', 'B']
columns = 'C'
table = pivot_table(self.data, index=index, columns=columns)
expected = self.data.groupby(index + [columns]).agg(np.mean).unstack()
tm.assert_frame_equal(table, expected)
def test_pivot_dtypes(self):
# can convert dtypes
f = DataFrame({'a': ['cat', 'bat', 'cat', 'bat'], 'v': [
1, 2, 3, 4], 'i': ['a', 'b', 'a', 'b']})
assert f.dtypes['v'] == 'int64'
z = pivot_table(f, values='v', index=['a'], columns=[
'i'], fill_value=0, aggfunc=np.sum)
result = z.get_dtype_counts()
expected = Series(dict(int64=2))
tm.assert_series_equal(result, expected)
# cannot convert dtypes
f = DataFrame({'a': ['cat', 'bat', 'cat', 'bat'], 'v': [
1.5, 2.5, 3.5, 4.5], 'i': ['a', 'b', 'a', 'b']})
assert f.dtypes['v'] == 'float64'
z = pivot_table(f, values='v', index=['a'], columns=[
'i'], fill_value=0, aggfunc=np.mean)
result = z.get_dtype_counts()
expected = Series(dict(float64=2))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('columns,values',
[('bool1', ['float1', 'float2']),
('bool1', ['float1', 'float2', 'bool1']),
('bool2', ['float1', 'float2', 'bool1'])])
def test_pivot_preserve_dtypes(self, columns, values):
# GH 7142 regression test
v = np.arange(5, dtype=np.float64)
df = DataFrame({'float1': v, 'float2': v + 2.0,
'bool1': v <= 2, 'bool2': v <= 3})
df_res = df.reset_index().pivot_table(
index='index', columns=columns, values=values)
result = dict(df_res.dtypes)
expected = {col: np.dtype('O') if col[0].startswith('b')
else np.dtype('float64') for col in df_res}
assert result == expected
def test_pivot_no_values(self):
# GH 14380
idx = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-01-02',
'2011-01-01', '2011-01-02'])
df = pd.DataFrame({'A': [1, 2, 3, 4, 5]},
index=idx)
res = df.pivot_table(index=df.index.month, columns=df.index.day)
exp_columns = pd.MultiIndex.from_tuples([('A', 1), ('A', 2)])
exp = pd.DataFrame([[2.5, 4.0], [2.0, np.nan]],
index=[1, 2], columns=exp_columns)
tm.assert_frame_equal(res, exp)
df = pd.DataFrame({'A': [1, 2, 3, 4, 5],
'dt': pd.date_range('2011-01-01', freq='D',
periods=5)},
index=idx)
res = df.pivot_table(index=df.index.month,
columns=pd.Grouper(key='dt', freq='M'))
exp_columns = pd.MultiIndex.from_tuples([('A',
pd.Timestamp('2011-01-31'))])
exp_columns.names = [None, 'dt']
exp = pd.DataFrame([3.25, 2.0],
index=[1, 2], columns=exp_columns)
tm.assert_frame_equal(res, exp)
res = df.pivot_table(index=pd.Grouper(freq='A'),
columns=pd.Grouper(key='dt', freq='M'))
exp = pd.DataFrame([3],
index=pd.DatetimeIndex(['2011-12-31']),
columns=exp_columns)
tm.assert_frame_equal(res, exp)
def test_pivot_multi_values(self):
result = pivot_table(self.data, values=['D', 'E'],
index='A', columns=['B', 'C'], fill_value=0)
expected = pivot_table(self.data.drop(['F'], axis=1),
index='A', columns=['B', 'C'], fill_value=0)
tm.assert_frame_equal(result, expected)
def test_pivot_multi_functions(self):
f = lambda func: pivot_table(self.data, values=['D', 'E'],
index=['A', 'B'], columns='C',
aggfunc=func)
result = f([np.mean, np.std])
means = f(np.mean)
stds = f(np.std)
expected = concat([means, stds], keys=['mean', 'std'], axis=1)
tm.assert_frame_equal(result, expected)
# margins not supported??
f = lambda func: pivot_table(self.data, values=['D', 'E'],
index=['A', 'B'], columns='C',
aggfunc=func, margins=True)
result = f([np.mean, np.std])
means = f(np.mean)
stds = f(np.std)
expected = concat([means, stds], keys=['mean', 'std'], axis=1)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('method', [True, False])
def test_pivot_index_with_nan(self, method):
# GH 3588
nan = np.nan
df = DataFrame({'a': ['R1', 'R2', nan, 'R4'],
'b': ['C1', 'C2', 'C3', 'C4'],
'c': [10, 15, 17, 20]})
if method:
result = df.pivot('a', 'b', 'c')
else:
result = pd.pivot(df, 'a', 'b', 'c')
expected = DataFrame([[nan, nan, 17, nan], [10, nan, nan, nan],
[nan, 15, nan, nan], [nan, nan, nan, 20]],
index=Index([nan, 'R1', 'R2', 'R4'], name='a'),
columns=Index(['C1', 'C2', 'C3', 'C4'], name='b'))
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(df.pivot('b', 'a', 'c'), expected.T)
# GH9491
df = DataFrame({'a': pd.date_range('2014-02-01', periods=6, freq='D'),
'c': 100 + np.arange(6)})
df['b'] = df['a'] - pd.Timestamp('2014-02-02')
df.loc[1, 'a'] = df.loc[3, 'a'] = nan
df.loc[1, 'b'] = df.loc[4, 'b'] = nan
if method:
pv = df.pivot('a', 'b', 'c')
else:
pv = pd.pivot(df, 'a', 'b', 'c')
assert pv.notna().values.sum() == len(df)
for _, row in df.iterrows():
assert pv.loc[row['a'], row['b']] == row['c']
if method:
result = df.pivot('b', 'a', 'c')
else:
result = pd.pivot(df, 'b', 'a', 'c')
tm.assert_frame_equal(result, pv.T)
@pytest.mark.parametrize('method', [True, False])
def test_pivot_with_tz(self, method):
# GH 5878
df = DataFrame({'dt1': [datetime(2013, 1, 1, 9, 0),
datetime(2013, 1, 2, 9, 0),
datetime(2013, 1, 1, 9, 0),
datetime(2013, 1, 2, 9, 0)],
'dt2': [datetime(2014, 1, 1, 9, 0),
datetime(2014, 1, 1, 9, 0),
datetime(2014, 1, 2, 9, 0),
datetime(2014, 1, 2, 9, 0)],
'data1': np.arange(4, dtype='int64'),
'data2': np.arange(4, dtype='int64')})
df['dt1'] = df['dt1'].apply(lambda d: pd.Timestamp(d, tz='US/Pacific'))
df['dt2'] = df['dt2'].apply(lambda d: pd.Timestamp(d, tz='Asia/Tokyo'))
exp_col1 = Index(['data1', 'data1', 'data2', 'data2'])
exp_col2 = pd.DatetimeIndex(['2014/01/01 09:00',
'2014/01/02 09:00'] * 2,
name='dt2', tz='Asia/Tokyo')
exp_col = pd.MultiIndex.from_arrays([exp_col1, exp_col2])
expected = DataFrame([[0, 2, 0, 2], [1, 3, 1, 3]],
index=pd.DatetimeIndex(['2013/01/01 09:00',
'2013/01/02 09:00'],
name='dt1',
tz='US/Pacific'),
columns=exp_col)
if method:
pv = df.pivot(index='dt1', columns='dt2')
else:
pv = pd.pivot(df, index='dt1', columns='dt2')
tm.assert_frame_equal(pv, expected)
expected = DataFrame([[0, 2], [1, 3]],
index=pd.DatetimeIndex(['2013/01/01 09:00',
'2013/01/02 09:00'],
name='dt1',
tz='US/Pacific'),
columns=pd.DatetimeIndex(['2014/01/01 09:00',
'2014/01/02 09:00'],
name='dt2',
tz='Asia/Tokyo'))
if method:
pv = df.pivot(index='dt1', columns='dt2', values='data1')
else:
pv = pd.pivot(df, index='dt1', columns='dt2', values='data1')
tm.assert_frame_equal(pv, expected)
@pytest.mark.parametrize('method', [True, False])
def test_pivot_periods(self, method):
df = DataFrame({'p1': [pd.Period('2013-01-01', 'D'),
pd.Period('2013-01-02', 'D'),
pd.Period('2013-01-01', 'D'),
pd.Period('2013-01-02', 'D')],
'p2': [pd.Period('2013-01', 'M'),
pd.Period('2013-01', 'M'),
pd.Period('2013-02', 'M'),
pd.Period('2013-02', 'M')],
'data1': np.arange(4, dtype='int64'),
'data2': np.arange(4, dtype='int64')})
exp_col1 = Index(['data1', 'data1', 'data2', 'data2'])
exp_col2 = pd.PeriodIndex(['2013-01', '2013-02'] * 2,
name='p2', freq='M')
exp_col = pd.MultiIndex.from_arrays([exp_col1, exp_col2])
expected = DataFrame([[0, 2, 0, 2], [1, 3, 1, 3]],
index=pd.PeriodIndex(['2013-01-01', '2013-01-02'],
name='p1', freq='D'),
columns=exp_col)
if method:
pv = df.pivot(index='p1', columns='p2')
else:
pv = pd.pivot(df, index='p1', columns='p2')
tm.assert_frame_equal(pv, expected)
expected = DataFrame([[0, 2], [1, 3]],
index=pd.PeriodIndex(['2013-01-01', '2013-01-02'],
name='p1', freq='D'),
columns=pd.PeriodIndex(['2013-01', '2013-02'],
name='p2', freq='M'))
if method:
pv = df.pivot(index='p1', columns='p2', values='data1')
else:
pv = pd.pivot(df, index='p1', columns='p2', values='data1')
tm.assert_frame_equal(pv, expected)
@pytest.mark.parametrize('values', [
['baz', 'zoo'], np.array(['baz', 'zoo']),
pd.Series(['baz', 'zoo']), pd.Index(['baz', 'zoo'])
])
@pytest.mark.parametrize('method', [True, False])
def test_pivot_with_list_like_values(self, values, method):
# issue #17160
df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two', 'two'],
'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
'baz': [1, 2, 3, 4, 5, 6],
'zoo': ['x', 'y', 'z', 'q', 'w', 't']})
if method:
result = df.pivot(index='foo', columns='bar', values=values)
else:
result = pd.pivot(df, index='foo', columns='bar', values=values)
data = [[1, 2, 3, 'x', 'y', 'z'],
[4, 5, 6, 'q', 'w', 't']]
index = Index(data=['one', 'two'], name='foo')
columns = MultiIndex(levels=[['baz', 'zoo'], ['A', 'B', 'C']],
codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]],
names=[None, 'bar'])
expected = DataFrame(data=data, index=index,
columns=columns, dtype='object')
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('values', [
['bar', 'baz'], np.array(['bar', 'baz']),
pd.Series(['bar', 'baz']), pd.Index(['bar', 'baz'])
])
@pytest.mark.parametrize('method', [True, False])
def test_pivot_with_list_like_values_nans(self, values, method):
# issue #17160
df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two', 'two'],
'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
'baz': [1, 2, 3, 4, 5, 6],
'zoo': ['x', 'y', 'z', 'q', 'w', 't']})
if method:
result = df.pivot(index='zoo', columns='foo', values=values)
else:
result = pd.pivot(df, index='zoo', columns='foo', values=values)
data = [[np.nan, 'A', np.nan, 4],
[np.nan, 'C', np.nan, 6],
[np.nan, 'B', np.nan, 5],
['A', np.nan, 1, np.nan],
['B', np.nan, 2, np.nan],
['C', np.nan, 3, np.nan]]
index = Index(data=['q', 't', 'w', 'x', 'y', 'z'], name='zoo')
columns = MultiIndex(levels=[['bar', 'baz'], ['one', 'two']],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[None, 'foo'])
expected = DataFrame(data=data, index=index,
columns=columns, dtype='object')
tm.assert_frame_equal(result, expected)
    @pytest.mark.xfail(reason='MultiIndexed unstack with tuple names fails '
                              'with KeyError GH#19966')
@pytest.mark.parametrize('method', [True, False])
def test_pivot_with_multiindex(self, method):
# issue #17160
index = Index(data=[0, 1, 2, 3, 4, 5])
data = [['one', 'A', 1, 'x'],
['one', 'B', 2, 'y'],
['one', 'C', 3, 'z'],
['two', 'A', 4, 'q'],
['two', 'B', 5, 'w'],
['two', 'C', 6, 't']]
columns = MultiIndex(levels=[['bar', 'baz'], ['first', 'second']],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]])
df = DataFrame(data=data, index=index, columns=columns, dtype='object')
if method:
result = df.pivot(index=('bar', 'first'),
columns=('bar', 'second'),
values=('baz', 'first'))
else:
result = pd.pivot(df,
index=('bar', 'first'),
columns=('bar', 'second'),
values=('baz', 'first'))
data = {'A': Series([1, 4], index=['one', 'two']),
'B': Series([2, 5], index=['one', 'two']),
'C': Series([3, 6], index=['one', 'two'])}
expected = DataFrame(data)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('method', [True, False])
def test_pivot_with_tuple_of_values(self, method):
# issue #17160
df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two', 'two'],
'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
'baz': [1, 2, 3, 4, 5, 6],
'zoo': ['x', 'y', 'z', 'q', 'w', 't']})
with pytest.raises(KeyError):
# tuple is seen as a single column name
if method:
df.pivot(index='zoo', columns='foo', values=('bar', 'baz'))
else:
pd.pivot(df, index='zoo', columns='foo', values=('bar', 'baz'))
def test_margins(self):
def _check_output(result, values_col, index=['A', 'B'],
columns=['C'],
margins_col='All'):
col_margins = result.loc[result.index[:-1], margins_col]
expected_col_margins = self.data.groupby(index)[values_col].mean()
tm.assert_series_equal(col_margins, expected_col_margins,
check_names=False)
assert col_margins.name == margins_col
result = result.sort_index()
index_margins = result.loc[(margins_col, '')].iloc[:-1]
expected_ix_margins = self.data.groupby(columns)[values_col].mean()
tm.assert_series_equal(index_margins, expected_ix_margins,
check_names=False)
assert index_margins.name == (margins_col, '')
grand_total_margins = result.loc[(margins_col, ''), margins_col]
expected_total_margins = self.data[values_col].mean()
assert grand_total_margins == expected_total_margins
# column specified
result = self.data.pivot_table(values='D', index=['A', 'B'],
columns='C',
margins=True, aggfunc=np.mean)
_check_output(result, 'D')
# Set a different margins_name (not 'All')
result = self.data.pivot_table(values='D', index=['A', 'B'],
columns='C',
margins=True, aggfunc=np.mean,
margins_name='Totals')
_check_output(result, 'D', margins_col='Totals')
# no column specified
table = self.data.pivot_table(index=['A', 'B'], columns='C',
margins=True, aggfunc=np.mean)
for value_col in table.columns.levels[0]:
_check_output(table[value_col], value_col)
# no col
# to help with a buglet
self.data.columns = [k * 2 for k in self.data.columns]
table = self.data.pivot_table(index=['AA', 'BB'], margins=True,
aggfunc=np.mean)
for value_col in table.columns:
totals = table.loc[('All', ''), value_col]
assert totals == self.data[value_col].mean()
# no rows
rtable = self.data.pivot_table(columns=['AA', 'BB'], margins=True,
aggfunc=np.mean)
assert isinstance(rtable, Series)
table = self.data.pivot_table(index=['AA', 'BB'], margins=True,
aggfunc='mean')
for item in ['DD', 'EE', 'FF']:
totals = table.loc[('All', ''), item]
assert totals == self.data[item].mean()
def test_margins_dtype(self):
# GH 17013
df = self.data.copy()
df[['D', 'E', 'F']] = np.arange(len(df) * 3).reshape(len(df), 3)
mi_val = list(product(['bar', 'foo'], ['one', 'two'])) + [('All', '')]
mi = MultiIndex.from_tuples(mi_val, names=('A', 'B'))
expected = DataFrame({'dull': [12, 21, 3, 9, 45],
'shiny': [33, 0, 36, 51, 120]},
index=mi).rename_axis('C', axis=1)
expected['All'] = expected['dull'] + expected['shiny']
result = df.pivot_table(values='D', index=['A', 'B'],
columns='C', margins=True,
aggfunc=np.sum, fill_value=0)
tm.assert_frame_equal(expected, result)
@pytest.mark.xfail(reason='GH#17035 (len of floats is casted back to '
'floats)')
def test_margins_dtype_len(self):
mi_val = list(product(['bar', 'foo'], ['one', 'two'])) + [('All', '')]
mi = MultiIndex.from_tuples(mi_val, names=('A', 'B'))
expected = DataFrame({'dull': [1, 1, 2, 1, 5],
'shiny': [2, 0, 2, 2, 6]},
index=mi).rename_axis('C', axis=1)
expected['All'] = expected['dull'] + expected['shiny']
result = self.data.pivot_table(values='D', index=['A', 'B'],
columns='C', margins=True,
aggfunc=len, fill_value=0)
tm.assert_frame_equal(expected, result)
def test_pivot_integer_columns(self):
# caused by upstream bug in unstack
d = date.min
data = list(product(['foo', 'bar'], ['A', 'B', 'C'], ['x1', 'x2'],
[d + timedelta(i)
for i in range(20)], [1.0]))
df = DataFrame(data)
table = df.pivot_table(values=4, index=[0, 1, 3], columns=[2])
df2 = df.rename(columns=str)
table2 = df2.pivot_table(
values='4', index=['0', '1', '3'], columns=['2'])
tm.assert_frame_equal(table, table2, check_names=False)
def test_pivot_no_level_overlap(self):
# GH #1181
data = DataFrame({'a': ['a', 'a', 'a', 'a', 'b', 'b', 'b', 'b'] * 2,
'b': [0, 0, 0, 0, 1, 1, 1, 1] * 2,
'c': (['foo'] * 4 + ['bar'] * 4) * 2,
'value': np.random.randn(16)})
table = data.pivot_table('value', index='a', columns=['b', 'c'])
grouped = data.groupby(['a', 'b', 'c'])['value'].mean()
expected = grouped.unstack('b').unstack('c').dropna(axis=1, how='all')
tm.assert_frame_equal(table, expected)
def test_pivot_columns_lexsorted(self):
n = 10000
dtype = np.dtype([
("Index", object),
("Symbol", object),
("Year", int),
("Month", int),
("Day", int),
("Quantity", int),
("Price", float),
])
products = np.array([
('SP500', 'ADBE'),
('SP500', 'NVDA'),
('SP500', 'ORCL'),
('NDQ100', 'AAPL'),
('NDQ100', 'MSFT'),
('NDQ100', 'GOOG'),
('FTSE', 'DGE.L'),
('FTSE', 'TSCO.L'),
('FTSE', 'GSK.L'),
], dtype=[('Index', object), ('Symbol', object)])
items = np.empty(n, dtype=dtype)
iproduct = np.random.randint(0, len(products), n)
items['Index'] = products['Index'][iproduct]
items['Symbol'] = products['Symbol'][iproduct]
dr = pd.date_range(date(2000, 1, 1),
date(2010, 12, 31))
dates = dr[np.random.randint(0, len(dr), n)]
items['Year'] = dates.year
items['Month'] = dates.month
items['Day'] = dates.day
items['Price'] = np.random.lognormal(4.0, 2.0, n)
df = DataFrame(items)
pivoted = df.pivot_table('Price', index=['Month', 'Day'],
columns=['Index', 'Symbol', 'Year'],
aggfunc='mean')
assert pivoted.columns.is_monotonic
def test_pivot_complex_aggfunc(self):
f = OrderedDict([('D', ['std']), ('E', ['sum'])])
expected = self.data.groupby(['A', 'B']).agg(f).unstack('B')
result = self.data.pivot_table(index='A', columns='B', aggfunc=f)
tm.assert_frame_equal(result, expected)
def test_margins_no_values_no_cols(self):
# Regression test on pivot table: no values or cols passed.
result = self.data[['A', 'B']].pivot_table(
index=['A', 'B'], aggfunc=len, margins=True)
result_list = result.tolist()
assert sum(result_list[:-1]) == result_list[-1]
def test_margins_no_values_two_rows(self):
# Regression test on pivot table: no values passed but rows are a
# multi-index
result = self.data[['A', 'B', 'C']].pivot_table(
index=['A', 'B'], columns='C', aggfunc=len, margins=True)
assert result.All.tolist() == [3.0, 1.0, 4.0, 3.0, 11.0]
def test_margins_no_values_one_row_one_col(self):
# Regression test on pivot table: no values passed but row and col
# defined
result = self.data[['A', 'B']].pivot_table(
index='A', columns='B', aggfunc=len, margins=True)
assert result.All.tolist() == [4.0, 7.0, 11.0]
def test_margins_no_values_two_row_two_cols(self):
# Regression test on pivot table: no values passed but rows and cols
# are multi-indexed
self.data['D'] = ['a', 'b', 'c', 'd',
'e', 'f', 'g', 'h', 'i', 'j', 'k']
result = self.data[['A', 'B', 'C', 'D']].pivot_table(
index=['A', 'B'], columns=['C', 'D'], aggfunc=len, margins=True)
assert result.All.tolist() == [3.0, 1.0, 4.0, 3.0, 11.0]
def test_pivot_table_with_margins_set_margin_name(self):
# see gh-3335
for margin_name in ['foo', 'one', 666, None, ['a', 'b']]:
with pytest.raises(ValueError):
# multi-index index
pivot_table(self.data, values='D', index=['A', 'B'],
columns=['C'], margins=True,
margins_name=margin_name)
with pytest.raises(ValueError):
# multi-index column
pivot_table(self.data, values='D', index=['C'],
columns=['A', 'B'], margins=True,
margins_name=margin_name)
with pytest.raises(ValueError):
# non-multi-index index/column
pivot_table(self.data, values='D', index=['A'],
columns=['B'], margins=True,
margins_name=margin_name)
def test_pivot_timegrouper(self):
df = DataFrame({
'Branch': 'A A A A A A A B'.split(),
            'Buyer': 'Carl Mark Carl Carl Joe Joe Joe Carl'.split(),
'Quantity': [1, 3, 5, 1, 8, 1, 9, 3],
'Date': [datetime(2013, 1, 1),
datetime(2013, 1, 1),
datetime(2013, 10, 1),
datetime(2013, 10, 2),
datetime(2013, 10, 1),
datetime(2013, 10, 2),
datetime(2013, 12, 2),
datetime(2013, 12, 2), ]}).set_index('Date')
expected = DataFrame(np.array([10, 18, 3], dtype='int64')
.reshape(1, 3),
index=[datetime(2013, 12, 31)],
                             columns='Carl Joe Mark'.split())
expected.index.name = 'Date'
expected.columns.name = 'Buyer'
result = pivot_table(df, index=Grouper(freq='A'), columns='Buyer',
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index='Buyer', columns=Grouper(freq='A'),
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected.T)
expected = DataFrame(np.array([1, np.nan, 3, 9, 18, np.nan])
.reshape(2, 3),
index=[datetime(2013, 1, 1),
datetime(2013, 7, 1)],
                             columns='Carl Joe Mark'.split())
expected.index.name = 'Date'
expected.columns.name = 'Buyer'
result = pivot_table(df, index=Grouper(freq='6MS'), columns='Buyer',
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index='Buyer', columns=Grouper(freq='6MS'),
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected.T)
# passing the name
df = df.reset_index()
        result = pivot_table(df, index=Grouper(freq='6MS', key='Date'),
                             columns='Buyer', values='Quantity', aggfunc=np.sum)
import os
import time
import argparse
import json
import numpy as np
import pandas as pd
from collections import defaultdict
from multiprocessing import Process, Manager
from annoy import AnnoyIndex
from tqdm import tqdm
from lib.log import Logger
def dist_average(indeces, dists, list_dict):
new_dists = list()
for i, index in enumerate(indeces):
ave = (dists[i] + sum([ref_dict[index] for ref_dict in list_dict])) / (1 + len(list_dict))
new_dists.append(ave)
ret_indeces, ret_dists = list(zip(*sorted(list(zip(indeces, new_dists)), key=lambda x: x[1])))
return ret_indeces, ret_dists
def unique_order_preserving(list_input):
result = list()
for item in list_input:
if item not in result:
result.append(item)
return result
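# Worked example of the re-ranking helpers above (toy numbers, for illustration only):
# the initial query returns items [7, 3] at distances [0.2, 0.5]; one sub-query reports
# {7: 0.9, 3: 0.1}, so the averaged distances become 0.55 and 0.3 and item 3 moves first:
#   dist_average([7, 3], [0.2, 0.5], [{7: 0.9, 3: 0.1}])  ->  ((3, 7), (0.3, 0.55))
#   unique_order_preserving(["a", "b", "a", "c"])          ->  ["a", "b", "c"]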
def process(project_name, target_id_list, procnum, return_dict):
logger = Logger('_05_make_submission_{}'.format(project_name))
logger.info('=' * 50)
images = list()
with open(os.path.join('_embed_index', 'index_names_{}.json'.format(project_name)), 'r') as f:
index_names = json.load(f)
with open(os.path.join('_embed_index', 'test_names_{}.json'.format(project_name)), 'r') as f:
test_names = json.load(f)
num_index = len(index_names)
f = 512
u = AnnoyIndex(f, metric='euclidean')
u.load(os.path.join('_embed_index', 'index_features_{}.ann'.format(project_name)))
db_test = AnnoyIndex(f, metric='euclidean')
db_test.load(os.path.join('_embed_index', 'test_features_{}.ann'.format(project_name)))
logger.info('===> embed test images and get nearest neighbors')
search_k = 1_000_00
for test_id in tqdm(target_id_list):
# main query
try:
db_index = test_names.index(test_id)
img_feature = db_test.get_item_vector(db_index)
indeces, dists = u.get_nns_by_vector(img_feature, n=300, search_k=search_k,
include_distances=True)
except ValueError:
logger.info('{}: ValueError error'.format(test_id))
indeces, dists = list(range(300)), np.ones(300).tolist()
indeces_init = list(indeces)
dists_init = list(dists)
# sub query
list_dict = list()
sub_id_selected = list()
num_sub_query = 3
for j in range(num_sub_query):
sub_id = indeces[0]
sub_id_selected.append(sub_id)
# search from index
indeces_exp, dists_exp = u.get_nns_by_item(sub_id, n=600, search_k=search_k, include_distances=True)
d = defaultdict(lambda: float(dists_exp[-1]))
for key, dist_exp in zip(indeces_exp, dists_exp):
d[key] = dist_exp
# add result of sub query
list_dict.append(d)
# take average by initial query and current sub queries
indeces, dists = dist_average(indeces_init, dists_init, list_dict)
# remove selected sub_ids
indeces, dists = zip(*[(_id, _dist) for _id, _dist, in zip(indeces, dists) if _id not in sub_id_selected])
# merge selected sub_ids and sorted other sub_ids
indeces = sub_id_selected + list(indeces)
indeces = [index % num_index for index in indeces]
names = [index_names[index] for index in indeces]
names = unique_order_preserving(names)[:100]
images.append(' '.join(names))
return_dict[procnum] = images
def main(project_name):
tic = time.time()
logger = Logger('_05_make_submission_{}'.format(project_name))
logger.info('=' * 50)
sample_submission = pd.read_csv('../../dataset/sample_submission.csv')
images = list()
test_id_list = sample_submission.id
f = 512
u = AnnoyIndex(f, metric='euclidean')
u.load(os.path.join('_embed_index', 'index_features_{}.ann'.format(project_name)))
db_test = AnnoyIndex(f, metric='euclidean')
db_test.load(os.path.join('_embed_index', 'test_features_{}.ann'.format(project_name)))
logger.info('===> embed test images and get nearest neighbors')
manager = Manager()
return_dict = manager.dict()
num_processor = 8
l = [(len(test_id_list) + i) // num_processor for i in range(num_processor)]
processor_target = 0
list_processors = list()
for p in range(num_processor):
pr = Process(target=process,
args=(project_name, test_id_list[processor_target:processor_target+l[p]], p, return_dict))
list_processors.append(pr)
processor_target += l[p]
for p in range(num_processor):
list_processors[p].start()
for p in range(num_processor):
list_processors[p].join()
for p in range(num_processor):
images.extend(return_dict[p])
assert len(images) == len(test_id_list)
    submission = pd.DataFrame(test_id_list, columns=['id'])
# link: https://github.com/liyaguang/DCRNN
import numpy as np
import pandas as pd
import json
import util
outputdir = 'output/PEMS_BAY'
util.ensure_dir(outputdir)
dataurl = 'input/PEMS-BAY/'
dataname = outputdir+'/PEMS_BAY'
dataset = pd.read_csv(dataurl+'sensor_graph/graph_sensor_locations_bay.csv', header=None)
dataset.columns = ['sensor_id', 'latitude', 'longitude']
idset = set()
geo = []
for i in range(dataset.shape[0]):
id = dataset['sensor_id'][i]
lat = dataset['latitude'][i]
lon = dataset['longitude'][i]
if id not in idset:
idset.add(id)
geo.append([id, 'Point', '['+str(lon)+', '+str(lat)+']'])
geo = pd.DataFrame(geo, columns=['geo_id', 'type', 'coordinates'])
"""
Binning
https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.KBinsDiscretizer.html
converts a numerical column to a matrix of binary variables
1. Execute the code
(in Jupyter, split it into multiple cells)
2. Understand what is happening
QUESTION: How does changing 'n_bins' affect the model?
3. Explain to the rest of the group what you did
"""
import pandas as pd
from sklearn.preprocessing import KBinsDiscretizer
df = pd.read_csv('penguins_simple.csv', sep=';')
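# The snippet above stops after loading the data; below is a minimal sketch of the binning
# step it describes. The column name 'Body Mass (g)' is an assumption about
# penguins_simple.csv -- substitute any numeric column from your file.
kbins = KBinsDiscretizer(n_bins=5, encode='onehot-dense', strategy='quantile')
binned = kbins.fit_transform(df[['Body Mass (g)']])
print(kbins.bin_edges_)   # the cut points chosen for the 5 bins
print(binned[:5])         # each row becomes a one-hot vector marking its bin
# Larger n_bins gives a downstream model finer resolution but fewer samples per bin;
# smaller n_bins smooths the feature at the cost of detail.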
import pandas as pd
import pytest
from ploomber.validators import (Assert, data_frame_validator, validate_schema,
validate_values)
from ploomber.validators import string
def test_Assert():
assert_ = Assert()
assert_(False, 'Error message')
assert_(True, 'Another error message')
assert len(assert_) == 1
assert assert_.messages_error == ['Error message']
assert repr(assert_) == 'Assert oject with 1 error messages'
with pytest.raises(AssertionError) as excinfo:
assert_.check()
assert str(excinfo.value) == '1 error found:\nError message'
@pytest.fixture
def assert_():
assert_ = Assert()
assert_(False, '1')
assert_(False, '2')
assert_(False, '3')
return assert_
def test_Assert_iter(assert_):
assert list(assert_) == ['1', '2', '3']
def test_Assert_str_without_errors():
assert str(Assert()) == 'No errors found'
def test_Assert_str_with_errors(assert_):
assert '3 errors found' in str(assert_)
assert all(msg in str(assert_) for msg in ('1', '2', '3'))
def test_Assert_with_warning(assert_):
assert_.warn(False, '4')
assert '3 errors found' in str(assert_)
assert all(msg in str(assert_) for msg in ('1', '2', '3'))
assert '1 warning' in str(assert_)
assert '4' in str(assert_)
def test_Assert_with_warnings(assert_):
assert_.warn(False, '4')
assert_.warn(False, '5')
assert '3 errors found' in str(assert_)
assert all(msg in str(assert_) for msg in ('1', '2', '3'))
assert '2 warnings' in str(assert_)
assert all(msg in str(assert_) for msg in ('4', '5'))
def test_allows_optional_columns():
df = pd.DataFrame({'a': [0], 'b': [0]})
assert data_frame_validator(
df, [validate_schema(schema={'a': 'int64'}, optional=['b'])])
def test_validates_optional_schema():
df = pd.DataFrame({'a': [0], 'b': [0]})
with pytest.raises(AssertionError):
data_frame_validator(
df,
[validate_schema(schema={'a': 'int64'}, optional={'b': 'object'})])
def test_ignores_dtype_validation_if_none():
df = pd.DataFrame({'a': [0], 'b': [0]})
data_frame_validator(
df, [validate_schema(schema={'a': None}, optional={'b': None})])
def test_raises_on_unexpected_columns():
df = pd.DataFrame({'a': [0], 'b': [0]})
with pytest.raises(AssertionError):
data_frame_validator(
df,
[
validate_schema(schema={'a': 'int64'},
on_unexpected_cols='raise')
],
)
def test_warns_on_unexpected_columns():
df = pd.DataFrame({'a': [0], 'b': [0]})
with pytest.warns(UserWarning):
data_frame_validator(
df,
[
validate_schema(schema={'a': 'int64'},
on_unexpected_cols='warn')
],
)
def test_validate_values_all_ok():
df = pd.DataFrame({'z': [0, 1, 2], 'i': ['a', 'b', 'c']})
data_frame_validator(df, [
validate_values(values={
'z': ('range', (0, 2)),
'i': ('unique', {'a', 'b', 'c'}),
})
])
def test_validate_values_invalid():
    df = pd.DataFrame({'z': [0, 1, 2], 'i': ['a', 'b', 'c']})
# coding: utf-8
# In[130]:
AoI_csv_dir = '1_input/aoi_ids.csv'
label_csv_dir = '1_input/labels.csv'
features_csv_dir = '2_processing/features_table.csv'
BAG_Panden = '/home/data/citycentre/BAG_Panden.shp'
# # Dependencies
# In[131]:
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.metrics import pairwise_distances_argmin
from sklearn.datasets import load_sample_image
from sklearn.utils import shuffle
from time import time
#from PIL import Image
import pandas as pd
from shutil import copyfile
import matplotlib.image as mpimg
import numpy
import geopandas as gpd
import fiona
import rasterio
import rasterio.mask
from pandas.tools.plotting import scatter_matrix
from scipy.stats import randint
from sklearn import preprocessing
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import KFold
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import SGDClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.mixture import GaussianMixture
from sklearn.svm import LinearSVC
from sklearn.cluster import MeanShift
from sklearn.manifold import LocallyLinearEmbedding
from shapely.geometry import shape
from shapely.geometry import Polygon
import shapefile
import shutil
import os
import glob
# # Functions
# In[132]:
def ids_to_shp_visualization(ids_list, directory, name, shape_file=BAG_Panden):
i=0
if not os.path.exists(directory+"/temporary"):
os.makedirs(directory+"/temporary")
for identifica in ids_list:
for feat in fiona.open(shape_file, "r"):
if identifica==feat['properties']['Identifica']:
try:
feat2=feat['properties']['Identifica']
feat1=[feat['geometry']]
area_per_roof = feat['properties']['SHAPE_Area']
with fiona.open(shape_file, "r") as shapef:
meta=shapef.meta
with fiona.open(directory+'/temporary/'+str(i)+'.shp', 'w', **meta) as sink:
sink.write(feat)
i=i+1
#if i==1:
# break
except ValueError:
continue
files = glob.glob(directory+"/temporary/*.shp")
w = shapefile.Writer()
for f in files:
r = shapefile.Reader(f)
w._shapes.extend(r.shapes())
w.records.extend(r.records())
w.fields = list(r.fields)
w.save(directory+"/"+name+".shp")
shutil.rmtree(directory+"/temporary/")
# # Model
# In[133]:
aoi_list = pd.read_csv(AoI_csv_dir, dtype=str).ID.tolist()
print('The IDs have been added.')
# In[134]:
label_df = pd.read_csv(label_csv_dir, dtype={'ID':str}).set_index('ID')
label_df.label = label_df.label.replace(3,2)
label_df.shape
# In[135]:
features_df = pd.read_csv(features_csv_dir, dtype={'ID':str}).set_index('ID')
features_df.loc[['0599100010050372']]
# In[136]:
features_with_label = pd.concat([features_df, label_df], axis=1)
# In[137]:
# Split-out validation dataset
X_train = features_with_label.loc[label_df.index].drop('label', 1).dropna()
y_train = features_with_label.loc[label_df.index].dropna().label
X_test = features_with_label.loc[aoi_list].drop('label', 1).dropna()
#print(X_train.shape)
#print(y_train.shape)
#print(X_test.shape)
# In[138]:
#y_train.head()
# In[139]:
seed = 0
scoring = 'accuracy'
# In[140]:
models = []
models.append(('GBC', GradientBoostingClassifier()))
models.append(('LR', LogisticRegression()))
models.append(('NB', GaussianNB()))
models.append(('LDA', LinearDiscriminantAnalysis()))
models.append(('KNN', KNeighborsClassifier()))
models.append(('CART', DecisionTreeClassifier()))
models.append(('SVM', SVC()))
models.append(('SGD', SGDClassifier()))
models.append(('LSVM', LinearSVC()))
# In[141]:
cv_results_mean = []
cv_results_std = []
results = []
names = []
for name, model in models:
kfold = KFold(n_splits=5, random_state=seed)
cv_results = cross_val_score(model, X_train, y_train, cv=kfold, scoring=scoring)
results.append(cv_results)
names.append(name)
cv_results_mean.append(cv_results.mean())
cv_results_std.append(cv_results.std())
msg = "%s: %f (%f)" % (name, cv_results.mean(), cv_results.std())
print(msg)
fig = plt.figure(figsize=(16, 8))
fig.suptitle('Algorithm Comparison')
ax = fig.add_subplot(111)
plt.ylim([0,1])
plt.boxplot(results)
ax.set_xticklabels(names)
plt.xlabel('Model', fontsize=14)
plt.ylabel('Accuracy', fontsize=14)
plt.show()
# In[142]:
clf = LogisticRegression()
model_train = clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
y_pred_proba = clf.predict_proba(X_test).tolist()
#print(accuracy_score(y_test, y_pred))
#print(confusion_matrix(y_test, y_pred))
#print(classification_report(y_test, y_pred))
pred_proba = clf.predict_proba(X_test)
proba = clf.fit(X_train, y_train).predict_proba(X_test)
# In[143]:
model_aoi_ids = pd.DataFrame(y_pred_proba, index=X_test.index,
columns=['nonveg_pred_proba', 'green_pred_proba',
'tree_pred_proba'])
# # AoI IDs Table
# In[144]:
model_aoi_ids['probability'] = model_aoi_ids[['nonveg_pred_proba','green_pred_proba',\
'tree_pred_proba']].max(axis=1)
model_aoi_ids.loc[(model_aoi_ids.probability == model_aoi_ids.nonveg_pred_proba, 'classification')] = '1'
model_aoi_ids.loc[(model_aoi_ids.probability == model_aoi_ids.green_pred_proba, 'classification')] = '2'
model_aoi_ids.loc[(model_aoi_ids.probability == model_aoi_ids.tree_pred_proba, 'classification')] = '4'
model_aoi_ids.loc[(model_aoi_ids.classification == '1', 'category')] = 'Non-Vegetation'
model_aoi_ids.loc[(model_aoi_ids.classification == '2', 'category')] = 'Vegetation'
model_aoi_ids.loc[(model_aoi_ids.classification == '4', 'category')] = 'Trees'
# In[145]:
output_model_aoi_ids = pd.concat([model_aoi_ids, features_with_label.loc[X_test.index]], axis=1, join='inner')
import pandas as pd
import os
from os.path import expanduser
home = expanduser("~")
import sys
import re
import time
import joblib
from datetime import datetime
import numpy as np
from collections import defaultdict
from Logger import get_logger, close
from util import lang2id
import glob
import json
mldoc_folder = "/home/gkaraman/data2/multilingual/mldoc/preprocessed_data/"
cls_folder = "/home/gkaraman/data2/multilingual/cls/multifit_preprocessed/"
twitter_folder = "/home/gkaraman/data1/data/senti_transfer/labeled_data/"
lorelei_folder = "/home/gkaraman/data2/multilingual/lorelei/preprocessed_data/"
label2ind_dict = {
'mldoc': {'CCAT': 0, 'ECAT': 1, 'GCAT': 2, 'MCAT': 3},
'cls': {'NEG': 0, 'POS': 1},
'twittersent': {'negative': 0, 'neutral': 1, 'positive': 2},
'lorelei': {'non-med':0, 'med':1}
}
def load_df_mldoc(method='train', language='english', train_size=1000, print_fn=None):
if method == 'train':
fpath = os.path.join(mldoc_folder, language + '.train.{}'.format(train_size))
elif method == 'dev':
fpath = os.path.join(mldoc_folder, language + '.dev')
elif method == 'test':
fpath = os.path.join(mldoc_folder, language + '.test')
elif method == 'unlabeled':
fpath = os.path.join(mldoc_folder, language + '.train.{}'.format(train_size))
print_fn('Loading {} {} data'.format(method, language))
df = pd.read_csv(fpath, delimiter='\t', header=None, names=["label", "text"])
return df
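# Illustrative call (hypothetical arguments): load_df_mldoc('train', 'german', train_size=1000,
# print_fn=print) reads mldoc_folder + 'german.train.1000' into a two-column DataFrame
# with 'label' and 'text'.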
def load_df_cls(method='train', language='english', domain='books', print_fn=None, dev_ratio=0.2):
if method == 'dev':
method = 'test'
dataset_folder = os.path.join(cls_folder, '{}-{}/'.format(lang2id(language), domain))
if method =='unlabeled':
method='unsup'
fpath = os.path.join(dataset_folder, '{}.{}.csv'.format(lang2id(language), method))
print_fn('loading data: {}'.format(fpath))
    df = pd.read_csv(fpath, header=None, names=["label", "title", "description"])
import time
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import f1_score
import stellargraph as sg
from stellargraph.mapper import CorruptedGenerator, HinSAGENodeGenerator
from stellargraph.layer import DeepGraphInfomax, HinSAGE
import tensorflow as tf
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras import Model, optimizers, losses, metrics
'''
Runs the entire pipeline:
- Takes preprocessed data as input
- Outputs predictions on the test_set nodes.
'''
def DGIPipeline(v_sets, e_sets, v_data, e_data, core_targets, ext_targets, core_testing):
print("HINSAGE DGI FULL PIPELINE STARTED")
tin = time.time()
#? Sort based on testingFlag
# data_splits[i].iloc[INDEX].values[0]
# where INDEX:
# [0] testingFlag=NaN
# [1] testingFlag=0
# [2] testingFlag=1
data_splits = dict()
for i in v_sets:
v_sets[i] = v_sets[i].sort_values('testingFlag')
data_splits[i] = v_sets[i].testingFlag.value_counts().to_frame()
v_sets[i] = v_sets[i].drop('testingFlag', axis=1)
#? Removing ExtendedCaseGraphID
for i in v_sets:
v_sets[i] = v_sets[i].drop('ExtendedCaseGraphID', axis=1)
#? Create the graph object
G = sg.StellarDiGraph(v_sets, e_sets)
'''
Iterate through the algotithm for every node type.
This is because HinSAGE can predict on one node type at a time, even though
it uses all the graph to compute the embeddings.
'''
# Parameters
batch_size = 200
dropout = 0.4
verbose = 1
visualize = False
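    # run_for_node_type (defined below) is intended to be called once per node type,
    # e.g. run_for_node_type("<node type>", hinsage_layer_sizes=[32, 32],
    # num_samples=[8, 4], activations=["relu", "relu"], epochs=50).
    # The node-type name and hyperparameter values here are placeholders, not from the original code.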
def run_for_node_type(v_type, hinsage_layer_sizes, num_samples, activations, epochs):
nan_tflag = data_splits[v_type].iloc[0].values[0]
train_tflag = data_splits[v_type].iloc[1].values[0]
test_tflag = data_splits[v_type].iloc[2].values[0]
train_cv_set = v_sets[v_type][nan_tflag:nan_tflag+train_tflag]
train_cv_ids = train_cv_set.index.values.tolist()
train_cv_labels = v_data.loc[[int(node_id) for node_id in train_cv_ids]].ExtendedCaseGraphID
test_set = v_sets[v_type][-test_tflag:]
test_ids = test_set.index.values.tolist()
generator = HinSAGENodeGenerator(
G,
batch_size,
num_samples,
head_node_type=v_type
)
hinsage = HinSAGE(
layer_sizes=hinsage_layer_sizes,
activations=activations,
generator=generator,
bias=True,
normalize="l2",
dropout=dropout
)
def run_deep_graph_infomax(base_model, generator, epochs):
print(f"Starting training for {v_type} type: ")
t0 = time.time()
corrupted_generator = CorruptedGenerator(generator)
gen = corrupted_generator.flow(G.nodes(node_type=v_type))
infomax = DeepGraphInfomax(base_model, corrupted_generator)
x_in, x_out = infomax.in_out_tensors()
# Train with DGI
model = Model(inputs=x_in, outputs=x_out)
model.compile(loss=tf.nn.sigmoid_cross_entropy_with_logits, optimizer=Adam(lr=1e-3))
es = EarlyStopping(monitor="loss", min_delta=0, patience=10)
history = model.fit(gen, epochs=epochs, verbose=verbose, callbacks=[es])
#sg.utils.plot_history(history)
x_emb_in, x_emb_out = base_model.in_out_tensors()
if generator.num_batch_dims() == 2:
x_emb_out = tf.squeeze(x_emb_out, axis=0)
t1 = time.time()
print(f'Time required: {t1-t0:.2f} s ({(t1-t0)/60:.1f} min)')
return x_emb_in, x_emb_out, model
#? Train HinSAGE model:
x_emb_in, x_emb_out, _model = run_deep_graph_infomax(hinsage, generator, epochs=epochs)
emb_model = Model(inputs=x_emb_in, outputs=x_emb_out)
train_cv_embs = emb_model.predict(
generator.flow(train_cv_set.index.values)
)
#? Optional: Plot embeddings of training and CV set of current node type
if (visualize == True):
train_cv_embs_2d = pd.DataFrame(
TSNE(n_components=2).fit_transform(train_cv_embs),
index=train_cv_set.index.values
)
label_map = {l: i*10 for i, l in enumerate(np.unique(train_cv_labels), start=10) if pd.notna(l)}
node_colours = [label_map[target] if pd.notna(target) else 0 for target in train_cv_labels]
alpha = 0.7
fig, ax = plt.subplots(figsize=(15, 15))
ax.scatter(
train_cv_embs_2d[0],
train_cv_embs_2d[1],
c=node_colours,
cmap="jet",
alpha=alpha,
)
ax.set(aspect="equal")
plt.title(f"TSNE of HinSAGE {v_type} embeddings with DGI- coloring on ExtendedCaseGraphID")
plt.show()
return 1
#? Split training and cross valuation set using 80% 20% simple ordered split
n_embs = train_cv_embs.shape[0]
train_size = int(n_embs*0.80)
cv_size = int(n_embs*0.20)
train_set = train_cv_embs[:train_size]
        train_labels = np.ravel(pd.DataFrame(train_cv_labels.values[:train_size]))
# -*- coding: utf-8 -*-
"""
Functions/script to export the LinkedBooks data into OpenCitation's RDF format.
Usage:
rdf_exporter/rdf_exporter.py --help
rdf_exporter/rdf_exporter.py --api-base=<url> --out-dir=<path>
python -m pdb rdf_exporter.py --api-base=http://cdh-dhlabpc6.epfl.ch:8888/api\
--out-dir=/Users/rromanello/Documents/LinkedBooks/LinkedBooksCitationCorpus/data
"""
__author__ = """<NAME>"""
import codecs
import logging
import os
import pdb
import shutil
import sys
from collections import namedtuple
import pandas as pd
import requests
from docopt import docopt
from datetime import datetime
from rdflib_jsonld.context import Context
from rdflib import Graph, Literal, URIRef
from rdflib.namespace import FOAF, OWL, RDF, RDFS, DCTERMS, XSD
from rdflib import Namespace
sys.path += ["../", "./"]
global PORT, API_BASEURI, AUTHOR_ENDPOINT, AUTHORS_ENDPOINT, ARTICLE_ENDPOINT,\
ARTICLE_ENDPOINT, BOOKS_ENDPOINT, BOOK_ENDPOINT, PRIMARY_SOURCE_ENDPOINT, \
PRIMARY_SOURCES_ENDPOINT, REFERENCES_ENDPOINT, REFERENCE_ENDPOINT,\
STATS_ENDPOINT
logger = logging.getLogger(__name__)
ProvenanceEntity = namedtuple(
"ProvenanceEntity", [
'resource_id',
'type',
'uri',
'graph',
'described_resource_id',
'described_resource_type'
# TODO: add `graph`
]
)
Entity = namedtuple(
"Entity", [
'mongo_id',
'type',
'uri',
'path', # TODO: remove
'resource_id',
# TODO: add `graph`
]
)
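# Example of what a populated record might look like (all values hypothetical):
#   Entity(mongo_id='5a1b...', type='article', uri='https://w3id.org/oc/corpus/br/1',
#          path='.../br/1.json', resource_id=1)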
####################################
# declaration of rdflib namespaces #
####################################
oc_ns = Namespace("https://w3id.org/oc/ontology/")
spar_prov_ns = Namespace("http://purl.org/spar/pro/")
prov_ns = Namespace("http://www.w3.org/ns/prov#")
occ_ns = Namespace("https://w3id.org/oc/ontology/")
fabio_ns = Namespace("http://purl.org/spar/fabio/")
cito_ns = Namespace("http://purl.org/spar/cito/")
################################
# API => RDF mapping functions #
################################
def export(api_base_uri, base_uri, out_dir):
"""Map authors, publications and references onto OpenCitation's data model.
"""
logger.info("Creating export of LinkedBooks data.")
logger.info("API Base URI = {}".format(api_base_uri))
logger.info("RDF base URI = {}".format(base_uri))
logger.info("Output directory = {}".format(out_dir))
# TODO: cycle instead of taking 10 records
offset = 0
limit = 3
r = requests.get(
AUTHORS_ENDPOINT,
params={'offset': offset, 'limit': limit}
)
authors_data = r.json()
created_entities = []
# instantiate the curation agent for the LB API
api_curation_agent = create_prov_agent(
"LinkedBooks API v1.0",
os.path.join(base_uri, 'prov'),
os.path.join(out_dir, 'prov')
)
created_entities.append(api_curation_agent)
df = save_entities(
pd.DataFrame([], columns=Entity._fields),
created_entities
)
for record in authors_data:
author_id = record["author"]["id"]
api_url = AUTHOR_ENDPOINT % author_id
r = requests.get(api_url)
data = r.json()
# check if author not yet contained in df
if author_id not in list(df['mongo_id']):
new_entities = export_author(
data["author"],
api_url,
base_uri,
api_curation_agent,
out_dir
)
df = save_entities(df, new_entities)
publications = data["publications"]
pubs = []
pubs += [("book", pub["id"]) for pub in publications["books"]]
pubs += [("article", pub["id"]) for pub in publications["articles"]]
for pub_type, pub_id in pubs:
# determine the API endpoint to call, based on publication type
if pub_type == "book":
api_url = BOOK_ENDPOINT % pub_id
elif pub_type == "article":
api_url = ARTICLE_ENDPOINT % pub_id
r = requests.get(api_url)
data = r.json()
pub_data = data["book"] if pub_type == "book" else data["article"]
cited_pubs = []
cited_pubs += [
(pub["id"], "book", pub)
for pub in data["cited"]["books"]
]
cited_pubs += [
(pub["id"], "article", pub)
for pub in data["cited"]["articles"]
]
cited_pubs += [
(pub["id"], "primary_source", pub)
for pub in data["cited"]["primary_sources"]
]
########################################
# TODO: instantiate cited publications #
########################################
for pub_id, pub_type, pub_info in cited_pubs:
if pub_type == "book":
cited_pub_api_url = BOOK_ENDPOINT % pub_id
elif pub_type == "article":
cited_pub_api_url = ARTICLE_ENDPOINT % pub_id
elif pub_type == "primary_source":
cited_pub_api_url = PRIMARY_SOURCE_ENDPOINT % (
"asve",
pub_id
)
df = export_publication(
pub_id,
pub_type,
pub_info,
cited_pub_api_url,
df,
base_uri,
api_curation_agent,
out_dir,
)
if len(cited_pubs) > 0:
# get URIs of cited publications
cited_pubs_uris = list(
df[df['mongo_id'].isin(
[p_id for p_id, p_type, p_data in cited_pubs]
)]['uri'].unique()
)
else:
cited_pubs_uris = None
df = export_publication(
pub_id,
pub_type,
pub_data,
api_url,
df,
base_uri,
api_curation_agent,
out_dir,
cited_pubs_uris
)
return df
def save_entities(temp_df, entities):
"""TODO."""
df = pd.concat([
temp_df,
| pd.DataFrame(entities, columns=Entity._fields) | pandas.DataFrame |
#!/usr/bin/env python3
import sys
# sys.path.append("..")
# sys.path.append("../..")
import os, errno
import numpy as np
import pandas as pd
from sklearn import svm
from sklearn.metrics import roc_auc_score as auc_compute
from sklearn.metrics import average_precision_score as avpr_compute
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import log_loss
from core_models.Marginals.main_Marginals import get_prob_matrix
import core_models.parser_arguments as parser_arguments
from core_models.utils import get_auc_metrics, get_avpr_metrics
import core_models.utils as utils
import warnings
def main(args):
# Load datasets
train_loader, X_train, target_errors_train, dataset_obj_train, attributes = utils.load_data(args.data_folder, args.batch_size,
is_train=True, is_one_hot=args.is_one_hot)
test_loader, X_test, target_errors_test, _, _ = utils.load_data(args.data_folder, args.batch_size, is_train=False)
df_data_train = dataset_obj_train.df_dataset_instance
# Run Marginals to obtain cell log probs
with warnings.catch_warnings():
warnings.simplefilter("ignore")
p_mat_train, _, _, _ = get_prob_matrix(df_data_train, dataset_obj_train.cat_cols, n_comp_max=40)
nll_marginal_cell = -np.log(p_mat_train + 1e-8)
target_errors_row_train = (target_errors_train.sum(dim=1)>0)
target_row_train = target_errors_row_train.numpy()
target_errors_row_test = (target_errors_test.sum(dim=1)>0)
target_row_test = target_errors_row_test.numpy()
# Run OCSVM row outlier detection
clf = svm.OneClassSVM(nu=0.2, kernel="rbf", gamma=0.1)
clf.fit(X_train)
outlier_score_row_train = -clf.score_samples(X_train)
outlier_score_row_test = -clf.score_samples(X_test)
# Platt Scaling (uses Logistic Regression) of OCSVM scores
lr_calib = LogisticRegression(solver='lbfgs')
lr_calib.fit(outlier_score_row_test.reshape(-1,1), target_row_test)
p_inlier_train = lr_calib.predict_proba(outlier_score_row_train.reshape(-1,1))[:,0]
nll_inlier_row_train = -np.log(p_inlier_train + 1e-8) # -log (p_inlier)
# Row metrics
auc_row_train = auc_compute(target_row_train, outlier_score_row_train)
avpr_row_train = avpr_compute(target_row_train, outlier_score_row_train)
ll_row_train = log_loss(target_row_train, outlier_score_row_train)
auc_row_train_calibed = auc_compute(target_row_train, nll_inlier_row_train)
avpr_row_train_calibed = avpr_compute(target_row_train, nll_inlier_row_train)
ll_row_train_calibed = log_loss(target_row_train, 1.-p_inlier_train)
print("AUC Prev. Calib.: {}".format(auc_row_train))
print("AVPR Prev. Calib.: {}".format(avpr_row_train))
print("Cross-Entropy Prev. Calib. {}".format(ll_row_train))
# Re-check score is still good after calibration (AVPR and AUC should be same);
# then Cross-Entropy should drop !!
print("AUC Post. Calib.: {}".format(auc_row_train_calibed))
print("AVPR Post. Calib.: {}".format(avpr_row_train_calibed))
print("Cross-Entropy Post. Calib. {}".format(ll_row_train_calibed))
# combine calibrated OCSVM and Marginals for cell outlier detection
nll_cells_final_train = nll_inlier_row_train.reshape(-1,1) + nll_marginal_cell
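    # Broadcasting note: the reshaped row score has shape (n_rows, 1) while nll_marginal_cell
    # is (n_rows, n_features), so the row-level -log p(inlier) is added to every cell of its
    # row -- a suspicious row lifts all of its cell scores.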
# Cell metrics
auc_cell_train, auc_feats = get_auc_metrics(target_errors_train, nll_cells_final_train)
avpr_cell_train, avpr_feats = get_avpr_metrics(target_errors_train, nll_cells_final_train)
print('Combined: OCSVM + Marginals Train -- Cell AUC: {}, Cell AVPR: {}, Row AUC: {}, Row AVPR: {}'.format(
auc_cell_train, avpr_cell_train, auc_row_train, avpr_row_train))
#Save results into csv
if args.save_on:
# create folder for saving experiment data (if necessary)
folder_output = args.output_folder + "/" + args.outlier_model
try:
os.makedirs(folder_output)
except OSError as e:
if e.errno != errno.EEXIST:
raise
columns = ['AUC row','AVPR row','AUC cell','AVPR cell']
results = {'AUC row': [auc_row_train], 'AVPR row': [avpr_row_train],
'AUC cell': [auc_cell_train], 'AVPR cell': [avpr_cell_train]}
#Dataframe
        df_out = pd.DataFrame(data=results, columns=columns)
# all domains
# merge/split common boundary x = max(3bin,0.1 TAD Length)
# region < args.remote
# less complex
# zoom
# to filter the strength first
import pandas as pd
import numpy as np
#from tqdm import tqdm
import argparse
import os
# import warnings
# warnings.filterwarnings('ignore')
# the arguments from command line
parser = argparse.ArgumentParser(description='python scriptname <-d> <-t> [options]')
parser.add_argument('-d','--diff', type=str, default = None,help="path to the text file of diffdomain's outcome")
parser.add_argument('-t','--tad',type=str, default=None,help='path/ the other tadlist')
parser.add_argument('-o','--out',type=str,default=None,help='the output path')
parser.add_argument('-l','--limit',type=int,default=40000,help='the range(length of bases) to judge the common boundary')
parser.add_argument('-k','--kpercent',type=int,default=10,help='the common boundaries are within max(l*bin,k% TAD length)')
parser.add_argument('-r','--remote',type=int,default=1000000,help='the limitation of the biggest region')
parser.add_argument('-s1','--skip1',type=int,default=25,help='to skip the first s1 rows in "--diff" file; if you input 25, the first 25 rows [0,24] will be skipped.')
parser.add_argument('-s2','--skip2',type=int,default=None,help='to skip the first s2 rows in the other tadlist file')
parser.add_argument('--sep1',type=str,default='\t',help="the seperator of the diffdoamin's outcome (like ',')")
parser.add_argument('--sep2',type=str,default='\t',help="the seperator of the other tadlist")
args = parser.parse_args()
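# Example invocation (hypothetical script and file names, shown for illustration only):
# python classify_tads.py -d diffdomain_outcome.txt -t other_tadlist.bed -o ./out \
#     -l 40000 -k 10 -r 1000000 -s1 25 --sep1 '\t' --sep2 '\t'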
# load the files
data = pd.read_table(args.diff,skiprows=args.skip1,sep=args.sep1)
tad = pd.read_table(args.tad,skiprows=args.skip2,sep=args.sep2,header=None)
#preprocessing
cols = data.columns
data.rename(columns={cols[0]:'chr',cols[1]:'start',cols[2]:'end'},inplace=True)
data_diff = data.loc[data['adj_pvalue']<0.05,['chr','start','end']]
data_diff['significant'] = 1
data_diff.reset_index(inplace=True,drop=True)
tad = tad.iloc[:,0:3]
tad.columns = ['chr','start','end']
tad.sort_values(by=['chr','start','end'],inplace=True)
tad.reset_index(inplace=True,drop = True)
tad['range'] = list(map(lambda a,b:(a,b) , tad.start,tad.end))
# preparation
chrs = list(map(str,list(range(1,23))))+['X']
colnames = ['chr','start','end','range','type','origin','subtype','significant']
tad_ = data_main = loss = single = merge = split = multi = pd.DataFrame(columns=colnames)
tad_ = pd.concat([tad_,tad],axis=0)
tad = tad_
data_main = pd.concat([data_main,data.iloc[:,0:3]],axis=0)
data_main['significant'] = 0
data_main = pd.concat([data_main,data_diff],axis=0)
data_main.drop_duplicates(subset=['chr','start','end'],keep='last',inplace=True)
data_main['range'] = list(map(lambda a,b:(a,b) , data_main.start,data_main.end))
data_main['origin'] = 'diffdomain'
data_main.sort_values(by=['chr','start','end'],inplace=True)
data_main.reset_index(inplace=True,drop=True)
def identical(boundary1,boundary2):
# to judge the "common boundary"
if int(boundary1) <= int(boundary2)+limit and int(boundary1) >= int(boundary2)-limit:
return True
else:
return False
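# For example, with limit = 40000 the call identical(1040000, 1000000) returns True,
# while identical(1100000, 1000000) returns False (hypothetical coordinates).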
def cross(main,vise):
# main is the protagonist tad
# to find the tads related to main in vise
note=pd.DataFrame(columns=colnames)
for i in range(vise.shape[0]):
if (int(main['end'])-limit > int(vise.loc[i,'start']) and int(main['start'])+limit < int(vise.loc[i,'end']) ):
note=pd.concat([note,pd.DataFrame(vise.loc[i,:].values.reshape(1,-1),columns=colnames)],axis=0)
return note
def n_of_region(outcome):
# to count the number of regions in the dataframe
n_region = 0
if len(outcome) != 0 :
n_region = 1
for i in range(2,len(outcome)):
if outcome['origin'].values[i]=='diffdomain' and outcome['origin'].values[i-1]=='the other tadlist':
n_region = n_region+1
return n_region
def n_diffdomain(outcome):
n_diff = outcome.loc[outcome['origin']=='diffdomain',:].shape[0]
return n_diff
# the 4th version + bin
# try:
for c in chrs:
temp = data_main.loc[data_main['chr']==c,:].copy()
tadlist = tad.loc[tad['chr']==c,:].copy()
tadlist['origin'] = 'the other tadlist'
temp.reset_index(inplace=True,drop=True)
tadlist.reset_index(inplace=True,drop=True)
temp = temp[colnames]
tadlist = tadlist[colnames]
temp['start'] = temp['start'].astype(int)
temp['end'] = temp['end'].astype(int)
tadlist['start'] = tadlist['start'].astype(int)
tadlist['end'] = tadlist['end'].astype(int)
# filter the strength-change diffdomains and other non-significantly differential tads with common boundaries in the other tadlist
tad_index = []
cross_index = []
for i in range(temp.shape[0]):
# the i th TADs in the result of DiffDomain
# to filter the TADs with common boundaries in different conditions
# initialize the variables
note_tad = note_cross = pd.DataFrame(columns=colnames)
# set the "limit" for judging the common boundaries
limit = max(args.limit,args.kpercent*0.01*(temp['end'][i]-temp['start'][i]))
note_tad = pd.concat([note_tad,pd.DataFrame(temp.loc[i,:].values.reshape(1,-1),columns=colnames)],axis=0)
for k in range(tadlist.shape[0]):
if (identical(temp.loc[i,'start'],tadlist.loc[k,'start'])) and (identical(temp.loc[i,'end'],tadlist.loc[k,'end'])) :
note_cross = pd.concat([note_cross,pd.DataFrame(tadlist.loc[k,:].values.reshape(1,-1),columns=colnames)],
axis=0,ignore_index = True)
cross_index.append(k)
tad_index.append(i)
n_cross = note_cross.shape[0]
if n_cross !=0 :
# in case that there are TADs in temp having common boundaries but not in tadlist
for j in range(i+1,temp.shape[0]):
# to find the TADs (in the result of DiffDomain) located on the same boundaries with the i th TADs
if (identical(temp.loc[i,'start'],temp.loc[j,'start'])) and (identical(temp.loc[i,'end'],temp.loc[j,'end'])):
note_tad = pd.concat([note_tad,pd.DataFrame(temp.loc[j,:].values.reshape(1,-1),columns=colnames)],
axis=0,ignore_index = True)
tad_index.append(i)
tad_index.append(j)
note_tad.drop_duplicates(subset=['chr','start','end'],inplace=True)
note_cross.drop_duplicates(subset=['chr','start','end'],inplace=True)
n_tad = note_tad.shape[0]
if n_tad ==1 and n_cross ==1 :
note_tad['type'] = 'single'
note_tad['subtype'] = 'strength'
single = pd.concat([single,note_tad,note_cross],axis=0,
ignore_index = True)
elif n_tad == 1 and n_cross >=2 :
note_tad['type'] = 'split'
split = pd.concat([split,note_tad,note_cross],axis=0,
ignore_index = True)
elif n_tad >= 2 and n_cross ==1 :
note_tad['type'] = 'merge'
merge = pd.concat([merge,note_tad,note_cross],axis=0,
ignore_index = True)
elif n_tad >= 2 and n_cross >= 2 :
if n_tad == n_cross :
note_tad['type'] = 'single'
note_tad['subtype'] = 'strength'
single = pd.concat([single,note_tad,note_cross],axis=0,
ignore_index = True)
else:
note_tad['type'] = 'complex'
multi = pd.concat([multi,note_tad,note_cross],axis=0,
ignore_index = True)
temp.drop(tad_index,inplace=True)
temp.reset_index(drop=True,inplace=True)
tadlist.drop(cross_index,inplace = True)
tadlist.reset_index(drop=True,inplace=True)
# temp_sig = temp.loc[temp['significant']==1,:].copy()
# temp_sig.reset_index(drop = True,inplace=True)
for i in range(temp.shape[0]):
# to adjust the longest distance between "common boundaries"
# to find the related TADs without common boundaries in different conditions
limit = max(args.limit,(temp['end'][i]-temp['start'][i])*args.kpercent*0.01)
note_cross = pd.DataFrame(columns=colnames)
note_tad = pd.DataFrame(columns=colnames)
# to find the tads in tadlist related to the significantly differential tad
note_cross = pd.concat([note_cross,cross(temp.iloc[i,:],tadlist)],axis=0,
ignore_index = True)
note_tad = pd.concat([note_tad,pd.DataFrame(temp.iloc[i,:].values.reshape(1,-1),columns=colnames)],
axis=0,ignore_index = True)
n_cross = note_cross.shape[0]
if n_cross == 0:
# the significantly differential tad has no counterpart in the other tadlist (loss)
note_tad['type'] = 'loss'
loss = pd.concat([loss,note_tad],axis=0
,ignore_index = True)
elif n_cross >=1:
flag = 1
note_tad['start'] = note_tad['start'].astype(int)
note_tad['end'] = note_tad['end'].astype(int)
note_cross['start'] = note_cross['start'].astype(int)
note_cross['end'] = note_cross['end'].astype(int)
while (flag == 1) and (max(note_tad['end'])-min(note_tad['start']) <= int(args.remote)):
for p in range(note_cross.shape[0]):
# to find TADs related to the tads found in the other tadlist
note_tad = pd.concat([note_tad,cross(note_cross.iloc[p,:],temp)],axis=0,ignore_index = True)
for q in range(note_tad.shape[0]):
# to find TADs in the tadlist related to the TADs in the result of DiffDomain
note_cross = pd.concat([note_cross,cross(note_tad.iloc[q,:],tadlist)],axis=0,ignore_index = True)
first_tad = note_tad.loc[note_tad.start == min(note_tad.start),:]
last_tad = note_tad.loc[note_tad.end == max(note_tad.end),:]
first_cross = note_cross.loc[note_cross.start == min(note_cross.start),:]
last_cross = note_cross.loc[note_cross.end == max(note_cross.end),:]
thres1 = pd.concat([cross(first_tad.iloc[0,:],tadlist),cross(last_tad.iloc[0,:],tadlist)],axis=0)
thres2 = pd.concat([cross(first_cross.iloc[0,:],temp),cross(last_cross.iloc[0,:],temp)],axis=0)
if (thres1['range'].isin(note_cross['range'])).all() and thres2['range'].isin(note_tad['range']).all():
flag = 2
note_tad.drop_duplicates(subset=['chr','start','end'],inplace=True)
note_cross.drop_duplicates(subset=['chr','start','end'],inplace=True)
note_tad.reset_index(inplace=True,drop=True)
note_cross.reset_index(inplace=True,drop=True)
n_tad = note_tad.shape[0]
n_cross = note_cross.shape[0]
if n_tad == 1 and n_cross == 1:
note_tad['type'] = 'single'
note_tad['subtype'] = 'zoom'
single = pd.concat([single,note_tad,note_cross],axis=0,ignore_index = True)
elif n_tad == 1 and n_cross >= 2:
note_tad['type'] = 'split'
split = pd.concat([split,note_tad,note_cross],axis=0,ignore_index = True)
elif n_tad >= 2 and n_cross ==1:
note_tad['type'] = 'merge'
merge = pd.concat([merge,note_tad,note_cross],axis=0,ignore_index = True)
elif n_tad >=2 and n_cross >=2:
note_tad['type'] = 'complex'
multi = pd.concat([multi,note_tad,note_cross],axis=0,ignore_index = True)
# except Exception as e:
# print(e)
# print('Interrupted!')
# else:
result = | pd.DataFrame(columns=colnames) | pandas.DataFrame |
# %%
'''
'''
## Import the required libraries
import pandas as pd
import numpy as np
import datetime as dt
from datetime import timedelta
pd.options.display.max_columns = None
pd.options.display.max_rows = None
import glob as glob
import datetime
import re
import jenkspy
import tkinter as tk
root= tk.Tk()
canvas1 = tk.Canvas(root, width = 300, height = 300)
canvas1.pack()
# %%
def profiling():
#### Read Databases
datas=pd.read_csv('C:/Users/scadacat/Desktop/TIGO (Cliente)/Cobranzas/Notebooks/Bds/data_con_drop.csv',sep=';',encoding='utf-8',dtype='str')
salida=pd.read_csv('C:/Users/scadacat/Desktop/TIGO (Cliente)/Cobranzas/Notebooks/Bds/salida_limpia.csv',sep=';',encoding='utf-8',dtype='str')
seguimiento=pd.read_csv('C:/Users/scadacat/Desktop/TIGO (Cliente)/Cobranzas/Notebooks/Bds/seguimiento.csv',sep=';',encoding='utf-8',dtype='str')
virtuales=pd.read_csv('C:/Users/scadacat/Desktop/TIGO (Cliente)/Cobranzas/Notebooks/Bds/virtuales.csv',encoding='utf-8',sep=';')
df=datas.copy()
out=salida.copy()
seg=seguimiento.copy()
vir=virtuales.copy()
out.sort_values(['Identificacion Del Cliente','Fecha_Gestion'],inplace=True)
out=out[out['Repetido CC']=='0']
out=out[~out.duplicated(keep='last')]
## Cleaning
df['Marca Score']=df['Marca Score'].str.strip().fillna('NO REGISTRA')
df['Marca Score'][df['Marca Score']==''] ='NO REGISTRA'
df['Analisis De Habito']=df['Analisis De Habito'].fillna('NO DEFINE')
df['Analisis De Habito'][df['Analisis De Habito']==' '] ='NO DEFINE'
df['Tipo de Cliente'][df['Tipo de Cliente']==' '] ='NO DEFINE'
df['Marca Funcional']=df['Marca Funcional'].str.replace(' ','0')
df['Marca']=df['Marca'].str.replace(' ','0')
df['Antiguedad Cliente'][df['Antiguedad Cliente']==' '] ='NO REGISTRA'
df['Perfil Digital']=df['Perfil Digital'].fillna('Sin perfil')
df['Nivel de riesgo experian']=df['Nivel de riesgo experian'].str.replace(' ','NO REGISTRA')
df['Nivel de Riesgo']=df['Nivel de Riesgo'].str.replace(' ','NO REGISTRA')
df['Nivel Estrategia Cobro']=df['Nivel Estrategia Cobro'].str.replace(' ','NO REGISTRA')
df['Real reportado en central de riesgos']=df['Real reportado en central de riesgos'].str.replace(' ','0')
df['Nivel de Riesgo'][df['Nivel de Riesgo']==' '] ='NO REGISTRA'
df['Estado del Cliente'][df['Estado del Cliente']==' '] ='SIN IDENTIFICAR'
df['Tipificación Cliente'][df['Tipificación Cliente']==' '] ='SIN IDENTIFICAR'
df['Estrategia'][df['Estrategia']==' '] ='SIN ESTRATEGIA'
df['Autopago'][df['Autopago']==' '] ='NO APLICA'
df['Tipo de Cliente']=df['Tipo de Cliente'].fillna('NO DEFINE')
df['Tipo de Reporte a Central de Riesgos'][df['Tipo de Reporte a Central de Riesgos']==' '] ='NO REGISTRA'
df['Codigo edad de mora(para central de riesgos)']=df['Codigo edad de mora(para central de riesgos)'].str.replace(' ','NO REGISTRA')
df['Análisis Vector'][df['Análisis Vector']==' '] ='SIN IDENTIFICAR'
df['Análisis Vector_PAGOS_PARCIAL'] = np.where(df['Análisis Vector'].str.contains("PAGO PARCIAL|PAGOS PARCIAL"),"1",'0')
df['Análisis Vector_PAGO OPORTUNO'] = np.where(df['Análisis Vector'].str.contains("SIN PAGO|FINANCIAR"),"1",'0')
df['Análisis Vector_SIN_IDENTIFICAR'] = np.where(df['Análisis Vector'].str.contains("SIN IDENTIFICAR"),"1",'0')
df['Análisis Vector_SIN_PAGO'] = np.where(df['Análisis Vector'].str.contains("SIN PAGO|FINANCIAR"),"1",'0')
df['Análisis Vector_suspension'] = np.where(df['Análisis Vector'].str.contains("SUSPENSIO"),"1",'0')
df['Análisis Vector_indeterminado'] = np.where(df['Análisis Vector'].str.contains("PAGO OPORTUNO Y NO OPORTUNO"),"1",'0')
df['Análisis Vector_pago_no_oport'] = np.where(df['Análisis Vector'].str.contains("PAGO NO OPORTUNO"),"1",'0')
df['Análisis Vector_otro_caso'] = np.where(df['Análisis Vector'].str.contains("NUEVO|FACTURAS AJUSTADAS|PROBLEMAS RECLAMACION"),"1",'0')
df['Vector Cualitativo # Suscripción'][df['Vector Cualitativo # Suscripción']==' '] = df["Vector Cualitativo # Suscripción"].mode()[0]
df['Fecha Ult Gestion']=pd.to_datetime(df['Fecha Ult Gestion'],format='%Y-%m-%d')
###PARSE DATES AND CREATE NEW FEATURES
df['Fecha de Asignacion']=pd.to_datetime(df['Fecha de Asignacion'],format='%Y-%m-%d %H:%M:%S')
df['Fecha Ult pago']=pd.to_datetime(df['Fecha Ult pago'],format ='%Y-%m-%d %H:%M:%S',errors = "coerce")
df['Fecha de cuenta de cobro mas antigua']=pd.to_datetime(df['Fecha de cuenta de cobro mas antigua'],format ='%Y-%m-%d %H:%M:%S',errors = "coerce")
df["Dias_ult_pago"] = (df['Fecha Ult pago']).dt.day
df["dia_semana_ult_pago"] = (df['Fecha Ult pago']).dt.weekday
df["mes_ult_pago"]=df["Fecha Ult pago"].dt.month
df["semana_ult_pago"]=df["Fecha Ult pago"].dt.week
df["trimestre_ult_pago"] = df["Fecha Ult pago"].dt.quarter
df["año_ult_pago"] = df["Fecha Ult pago"].dt.year
df["DIAS_desde_ult_pago"] = (df["Fecha Ult Gestion"] - df["Fecha Ult pago"]).dt.days
df["Fecha estado corte"]=pd.to_datetime(df["Fecha estado corte"],format ='%Y-%m-%d %H:%M:%S',errors = "coerce")
df["dias_ult_pago_cobro"] = (df["Fecha Ult pago"]-df["Fecha estado corte"]).dt.days
df["dias_ult_pago_fac_ant"] = (df["Fecha Ult pago"]-df["Fecha de cuenta de cobro mas antigua"]).dt.days
df['Fecha de Asignacion_mes']=df["Fecha de Asignacion"].dt.month
df['Fecha de Instalacion']=pd.to_datetime(df['Fecha de Instalacion'],format ='%Y-%m-%d %H:%M:%S',errors = "coerce")
df['antiguedad_mes']=(dt.datetime.now()-df['Fecha de Instalacion']).dt.days/365
df['Fecha Retiro']=pd.to_datetime(df['Fecha Retiro'].str.replace('4732','2020'),format='%Y-%m-%d',errors = "coerce")
df['Fecha Vencimiento Sin Recargo']=pd.to_datetime(df['Fecha Vencimiento Sin Recargo'],format='%Y-%m-%d')
df['dias_desde_ult_gestion']=(dt.datetime.now()-df['Fecha Ult Gestion']).dt.days
## Group labels
df['Descripcion subcategoria']=df['Descripcion subcategoria']\
.str.replace('Consumos EPM Telco|INALAMBRICOS NO JAC|unica|COMERCIAL|ENTERPRISE|MONOPRODUCTO|PYME|------------------------------|LINEA BUZON','NO REGISTRA')\
.str.replace('ESTRATO MEDIO ALTO|MEDIO ALTO','ESTRATO 4')\
.str.replace('ESTRATO ALTO|ALTO','ESTRATO 6')\
.str.replace('ESTRATO MEDIO-BAJO|MEDIO BAJO','ESTRATO 2')\
.str.replace('ESTRATO MEDIO|MEDIO','ESTRATO 3')\
.str.replace('ESTRATO MEDIO-BAJO|MEDIO BAJO','ESTRATO 2')\
.str.replace('BAJO BAJO|ESTRATO BAJO-BAJO|ESTRATO BAJO|BAJO','ESTRATO 1')
df['Descripcion subcategoria'][df['Descripcion subcategoria']=='-'] ='NO REGISTRA' ## value not recorded
df['Tipificación Cliente'][df['Tipificación Cliente']==' '] = df["Tipificación Cliente"].mode()[0] ## replace blanks with the mode
df['Dias Suspension'][df['Dias Suspension']==' ']=0
df['Dias Suspension']=df['Dias Suspension'].astype('int')
## Group labels
df['Descripcion producto']=df['Descripcion producto'].str.replace('-','').str.strip().str.upper()\
.str.replace('TELEVISION UNE|TELEVISION INTERACTIVA|TV CABLE|TV INTERACTIVA|UNE TV|TELEVISION SIN SEÃƑ‘AL|TELEVISION SIN SEÃƑ‘AL|TV CABLE SIN SEÑAL','TELEVISION')\
.str.replace('INTERNET BANDA ANCHA|SEGUNDA CONEXION INTERNET|BANDA ANCHA|INTERNET EDATEL|INTERNET INSTANTANEO|CABLE MODEM|INTERNET DEDICADO 11|ADSL BASICO','INTERNET')\
.str.replace('UNE MOVIL|COLOMBIAMOVIL BOGOTA|TIGO|ETB','UNEMOVIL')\
.str.replace('TOIP|TELEFONICA TELECOM|TELECOM|TO_SINVOZ','TELEFONIA')\
.str.replace('LÃƑÂNEA BÃƑ¡SICA','LINEA BASICA')
df['Descripcion categoria']=df['Descripcion categoria'].str.replace("[^a-zA-Z ]+", "NO REGISTRA")
df['Descripcion producto']=df['Descripcion producto'].str.replace('-','').str.strip()\
.str.replace('TELEVISION UNE|Television Interactiva|TV CABLE |TV INTERACTIVA|UNE TV|TELEVISIONSIN SEÑAL','TELEVISION')\
.str.replace('Internet Banda Ancha|Internet EDATEL|CABLE MODEM','INTERNET').str.replace('UNE MOVIL','UNEMOVIL')\
.str.replace('UNE MOVIL|COLOMBIAMOVIL BOGOTA','UNEMOVIL')\
.str.replace('TOIP','TELEFONIA')
df['Descripcion producto']=df['Descripcion producto'].str.strip().str.replace('-','')\
.str.replace('TELEVISION UNE|Television Interactiva|TV CABLE |TV INTERACTIVA|UNE TV','TELEVISION')\
.str.replace('Internet Banda Ancha','INTERNET').str.replace('UNE MOVIL','UNEMOVIL')
conteo3=df['Descripcion producto'].value_counts().iloc[:7].index.tolist()
df['Descripcion producto_resumen']=df.apply(
lambda row: row['Descripcion producto'] if (row['Descripcion producto'] in conteo3)
else 'OTRO PRODUCTO',axis=1)
df['Descripcion producto_resumen']=df['Descripcion producto_resumen'].str.strip()
df['Tipo Contactabilidad'][df['Tipo Contactabilidad']==' '] ='NO REGISTRA'
df['Indicador BI'][df['Indicador BI']==' '] ='NO REGISTRA'
## Create variable
df['antiguedad_mes']=df['antiguedad_mes'].astype(int)
col = 'antiguedad_mes'
condi = [ df[col] < 12, df[col].between(12, 24, inclusive = True),df[col]>24 ]
seg_ = [ "SEGMENTO YOUNG", 'SEGMENTO MASTER','SEGMENTO LEGEND']
df["Hogar"] = np.select(condi, seg_, default=np.nan)
df['Calificación A Nivel De Suscripción'][df['Calificación A Nivel De Suscripción']==' ']=df['Calificación A Nivel De Suscripción'].mode()[0]
df['Calificación A Nivel De Suscripción']=df['Calificación A Nivel De Suscripción'].astype('int')
df['Califica_suscr_class']=pd.cut(df['Calificación A Nivel De Suscripción'],bins=5,labels=["A","B","C","D","E"]).astype(str)
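# pd.cut above splits the score range into 5 equal-width intervals, labelled 'A' (lowest scores) through 'E' (highest scores)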
df['Tipo De Documento'][df['Tipo De Documento']=='13'] ='NO REGISTRA'
df['Tipo De Documento']=df['Tipo De Documento'].fillna('NO REGISTRA')
df['Tipo De Documento'][df['Tipo De Documento']=='1'] ='CC'
df['Tipo De Documento'][df['Tipo De Documento']==' '] ='NO REGISTRA'
df['Tipo De Documento'][df['Tipo De Documento']=='C'] ='NO REGISTRA'
df['Tipo De Documento']=df['Tipo De Documento'].str.replace('3 Cedula Extranjeria|3|1CE','CE')\
.str.replace('1 Cedula','CC')\
.str.replace('2 Nit|2',' Nit')\
.str.replace('4 Tarjeta de Identidad|4',' TI')
#### Create, clean & group variables
df['Banco 1'][df['Banco 1']==' '] ='NO REGISTRA'
df['Banco 2'][df['Banco 2']==' '] ='NO REGISTRA'
df['Banco 1'].fillna('NO REGISTRA',inplace=True)
df['Banco 2'].fillna('NO REGISTRA',inplace=True)
df['Banco 1']=df['Banco 1'].str.upper().str.strip()
df['Banco 2']=df['Banco 2'].str.upper().str.strip()
df['Banco 1']=df['Banco 1'].str.replace('BANCO COLPATRIA','COLPATRIA')\
.str.replace('COLPATRIA ENLINEA','COLPATRIA EN LINEA')\
.str.replace('GANA GANA','GANA')\
.str.replace('GANA GANA','GANA')
df["Banco 1_virtual"] =\
np.where(df["Banco 1"].str.contains("LINEA|PSE|BOTON",regex = True,na = False),"1","0")
df["Banco 2_Virtual"] =\
np.where(df["Banco 2"].str.contains("LINEA|PSE|BOTON",regex = True,na = False),"1","0")
conteo_banco=df['Banco 1'].value_counts().iloc[:10].index.tolist()
df['Banco 1_Cl']=df.apply(
lambda row: row['Banco 1'] if (row['Banco 1'] in conteo_banco)
else 'OTRO BANCO',axis=1)
conteo_banco2=df['Banco 2'].value_counts().iloc[:10].index.tolist()
df['Banco 2_Cl']=df.apply(
lambda row: row['Banco 2'] if (row['Banco 2'] in conteo_banco2)
else 'OTRO BANCO',axis=1)
df['Causal'][df['Causal']==' '] ='NO REGISTRA'
df['Causal_Cl']=df['Causal']\
.str.replace('FACTURA MAYOR A LA CAPACIDAD DE PAGO|CLIENTE SE ACOGE PRODUCTO MINIMO VITAL|PRIORIDAD INGRESOS A LA CANASTA BASICA|INDISPONIBILIDAD DE MEDIOS DE PAGO POR EMERGENCIA SANITARIA|NO TIENE DINERO|INCONVENIENTES ECONOMICOS|INCONVENIENTES ECONOMICOS|CONTINGENCIA COVID-19|DESEMPLEADO|INDEPENDIENTE SIN INGRESOS DURANTE CUARENTENA|DISMINUCIÓN INGRESOS / INCONVENIENTES CON NÓMINA',
'DISMINUCIÓN DE INGRESOS')\
.str.replace('OLVIDO DE PAGO|FUERA DE LA CIUDAD|DEUDOR SE OLVIDO DEL PAGO|OLVIDO DEL PAGO / ESTA DE VIAJE',
'OLVIDO')\
.str.replace('PAGA CADA DOS MESES|PAGO BIMESTRAL','PAGO BIMESTRAL')\
.str.replace('INCONFORMIDAD EN EL VALOR FACTURADO|INCONFORMIDAD POR CAMBIO DE DOMICILIO|INCOMFORMIDAD POR CAMBIO DE DOMICILIO|PQR PENDIENTE|TIENE RECLAMO PENDIENTE','INCONFORMIDAD')\
.str.replace('OTRA PERSONA ES LA ENCARGADA DEL PAGO','OTRA PERSONA ES LA ENCARGADA DEL PAGO').str.strip()\
.str.replace('PROBLEMAS FACTURACIÓN|INCONSISTENCIAS EN CARGOS FACTURADOS|RECLAMACIÓN EN TRÃMITE|NO LE LLEGA LA FACTURA / LLEGO DESPUES DE LA FECHA DE VENCIMIENTO|LLEGO LA FACTURA DESPUES DE LA FECHA DE VENCIMIENTO|NO LLEGO FACTURA',
'FACTURA')\
.str.replace('SE NIEGA A RECIBIR INFORMACION',
'RENUENTE')\
.str.replace('INCONVENIENTES CON CANALES DE PAGO|NO HAY PROGRAMACION DEL PAGO|INCONVENIENTES CON EL CANAL DE RECAUDO|NO HAY PROGRAMACION DEL PAGO|INCONVENIENTES CON LA ENTIDAD BANCARIA',
'INCONVENIENTES CON PAGO')\
.str.replace('REALIZARA RETIRO DEL SERVICIO|REALIZARA RETIRO / CANCELACION SERVICIO',
'REALIZARA RETIRO')
conteo_Causa=df['Causal_Cl'].value_counts().iloc[:12].index.tolist()
df['Causal_Cl']=df.apply(
lambda row: row['Causal_Cl'] if (row['Causal_Cl'] in conteo_Causa)
else 'OTRA CAUSA',axis=1)
conteo_Corte=df['Descripcion estado de corte'].value_counts().iloc[:12].index.tolist()
df['Descripcion estado de corte_Cl']=df.apply(
lambda row: row['Descripcion estado de corte'] if (row['Descripcion estado de corte'] in conteo_Corte)
else 'OTRA MOTIVO',axis=1)
df['Descripcion estado de corte_conexión'] = np.where(df['Descripcion estado de corte'].str.contains("CONEXION"),"1",'0')
df['Descripcion estado de corte_suspención'] = np.where(df['Descripcion estado de corte'].str.contains("SUSPENSION"),"1",'0')
df['Descripcion estado de corte_retiro'] = np.where(df['Descripcion estado de corte'].str.contains("RETIRO"),"1",'0')
df['Valor Total Cobrar']=df['Valor Total Cobrar'].astype('float64')
df['Valor Vencido']=df['Valor Vencido'].astype('float64')
df['Valor Factura']=df['Valor Factura'].astype('float64')
df['Valor Intereses de Mora']=df['Valor Intereses de Mora'].astype('float64')
df['Valor financiado']=df['Valor financiado'].astype('float64')
## DROPPING VARIABLES
df.drop(['Causal','Codigo edad de mora(para central de riesgos)','Codigo edad de mora(para central de riesgos)',
'Estado Adminfo','Celular con mejor Contactabilidad','Archivo Convergente','Usuario','Vector de Pago'],axis=1,inplace=True)
anis=['Teléfono última gestión','Email','Telefono con mejor Contactabilidad','Email',
'Ultimo Celular Grabado','Ultimo Telefono Grabado','Ultimo Email Grabado','Celular con mejor Contactabilidad']
df.dropna(subset = ["Direccion de instalacion"], inplace=True)
df['llave']=df['Identificacion']+"_"+df['Direccion de instalacion']
df=df.sort_values('Fecha de Asignacion',ascending=True)
## Drop the duplicates arising from the combination of these variables
df=df[~df[['llave','# servicio suscrito/abonado','Fecha de Asignacion','Valor Total Cobrar','Valor Vencido','Descripcion localidad']].duplicated()]
df = df.sort_values(by=['Identificacion','# servicio suscrito/abonado','Fecha de Asignacion'],ascending=[True,True,True]).drop_duplicates('# servicio suscrito/abonado',keep='last')
### Watch out for the records still pending management
## Number of services
cant_serv=df.groupby(['Identificacion']).agg({'Descripcion producto':'nunique','Direccion de instalacion':'nunique'})\
.reset_index().sort_values('Descripcion producto',ascending=False)\
.rename(columns={'Descripcion producto':'cantidad_ser_dir','Direccion de instalacion':'serv_dir'})
df=pd.merge(df,cant_serv,on='Identificacion')
df=df[~df.duplicated()]
# Create this variable to avoid duplicates on the same day
df['llave_2']=df['Identificacion']+"_"+(df['Fecha de Asignacion'].astype('str'))
#
conteo=df.groupby(['Identificacion','Fecha de Asignacion','Fecha de Asignacion_mes']).agg({'Identificacion':'nunique'}).rename(columns={'Identificacion':'cantidad_mes'}).reset_index()
conteo.sort_values('Fecha de Asignacion',ascending=True,inplace=True)
conteo=conteo[~conteo['Identificacion'].duplicated(keep='last')]
conteo['llave_2']=conteo['Identificacion']+"_"+(conteo['Fecha de Asignacion'].astype('str'))
# Created in order to identify and keep the keys of each one
consolidar=pd.merge(df,conteo['llave_2'],on='llave_2')
# Create dummy variables to identify products within the same count of services
cer1=pd.concat([pd.get_dummies(consolidar['Descripcion producto_resumen']),consolidar],axis=1) # concatenate
cer1['llave_2']=cer1['Identificacion']+"_"+(cer1['Fecha de Asignacion'].astype('str'))
cer=cer1.groupby(['Identificacion']).agg({
'Descripcion producto_resumen':'sum',
'TELEFONIA':'sum','INTERNET':'sum','TELEVISION':'sum','UNEMOVIL':'sum',
'LARGA DISTANCIA UNE':'sum','PAQUETE':'sum','OTRO PRODUCTO':'sum','LINEA BASICA':'sum',
"Valor Vencido":"sum","Valor Total Cobrar":"sum",
"Valor financiado":"sum",
"Valor Intereses de Mora":"sum"}).reset_index().\
rename(columns={'Valor Vencido':'valor vencido_sum',
'Valor Factura':'Valor Factura_sum',
'Valor financiado':'Valor financiado_sum',
'Valor Total Cobrar':'Valor Total Cobrar_sum',
'Descripcion producto_resumen':'Total servicio',
'Valor Intereses de Mora':'Valor Intereses de Mora_sum'})
cer.drop(['Total servicio'],axis=1,inplace=True)
data=pd.merge(consolidar,cer,on='Identificacion')
data=data.sort_values(['Fecha de Asignacion','Identificacion'],ascending=[True,True]).drop_duplicates('Identificacion',keep='last')
### Output database
out = out.sort_values(['Identificacion Del Cliente','Fecha_Gestion'],ascending=[True,True]).drop_duplicates(keep='last')
out.drop(['Unnamed: 19'],axis=1,inplace=True)
## Join with the output database
full=pd.merge(data,out[['Identificacion Del Cliente','Efectivo Pago','Fecha_Pago']],
left_on='Identificacion',right_on='Identificacion Del Cliente')
full=full[~full.duplicated()]
full=full.sort_values(['Identificacion','Efectivo Pago'],ascending=[True,True]).drop_duplicates(['Identificacion'],keep='first')
full['llave_exp']=full['Identificacion']+full['# servicio suscrito/abonado']
full['valor vencido_sum'][full['valor vencido_sum'] < 0] = 0
full['ratio_vlr_vencido_cobro']=full['valor vencido_sum']/full['Valor Total Cobrar_sum']
full.drop(['llave_2','Direccion de instalacion','Banco 1','Banco 2'],axis=1,inplace=True)
### Export and send to the folder to work with it
seg['FECHA DE GESTION']= | pd.to_datetime(seg['FECHA DE GESTION'],format='%Y-%m-%d %H:%M:%S') | pandas.to_datetime |
import pandas as pd
import json
if __name__ == "__main__":
traffic_data = {}
with open("../../public/data/traffic_data.json", "r", encoding='utf-8') as jsonFile:
traffic_data = json.load(jsonFile)
colume = [""]
time_list = []
converted_data_speed = {}
converted_data_volume = {}
first = True
for key, data in traffic_data.items():
colume.append(key)
converted_data_speed[key] = []
converted_data_volume[key] = []
for i in range(len(data["traffic"])):
if first:
time_list.append(data["traffic"][i]["time"])
converted_data_speed[key].append(data["traffic"][i]["avs"])
converted_data_volume[key].append(data["traffic"][i]["volume"])
if first:
first = False
df_avs = pd.DataFrame(columns=colume)
df_vol = pd.DataFrame(columns=colume)
df_avs[""] = time_list
df_vol[""] = time_list
for key in colume:
if key != "":
df_avs[key] = converted_data_speed[key]
df_vol[key] = converted_data_volume[key]
store_avs = | pd.HDFStore('./dcrnn/data/dataset_avs.h5') | pandas.HDFStore |
'''
/*******************************************************************************
* Copyright 2016-2019 Exactpro (Exactpro Systems Limited)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
'''
import numpy
import matplotlib.pyplot as plt
import scipy.stats as stats
import pandas
import datetime
import calendar
class RelativeFrequencyChart:
# returns coordinates for each chart column
def get_coordinates(self, data, bins): # bins - chart columns count
self.btt = numpy.array(list(data))
self.y, self.x, self.bars = plt.hist(self.btt, weights=numpy.zeros_like(self.btt) + 1. / self.btt.size, bins=bins)
return self.x, self.y
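# Minimal usage sketch (hypothetical data, for illustration only):
# chart = RelativeFrequencyChart()
# x, y = chart.get_coordinates(pandas.Series([1, 2, 2, 3, 5]), bins=10)
# # x holds the bin edges, y the relative frequency of each bin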
class FrequencyDensityChart:
def get_coordinates_histogram(self, data, bins):
self.btt = numpy.array(list(data))
self.y, self.x, self.bars = plt.hist(self.btt, bins=bins, density=True)
return self.x, self.y
def get_coordinates_line(self, data):
try:
self.btt = numpy.array(list(data))
self.density = stats.gaussian_kde(list(data))
self.x_den = numpy.linspace(0, data.max(), data.count())
self.density = self.density(self.x_den)
return self.x_den, self.density
except numpy.linalg.linalg.LinAlgError:
return [-1], [-1]
class DynamicChart:
def get_coordinates(self, frame, step_size):
self.plot = {} # chart coordinates
self.dynamic_bugs = []
self.x = []
self.y = []
self.plot['period'] = step_size
if step_size == 'W-SUN':
self.periods = DynamicChart.get_periods(self, frame, step_size) # separates DataFrame to the specified periods
if len(self.periods) == 0:
return 'error'
self.cumulative = 0 # cumulative total of defect submission for specific period
for self.period in self.periods:
# if the period starts before the earliest creation date, clamp the window start to that date
if pandas.to_datetime(self.period[0]) < pandas.to_datetime(frame['Created_tr']).min():
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) >=
pandas.to_datetime(frame['Created_tr']).min()) &
(pandas.to_datetime(frame['Created_tr']) <= pandas.to_datetime(self.period[1]))]
self.cumulative = self.cumulative + int(self.newFrame['Issue_key_tr'].count())
self.x.append(str(datetime.datetime.date(pandas.to_datetime(frame['Created_tr'], format='%Y-%m-%d').min())))
self.y.append(self.cumulative)
else:
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) >= pandas.to_datetime(self.period[0]))
& (pandas.to_datetime(frame['Created_tr']) <= pandas.to_datetime(self.period[1]))]
self.cumulative = self.cumulative + int(self.newFrame['Issue_key_tr'].count())
self.x.append(str((self.period[0])))
self.y.append(self.cumulative)
# check whether the date from new DataFrame is greater than date which is specified in settings
if pandas.to_datetime(frame['Created_tr']).max() > pandas.to_datetime(self.periods[-1][1]):
# processing of days which are out of full period set
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) > pandas.to_datetime(self.periods[-1][1]))
& (pandas.to_datetime(frame['Created_tr']) <=
pandas.to_datetime(frame['Created_tr']).max())]
self.cumulative = self.cumulative + int(self.newFrame['Issue_key_tr'].count())
self.x.append(str(datetime.datetime.date(pandas.to_datetime(self.periods[-1][1], format='%Y-%m-%d')) + datetime.timedelta(days=1)))
self.y.append(self.cumulative)
self.dynamic_bugs.append(self.x)
self.dynamic_bugs.append(self.y)
self.plot['dynamic bugs'] = self.dynamic_bugs
self.cumulative = 0
return self.plot
if step_size in ['7D', '10D', '3M', '6M', 'A-DEC']:
self.count0 = 0
self.count1 = 1
self.periods = DynamicChart.get_periods(self, frame, step_size) # DataFrame separation by the specified periods
if len(self.periods) == 0:
return 'error'
self.cumulative = 0
self.countPeriodsList = len(self.periods) # count of calculated periods
self.count = 1
if self.countPeriodsList == 1:
if step_size == '7D':
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) >=
pandas.to_datetime(frame['Created_tr']).min())
& (pandas.to_datetime(frame['Created_tr'])
< pandas.to_datetime(datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min())
+datetime.timedelta(days=7)))]
self.cumulative = self.cumulative + int(self.newFrame['Issue_key'].count())
self.x.append(str(DynamicChart.get_date_for_dynamic_my(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr'], format='%Y-%m-%d').min()), step_size)))
self.y.append(self.cumulative)
if pandas.to_datetime(frame['Created_tr']).max() > pandas.to_datetime(datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min())+datetime.timedelta(days=7)):
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) >=
pandas.to_datetime(datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min())+
datetime.timedelta(days=7))) & (pandas.to_datetime(frame['Created_tr'])
<= pandas.to_datetime(frame['Created_tr']).max())]
self.cumulative = self.cumulative + int(self.newFrame['Issue_key_tr'].count())
self.x.append(str(DynamicChart.get_date_for_dynamic_my(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min())+datetime.timedelta(days=7), step_size)))
self.y.append(self.cumulative)
self.cumulative = 0
if step_size == '10D':
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) >= pandas.to_datetime(frame['Created_tr']).min()) & (pandas.to_datetime(frame['Created_tr']) < pandas.to_datetime(datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min())+datetime.timedelta(days=10)))]
self.cumulative = self.cumulative + int(self.newFrame['Issue_key_tr'].count())
self.x.append(str(DynamicChart.get_date_for_dynamic_my(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr'], format='%Y-%m-%d').min()), step_size)))
self.y.append(self.cumulative)
if pandas.to_datetime(frame['Created_tr']).max() > pandas.to_datetime(datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min())+datetime.timedelta(days=10)):
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) >= pandas.to_datetime(datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min())+datetime.timedelta(days=10))) & (pandas.to_datetime(frame['Created_tr']) <= pandas.to_datetime(frame['Created_tr']).max())]
self.cumulative = self.cumulative + int(self.newFrame['Issue_key_tr'].count())
self.x.append(str(DynamicChart.get_date_for_dynamic_my(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min())+datetime.timedelta(days=10), step_size)))
self.y.append(self.cumulative)
self.cumulative = 0
if step_size == '3M':
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) >= pandas.to_datetime(frame['Created_tr']).min())
& (pandas.to_datetime(frame['Created_tr']) <
pandas.to_datetime(DynamicChart.add_months(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min()), 3)))]
self.cumulative = self.cumulative + int(self.newFrame['Issue_key'].count())
self.x.append(str(DynamicChart.get_date_for_dynamic_my(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr'], format='%Y-%m-%d').min()), step_size)))
self.y.append(self.cumulative)
if pandas.to_datetime(frame['Created_tr']).max() > pandas.to_datetime(DynamicChart.add_months(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min()), 3)):
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) >= pandas.to_datetime(DynamicChart.add_months(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min()), 3))) & (pandas.to_datetime(frame['Created_tr']) <= pandas.to_datetime(frame['Created_tr']).max())]
self.cumulative = self.cumulative + int(self.newFrame['Issue_key_tr'].count())
self.x.append(str(DynamicChart.get_date_for_dynamic_my(self, DynamicChart.add_months(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min()), 3), step_size)))
self.y.append(self.cumulative)
self.cumulative = 0
if step_size == '6M':
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) >= pandas.to_datetime(frame['Created_tr']).min())
& (pandas.to_datetime(frame['Created_tr']) <
pandas.to_datetime(DynamicChart.add_months(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min()), 6)))]
self.cumulative = self.cumulative + int(self.newFrame['Issue_key_tr'].count())
self.x.append(str(DynamicChart.get_date_for_dynamic_my(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr'], format='%Y-%m-%d').min()), step_size)))
self.y.append(self.cumulative)
if pandas.to_datetime(frame['Created_tr']).max() > pandas.to_datetime(DynamicChart.add_months(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min()), 6)):
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) >= pandas.to_datetime(DynamicChart.add_months(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min()), 6))) & (pandas.to_datetime(frame['Created_tr']) <= pandas.to_datetime(frame['Created_tr']).max())]
self.cumulative = self.cumulative + int(self.newFrame['Issue_key_tr'].count())
self.x.append(str(DynamicChart.get_date_for_dynamic_my(self, DynamicChart.add_months(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr']).min()), 6), step_size)))
self.y.append(self.cumulative)
self.cumulative = 0
if step_size == 'A-DEC':
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) >= pandas.to_datetime(frame['Created_tr']).min())
& (pandas.to_datetime(frame['Created_tr']) < pandas.to_datetime(str(int(self.periods[0])+1)))]
self.cumulative = self.cumulative + int(self.newFrame['Issue_key_tr'].count())
self.x.append(str(DynamicChart.get_date_for_dynamic_my(self, datetime.datetime.date(pandas.to_datetime(frame['Created_tr'], format='%Y-%m-%d').min()), step_size)))
self.y.append(self.cumulative)
if(pandas.to_datetime(frame['Created_tr']).max() > pandas.to_datetime(str(int(self.periods[0])+1))):
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) >= pandas.to_datetime(str(int(self.periods[0])+1)))
& (pandas.to_datetime(frame['Created_tr']) <= pandas.to_datetime(frame['Created_tr']).max())]
self.cumulative = self.cumulative + int(self.newFrame['Issue_key_tr'].count())
self.x.append(str(DynamicChart.get_date_for_dynamic_my(self, datetime.datetime.date(pandas.to_datetime(str(int(self.periods[0])+1))), step_size)))
self.y.append(self.cumulative)
self.cumulative = 0
else:
while self.count < self.countPeriodsList:
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) >= pandas.to_datetime(self.periods[self.count0])) &
(pandas.to_datetime(frame['Created_tr']) < pandas.to_datetime(self.periods[self.count1]))]
self.cumulative = self.cumulative + int(self.newFrame['Issue_key_tr'].count())
self.x.append(str(DynamicChart.get_date_for_dynamic_my(self, datetime.datetime.date(pandas.to_datetime(self.periods[self.count0], format='%Y-%m-%d')), step_size)))
self.y.append(self.cumulative)
self.count0 = self.count0 + 1
self.count1 = self.count1 + 1
self.count = self.count + 1
if pandas.to_datetime(frame['Created_tr']).max() >= pandas.to_datetime(self.periods[-1]):
self.newFrame = frame[(pandas.to_datetime(frame['Created_tr']) >= pandas.to_datetime(self.periods[-1])) &
(pandas.to_datetime(frame['Created_tr']) <= pandas.to_datetime(frame['Created_tr']).max())]
self.cumulative = self.cumulative + int(self.newFrame['Issue_key_tr'].count())
self.x.append(str(DynamicChart.get_date_for_dynamic_my(self, datetime.datetime.date(pandas.to_datetime(self.periods[-1], format='%Y-%m-%d')), step_size)))
self.y.append(self.cumulative)
self.cumulative = 0
self.dynamic_bugs.append(self.x)
self.dynamic_bugs.append(self.y)
self.plot['dynamic bugs'] = self.dynamic_bugs
return self.plot
# DataFrame separation (by periods)
def get_periods(self, frame, period):
self.periods = []
self.periodsFrame = pandas.period_range(start=pandas.to_datetime(frame['Created_tr']).min(), end= | pandas.to_datetime(frame['Created_tr']) | pandas.to_datetime |
from simio_lisa.simio_tables import *
import logging
import pandas as pd
import os
import plotly.express as px
from plotly.offline import plot
import time
from abc import ABC, abstractmethod
class SimioPlotter(ABC):
def __init__(self,
output_tables,
logger_level: int = logging.INFO,
**kwargs):
"""
Parent class.
:param output_tables: DICT containing all tables
:param **x_axis/y_axis/time_axis: column to be used as x/y/time axis
:param **legend_col: column to use to distinguish colors/legend
:param **objects_dict: dictionary to distinguish the groups of entities to be compared together
"""
self._tables_names = None
self._tables = output_tables
# Instance Tables
self._x_axis = kwargs.get('x_axis', None)
self._y_axis = kwargs.get('y_axis', None)
self._time_axis = kwargs.get('time_axis', None)
self._objects_dict = kwargs.get('objects_dict', None)
self._legend_col = kwargs.get('legend_col', None)
logging.getLogger().setLevel(logger_level)
@abstractmethod
def plot(self, tables, kind):
"""
Force all subclasses to have a plot method
"""
pass
@property
def tables(self):
return self._tables
@property
def tables_names(self):
return self._tables_names
@property
def time_axis(self):
return self._time_axis
@time_axis.setter
def time_axis(self, new_value):
self._time_axis = new_value
@property
def y_axis(self):
return self._y_axis
@y_axis.setter
def y_axis(self, new_value):
self._y_axis = new_value
@property
def x_axis(self):
return self._x_axis
@x_axis.setter
def x_axis(self, new_value):
self._x_axis = new_value
@property
def objects_dict(self):
return self._objects_dict
@objects_dict.setter
def objects_dict(self, new_value):
self._objects_dict = new_value
@property
def legend_col(self):
return self._legend_col
@legend_col.setter
def legend_col(self, new_value):
self._legend_col = new_value
class SimioTimeSeries(SimioPlotter):
def __init__(self,
output_tables,
logger_level: int = logging.INFO,
**kwargs):
"""
Child class of SimioPlotter for plotting time series. Required kwargs: time_axis and y_axis.
When using plot_columns (plot_tables), y_axis (tables) can be a list.
"""
SimioPlotter.__init__(self,
output_tables,
logger_level,
**kwargs)
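# Usage sketch (hypothetical table name and column names, for illustration only):
# plotter = SimioTimeSeries(output_tables={'runs': runs_df},
#                           time_axis='DateTime', y_axis=['Throughput', 'WIP'])
# plotter.plot_columns('runs')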
def plot_columns(self, table: str):
"""
Plot TimeSeries comparing different columns (y_axis should be a list of columns;
only one table should be provided)
"""
input_data = self.tables[table]
time_axis = self.time_axis
y_axis = self.y_axis
input_data[time_axis] = | pd.to_datetime(input_data[time_axis]) | pandas.to_datetime |
import csv
from io import StringIO
import os
import numpy as np
import pytest
from pandas.errors import ParserError
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
NaT,
Series,
Timestamp,
date_range,
read_csv,
to_datetime,
)
import pandas._testing as tm
import pandas.core.common as com
from pandas.io.common import get_handle
MIXED_FLOAT_DTYPES = ["float16", "float32", "float64"]
MIXED_INT_DTYPES = [
"uint8",
"uint16",
"uint32",
"uint64",
"int8",
"int16",
"int32",
"int64",
]
class TestDataFrameToCSV:
def read_csv(self, path, **kwargs):
params = {"index_col": 0, "parse_dates": True}
params.update(**kwargs)
return read_csv(path, **params)
def test_to_csv_from_csv1(self, float_frame, datetime_frame):
with tm.ensure_clean("__tmp_to_csv_from_csv1__") as path:
float_frame["A"][:5] = np.nan
float_frame.to_csv(path)
float_frame.to_csv(path, columns=["A", "B"])
float_frame.to_csv(path, header=False)
float_frame.to_csv(path, index=False)
# test roundtrip
# freq does not roundtrip
datetime_frame.index = datetime_frame.index._with_freq(None)
datetime_frame.to_csv(path)
recons = self.read_csv(path)
tm.assert_frame_equal(datetime_frame, recons)
datetime_frame.to_csv(path, index_label="index")
recons = self.read_csv(path, index_col=None)
assert len(recons.columns) == len(datetime_frame.columns) + 1
# no index
datetime_frame.to_csv(path, index=False)
recons = self.read_csv(path, index_col=None)
tm.assert_almost_equal(datetime_frame.values, recons.values)
# corner case
dm = DataFrame(
{
"s1": Series(range(3), index=np.arange(3)),
"s2": Series(range(2), index=np.arange(2)),
}
)
dm.to_csv(path)
recons = self.read_csv(path)
tm.assert_frame_equal(dm, recons)
def test_to_csv_from_csv2(self, float_frame):
with tm.ensure_clean("__tmp_to_csv_from_csv2__") as path:
# duplicate index
df = DataFrame(
np.random.randn(3, 3), index=["a", "a", "b"], columns=["x", "y", "z"]
)
df.to_csv(path)
result = self.read_csv(path)
tm.assert_frame_equal(result, df)
midx = MultiIndex.from_tuples([("A", 1, 2), ("A", 1, 2), ("B", 1, 2)])
df = DataFrame(np.random.randn(3, 3), index=midx, columns=["x", "y", "z"])
df.to_csv(path)
result = self.read_csv(path, index_col=[0, 1, 2], parse_dates=False)
tm.assert_frame_equal(result, df, check_names=False)
# column aliases
col_aliases = Index(["AA", "X", "Y", "Z"])
float_frame.to_csv(path, header=col_aliases)
rs = self.read_csv(path)
xp = float_frame.copy()
xp.columns = col_aliases
tm.assert_frame_equal(xp, rs)
msg = "Writing 4 cols but got 2 aliases"
with pytest.raises(ValueError, match=msg):
float_frame.to_csv(path, header=["AA", "X"])
def test_to_csv_from_csv3(self):
with tm.ensure_clean("__tmp_to_csv_from_csv3__") as path:
df1 = DataFrame(np.random.randn(3, 1))
df2 = DataFrame(np.random.randn(3, 1))
df1.to_csv(path)
df2.to_csv(path, mode="a", header=False)
xp = pd.concat([df1, df2])
rs = read_csv(path, index_col=0)
rs.columns = [int(label) for label in rs.columns]
xp.columns = [int(label) for label in xp.columns]
tm.assert_frame_equal(xp, rs)
def test_to_csv_from_csv4(self):
with tm.ensure_clean("__tmp_to_csv_from_csv4__") as path:
# GH 10833 (TimedeltaIndex formatting)
dt = pd.Timedelta(seconds=1)
df = DataFrame(
{"dt_data": [i * dt for i in range(3)]},
index=Index([i * dt for i in range(3)], name="dt_index"),
)
df.to_csv(path)
result = read_csv(path, index_col="dt_index")
result.index = pd.to_timedelta(result.index)
result["dt_data"] = pd.to_timedelta(result["dt_data"])
tm.assert_frame_equal(df, result, check_index_type=True)
def test_to_csv_from_csv5(self, timezone_frame):
# tz, 8260
with tm.ensure_clean("__tmp_to_csv_from_csv5__") as path:
timezone_frame.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=["A"])
converter = (
lambda c: to_datetime(result[c])
.dt.tz_convert("UTC")
.dt.tz_convert(timezone_frame[c].dt.tz)
)
result["B"] = converter("B")
result["C"] = converter("C")
tm.assert_frame_equal(result, timezone_frame)
def test_to_csv_cols_reordering(self):
# GH3454
chunksize = 5
N = int(chunksize * 2.5)
df = tm.makeCustomDataframe(N, 3)
cs = df.columns
cols = [cs[2], cs[0]]
with tm.ensure_clean() as path:
df.to_csv(path, columns=cols, chunksize=chunksize)
rs_c = read_csv(path, index_col=0)
tm.assert_frame_equal(df[cols], rs_c, check_names=False)
def test_to_csv_new_dupe_cols(self):
def _check_df(df, cols=None):
with tm.ensure_clean() as path:
df.to_csv(path, columns=cols, chunksize=chunksize)
rs_c = read_csv(path, index_col=0)
# we wrote them in a different order
# so compare them in that order
if cols is not None:
if df.columns.is_unique:
rs_c.columns = cols
else:
indexer, missing = df.columns.get_indexer_non_unique(cols)
rs_c.columns = df.columns.take(indexer)
for c in cols:
obj_df = df[c]
obj_rs = rs_c[c]
if isinstance(obj_df, Series):
tm.assert_series_equal(obj_df, obj_rs)
else:
tm.assert_frame_equal(obj_df, obj_rs, check_names=False)
# wrote in the same order
else:
rs_c.columns = df.columns
| tm.assert_frame_equal(df, rs_c, check_names=False) | pandas._testing.assert_frame_equal |
#!/usr/bin/env python
from contextlib import contextmanager
from deepdiff import DeepDiff
import fire
import numpy as np
import os
import pandas as pd
import pathlib
from penquins import Kowalski
from pprint import pprint
import questionary
import subprocess
import sys
import tdtax
from tdtax import taxonomy # noqa: F401
from typing import Sequence, Union
import yaml
from scope.utils import (
load_config,
plot_gaia_hr,
plot_light_curve_data,
plot_gaia_density,
plot_periods,
)
@contextmanager
def status(message):
"""
Borrowed from https://github.com/cesium-ml/baselayer/
:param message: message to print
:return:
"""
print(f"[·] {message}", end="")
sys.stdout.flush()
try:
yield
except Exception:
print(f"\r[✗] {message}")
raise
else:
print(f"\r[✓] {message}")
def check_configs(config_wildcards: Sequence = ("config.*yaml",)):
"""
- Check if config files exist
- Offer to use the config files that match the wildcards
- For config.yaml, check its contents against the defaults to make sure nothing is missing/wrong
:param config_wildcards:
:return:
"""
path = pathlib.Path(__file__).parent.absolute()
for config_wildcard in config_wildcards:
config = config_wildcard.replace("*", "")
# use config defaults if configs do not exist?
if not (path / config).exists():
answer = questionary.select(
f"{config} does not exist, do you want to use one of the following"
" (not recommended without inspection)?",
choices=[p.name for p in path.glob(config_wildcard)],
).ask()
subprocess.run(["cp", f"{path / answer}", f"{path / config}"])
# check contents of config.yaml WRT config.defaults.yaml
if config == "config.yaml":
with open(path / config.replace(".yaml", ".defaults.yaml")) as config_yaml:
config_defaults = yaml.load(config_yaml, Loader=yaml.FullLoader)
with open(path / config) as config_yaml:
config_wildcard = yaml.load(config_yaml, Loader=yaml.FullLoader)
deep_diff = DeepDiff(config_wildcard, config_defaults, ignore_order=True)
difference = {
k: v
for k, v in deep_diff.items()
if k in ("dictionary_item_added", "dictionary_item_removed")
}
if len(difference) > 0:
print("config.yaml structure differs from config.defaults.yaml")
pprint(difference)
raise KeyError("Fix config.yaml before proceeding")
class Scope:
def __init__(self):
# check configuration
with status("Checking configuration"):
check_configs(config_wildcards=["config.*yaml"])
self.config = load_config(
pathlib.Path(__file__).parent.absolute() / "config.yaml"
)
# use token specified as env var (if exists)
kowalski_token_env = os.environ.get("KOWALSKI_TOKEN")
if kowalski_token_env is not None:
self.config["kowalski"]["token"] = kowalski_token_env
# try setting up K connection if token is available
if self.config["kowalski"]["token"] is not None:
with status("Setting up Kowalski connection"):
self.kowalski = Kowalski(
token=self.config["kowalski"]["token"],
protocol=self.config["kowalski"]["protocol"],
host=self.config["kowalski"]["host"],
port=self.config["kowalski"]["port"],
)
else:
self.kowalski = None
# raise ConnectionError("Could not connect to Kowalski.")
print("Kowalski not available")
def _get_features(
self,
positions: Sequence[Sequence[float]],
catalog: str = "ZTF_source_features_20210401",
max_distance: Union[float, int] = 5.0,
distance_units: str = "arcsec",
) -> pd.DataFrame:
"""Get nearest source in feature set for a set of given positions
:param positions: R.A./Decl. [deg]
:param catalog: feature catalog to query
:param max_distance:
:param distance_units: arcsec | arcmin | deg | rad
:return:
"""
if self.kowalski is None:
raise ConnectionError("Kowalski connection not established.")
if catalog is None:
catalog = self.config["kowalski"]["collections"]["features"]
query = {
"query_type": "near",
"query": {
"max_distance": max_distance,
"distance_units": distance_units,
"radec": positions,
"catalogs": {
catalog: {
"filter": {},
"projection": {
"period": 1,
"ra": 1,
"dec": 1,
},
}
},
},
}
response = self.kowalski.query(query=query)
features_nearest = [
v[0] for k, v in response.get("data").get(catalog).items() if len(v) > 0
]
df = pd.DataFrame.from_records(features_nearest)
return df
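# Usage sketch (hypothetical coordinates; assumes a valid Kowalski token in the config):
# scope = Scope()
# features = scope._get_features(positions=[[281.0, 45.0]], max_distance=2.0)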
def _get_nearest_gaia(
self,
positions: Sequence[Sequence[float]],
catalog: str = None,
max_distance: Union[float, int] = 5.0,
distance_units: str = "arcsec",
) -> pd.DataFrame:
"""Get nearest Gaia source for a set of given positions
:param positions: R.A./Decl. [deg]
:param catalog: Gaia catalog to query
:param max_distance:
:param distance_units: arcsec | arcmin | deg | rad
:return:
"""
if self.kowalski is None:
raise ConnectionError("Kowalski connection not established.")
if catalog is None:
catalog = self.config["kowalski"]["collections"]["gaia"]
query = {
"query_type": "near",
"query": {
"max_distance": max_distance,
"distance_units": distance_units,
"radec": positions,
"catalogs": {
catalog: {
"filter": {},
"projection": {
"parallax": 1,
"parallax_error": 1,
"pmra": 1,
"pmra_error": 1,
"pmdec": 1,
"pmdec_error": 1,
"phot_g_mean_mag": 1,
"phot_bp_mean_mag": 1,
"phot_rp_mean_mag": 1,
"ra": 1,
"dec": 1,
},
}
},
},
"kwargs": {"limit": 1},
}
response = self.kowalski.query(query=query)
gaia_nearest = [
v[0] for k, v in response.get("data").get(catalog).items() if len(v) > 0
]
df = pd.DataFrame.from_records(gaia_nearest)
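# Absolute magnitude via the distance modulus: M = m - 5*log10(d_pc) + 5; with the Gaia
# parallax in mas, d_pc = 1000 / parallax, i.e. M = m + 5*log10(parallax * 0.001) + 5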
df["M"] = df["phot_g_mean_mag"] + 5 * np.log10(df["parallax"] * 0.001) + 5
df["Ml"] = (
df["phot_g_mean_mag"]
+ 5 * np.log10((df["parallax"] + df["parallax_error"]) * 0.001)
+ 5
)
df["BP-RP"] = df["phot_bp_mean_mag"] - df["phot_rp_mean_mag"]
return df
def _get_light_curve_data(
self,
ra: float,
dec: float,
catalog: str = "ZTF_sources_20201201",
cone_search_radius: Union[float, int] = 2,
cone_search_unit: str = "arcsec",
filter_flagged_data: bool = True,
) -> pd.DataFrame:
"""Get light curve data from Kowalski
:param ra: R.A. in deg
:param dec: Decl. in deg
:param catalog: collection name on Kowalski
:param cone_search_radius:
:param cone_search_unit: arcsec | arcmin | deg | rad
:param filter_flagged_data: remove flagged/bad data?
:return: flattened light curve data as pd.DataFrame
"""
if self.kowalski is None:
raise ConnectionError("Kowalski connection not established.")
query = {
"query_type": "cone_search",
"query": {
"object_coordinates": {
"cone_search_radius": cone_search_radius,
"cone_search_unit": cone_search_unit,
"radec": {"target": [ra, dec]},
},
"catalogs": {
catalog: {
"filter": {},
"projection": {
"_id": 1,
"filter": 1,
"field": 1,
"data.hjd": 1,
"data.fid": 1,
"data.mag": 1,
"data.magerr": 1,
"data.ra": 1,
"data.dec": 1,
"data.programid": 1,
"data.catflags": 1,
},
}
},
},
}
response = self.kowalski.query(query=query)
light_curves_raw = response.get("data").get(catalog).get("target")
light_curves = []
for light_curve in light_curves_raw:
df = pd.DataFrame.from_records(light_curve["data"])
# broadcast to all data points:
df["_id"] = light_curve["_id"]
df["filter"] = light_curve["filter"]
df["field"] = light_curve["field"]
light_curves.append(df)
df = pd.concat(light_curves, ignore_index=True)
if filter_flagged_data:
mask_flagged_data = df["catflags"] != 0
df = df.loc[~mask_flagged_data]
return df
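# Usage sketch (hypothetical coordinates; assumes a valid Kowalski token in the config):
# lc = scope._get_light_curve_data(ra=281.0, dec=45.0, cone_search_radius=2)
# # returns one row per photometric point with hjd, mag, magerr, filter, field, ...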
@staticmethod
def develop():
"""Install developer tools"""
subprocess.run(["pre-commit", "install"])
@classmethod
def lint(cls):
"""Lint sources"""
try:
import pre_commit # noqa: F401
except ImportError:
cls.develop()
try:
subprocess.run(["pre-commit", "run", "--all-files"], check=True)
except subprocess.CalledProcessError:
sys.exit(1)
def doc(self):
"""Build docs"""
# generate taxonomy.html
with status("Generating taxonomy visualization"):
path_static = pathlib.Path(__file__).parent.absolute() / "doc" / "_static"
if not path_static.exists():
path_static.mkdir(parents=True, exist_ok=True)
tdtax.write_viz(
self.config["taxonomy"], outname=path_static / "taxonomy.html"
)
# generate images for the Field Guide
if (self.kowalski is None) or (not self.kowalski.ping()):
print("Kowalski connection not established, cannot generate docs.")
return
period_limits = {
"cepheid": [1.0, 100.0],
"delta_scuti": [0.03, 0.3],
"beta_lyr": [0.3, 25],
"rr_lyr": [0.2, 1.0],
"w_uma": [0.2, 0.8],
}
period_loglimits = {
"cepheid": True,
"delta_scuti": False,
"beta_lyr": True,
"rr_lyr": False,
"w_uma": False,
}
# example periods
with status("Generating example period histograms"):
path_doc_data = pathlib.Path(__file__).parent.absolute() / "doc" / "data"
# stored as ra/decs in csv format under /data/golden
golden_sets = pathlib.Path(__file__).parent.absolute() / "data" / "golden"
for golden_set in golden_sets.glob("*.csv"):
golden_set_name = golden_set.stem
positions = | pd.read_csv(golden_set) | pandas.read_csv |
import pandas as pd
import numpy as np
from tkinter import *
from tkinter import filedialog
# Importing Chen Values
chen_67_to_69 = pd.read_csv('chcof1.id', index_col=0)
chen_70_to_74 = pd.read_csv('chcof2.id', index_col=0)
chen_75_to_80 = pd.read_csv('chcof3.id', index_col=0)
chen_81_to_85 = pd.read_csv('chcof4.id', index_col=0)
chen_86_to_90 = pd.read_csv('chcof5.id', index_col=0)
# Importing McGuire Values
mcg_57_to_59 = pd.read_csv('mgcof1.id', index_col=0)
mcg_60_to_69 = pd.read_csv('mgcof2.id', index_col=0)
mcg_70_to_73 = pd.read_csv('mgcof3.id', index_col=0)
mcg_74_to_76 = pd.read_csv('mgcof4.id', index_col=0)
mcg_77_to_90 = | pd.read_csv('mgcof5.id', index_col=0) | pandas.read_csv |
"""
HI/HA images.
The purpose of this code is to analyze HI/HA images.
We compare controls and patients with HI/HA.
We do this by looking at glucest images as a proxy for glutamate levels and brain
activation/function.
We are primarily interested in the hippocampal areas, but also look at the distribution
of glucest values in other areas.
Primary metrics: glucest means and medians in the hippocampus, asymmetry index,
hippocampal volumes, and distributions of glucest values across the brain.
"""
# pylint: disable=import-error, unused-import
# %% Imports
import copy
import os
import sklearn
import numpy as np
import pandas as pd
import seaborn as sns
import nibabel as nib
import nibabel.processing
import scipy.stats as st
import statsmodels.api as sm
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from sklearn import datasets, linear_model
import analysis.helper as hp
import analysis.constants as const
import scipy.ndimage
# %% CEST spreadsheet analysis
# =============================================================================
# Recreation of plots given by data from Rosenfeld (the HIHA_cest.csv file)
# We look at the peak cest values (the greater of the left and right hippocampal
# cest values)
# =============================================================================
df = | pd.read_csv(const.PATH_HIHA_CEST + "HIHA_cest.csv") | pandas.read_csv |
# #!/usr/bin/env python3
# # -*- coding: utf-8 -*-
#
# """
# modeling_morphogenesis.py
# A short description of the project.
#
# Handles the primary functions
# """
#
# import sys
# import argparse
#
#
# def warning(*objs):
# """Writes a message to stderr."""
# print("WARNING: ", *objs, file=sys.stderr)
#
#
# def canvas(with_attribution=True):
# """
# Placeholder function to show example docstring (NumPy format)
#
# Replace this function and doc string for your own project
#
# Parameters
# ----------
# with_attribution : bool, Optional, default: True
# Set whether or not to display who the quote is from
#
# Returns
# -------
# quote : str
# Compiled string including quote and optional attribution
# """
#
# quote = "The code is but a canvas to our imagination."
# if with_attribution:
# quote += "\n\t- Adapted from <NAME>"
# return quote
#
#
# def parse_cmdline(argv):
# """
# Returns the parsed argument list and return code.
# `argv` is a list of arguments, or `None` for ``sys.argv[1:]``.
# """
# if argv is None:
# argv = sys.argv[1:]
#
# # initialize the parser object:
# parser = argparse.ArgumentParser()
# # parser.add_argument("-i", "--input_rates", help="The location of the input rates file",
# # default=DEF_IRATE_FILE, type=read_input_rates)
# parser.add_argument("-n", "--no_attribution", help="Whether to include attribution",
# action='store_false')
# args = None
# try:
# args = parser.parse_args(argv)
# except IOError as e:
# warning("Problems reading file:", e)
# parser.print_help()
# return args, 2
#
# return args, 0
#
#
# def main(argv=None):
# args, ret = parse_cmdline(argv)
# if ret != 0:
# return ret
# print(canvas(args.no_attribution))
# return 0 # success
#
#
# if __name__ == "__main__":
# status = main()
# sys.exit(status)
#Ensuring compliance of code with both python2 and python3
from __future__ import division, print_function
try:
from itertools import izip as zip
except ImportError: # will be 3.x series
pass
import numpy as np
import scipy
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('white')
sns.set_context('talk')
import jpype
import pyNetLogo
import os
# os.chdir('/Users/agnesresto/Documents/NetLogo 6.0.4')
# print(os.environ)
# os.environ["JAVA\_HOME"] = "//Library/Internet Plug-Ins/JavaAppletPlugin.plugin/Contents/Home/bin/java python setup.py install"
# print(os.environ["JAVA\_HOME"]) #makes sure that java home system variable was set correctly
#Import the sampling and analysis modules for a Sobol variance-based
#sensitivity analysis
from SALib.sample import saltelli
from SALib.analyze import sobol
netlogo = pyNetLogo.NetLogoLink(gui=False,netlogo_home = '/Users/agnesresto/Documents/NetLogo 6.0.4')
netlogo.load_model('/Users/agnesresto/modeling_morphogenesis/modeling_morphogenesis/modeling_morphogenesis/Wolf Sheep Predation_v6.nlogo')
problem = {
'num_vars': 6,
'names': ['random-seed',
'grass-regrowth-time',
'sheep-gain-from-food',
'wolf-gain-from-food',
'sheep-reproduce',
'wolf-reproduce'],
'bounds': [[1, 100000],
[20., 40.],
[2., 8.],
[16., 32.],
[2., 8.],
[2., 8.]]
}
n = 10
param_values = saltelli.sample(problem, n, calc_second_order=True)
param_values.shape
results = | pd.DataFrame(columns=['Avg. sheep', 'Avg. wolves']) | pandas.DataFrame |
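# A hypothetical helper (an assumption, not part of the original snippet): run one Saltelli
# sample through the loaded Wolf Sheep model and return the mean sheep and wolf counts, so
# that the rows of `results` can be filled and later passed to sobol.analyze. The reporter
# names and the 100-tick run length follow the standard pyNetLogo Wolf Sheep example.
def run_sample(experiment):
    # Push each sampled value into NetLogo before setting up and running the model.
    for name, value in zip(problem['names'], experiment):
        if name == 'random-seed':
            netlogo.command('random-seed {}'.format(int(value)))
        else:
            netlogo.command('set {0} {1}'.format(name, value))
    netlogo.command('setup')
    counts = netlogo.repeat_report(['count sheep', 'count wolves'], 100)
    return np.mean(counts['count sheep']), np.mean(counts['count wolves'])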
from __future__ import print_function
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from pandas import (Series, Index, Int64Index, Timestamp, Period,
DatetimeIndex, PeriodIndex, TimedeltaIndex,
Timedelta, timedelta_range, date_range, Float64Index,
_np_version_under1p10)
import pandas.tslib as tslib
import pandas.tseries.period as period
import pandas.util.testing as tm
from pandas.tests.test_base import Ops
class TestDatetimeIndexOps(Ops):
tz = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/Asia/Singapore',
'dateutil/US/Pacific']
def setUp(self):
super(TestDatetimeIndexOps, self).setUp()
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
self.check_ops_properties(
['year', 'month', 'day', 'hour', 'minute', 'second', 'weekofyear',
'week', 'dayofweek', 'dayofyear', 'quarter'])
self.check_ops_properties(['date', 'time', 'microsecond', 'nanosecond',
'is_month_start', 'is_month_end',
'is_quarter_start',
'is_quarter_end', 'is_year_start',
'is_year_end', 'weekday_name'],
lambda x: isinstance(x, DatetimeIndex))
def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH7206
for op in ['year', 'day', 'second', 'weekday']:
self.assertRaises(TypeError, lambda x: getattr(self.dt_series, op))
# attribute access should still work!
s = Series(dict(year=2000, month=1, day=10))
self.assertEqual(s.year, 2000)
self.assertEqual(s.month, 1)
self.assertEqual(s.day, 10)
self.assertRaises(AttributeError, lambda: s.weekday)
def test_asobject_tolist(self):
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [Timestamp('2013-01-31'),
Timestamp('2013-02-28'),
Timestamp('2013-03-31'),
Timestamp('2013-04-30')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx', tz='Asia/Tokyo')
expected_list = [Timestamp('2013-01-31', tz='Asia/Tokyo'),
Timestamp('2013-02-28', tz='Asia/Tokyo'),
Timestamp('2013-03-31', tz='Asia/Tokyo'),
Timestamp('2013-04-30', tz='Asia/Tokyo')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = DatetimeIndex([datetime(2013, 1, 1), datetime(2013, 1, 2),
pd.NaT, datetime(2013, 1, 4)], name='idx')
expected_list = [Timestamp('2013-01-01'),
Timestamp('2013-01-02'), pd.NaT,
Timestamp('2013-01-04')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
for tz in self.tz:
# monotonic
idx1 = pd.DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], tz=tz)
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.DatetimeIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], tz=tz)
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timestamp('2011-01-01', tz=tz))
self.assertEqual(idx.max(), Timestamp('2011-01-03', tz=tz))
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = DatetimeIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
self.assertEqual(np.min(dr),
Timestamp('2016-01-15 00:00:00', freq='D'))
self.assertEqual(np.max(dr),
Timestamp('2016-01-20 00:00:00', freq='D'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, dr, out=0)
self.assertEqual(np.argmin(dr), 0)
self.assertEqual(np.argmax(dr), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, dr, out=0)
def test_round(self):
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=5,
freq='30Min', tz=tz)
elt = rng[1]
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 01:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(rng.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with tm.assertRaisesRegexp(ValueError, msg):
rng.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, rng.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_repeat_range(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
self.assertIsNone(result.freq)
self.assertEqual(len(result), 5 * len(rng))
for tz in self.tz:
index = pd.date_range('2001-01-01', periods=2, freq='D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-02', '2001-01-02'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.date_range('2001-01-01', periods=2, freq='2D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-03', '2001-01-03'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.DatetimeIndex(['2001-01-01', 'NaT', '2003-01-01'],
tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01', '2001-01-01',
'NaT', 'NaT', 'NaT',
'2003-01-01', '2003-01-01', '2003-01-01'],
tz=tz)
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
def test_repeat(self):
reps = 2
msg = "the 'axis' parameter is not supported"
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=2,
freq='30Min', tz=tz)
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
])
res = rng.repeat(reps)
tm.assert_index_equal(res, expected_rng)
self.assertIsNone(res.freq)
tm.assert_index_equal(np.repeat(rng, reps), expected_rng)
tm.assertRaisesRegexp(ValueError, msg, np.repeat,
rng, reps, axis=1)
def test_representation(self):
idx = []
idx.append(DatetimeIndex([], freq='D'))
idx.append(DatetimeIndex(['2011-01-01'], freq='D'))
idx.append(DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'
], freq='H', tz='Asia/Tokyo'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='US/Eastern'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='UTC'))
exp = []
exp.append("""DatetimeIndex([], dtype='datetime64[ns]', freq='D')""")
exp.append("DatetimeIndex(['2011-01-01'], dtype='datetime64[ns]', "
"freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+09:00', "
"'2011-01-01 10:00:00+09:00', '2011-01-01 11:00:00+09:00']"
", dtype='datetime64[ns, Asia/Tokyo]', freq='H')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00-05:00', "
"'2011-01-01 10:00:00-05:00', 'NaT'], "
"dtype='datetime64[ns, US/Eastern]', freq=None)")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+00:00', "
"'2011-01-01 10:00:00+00:00', 'NaT'], "
"dtype='datetime64[ns, UTC]', freq=None)""")
with | pd.option_context('display.width', 300) | pandas.option_context |
# -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Date: 2021/7/8 22:08
Desc: Jin10 Data Center - Economic Indicators - United States
https://datacenter.jin10.com/economic
"""
import json
import time
import pandas as pd
import demjson
import requests
from akshare.economic.cons import (
JS_USA_NON_FARM_URL,
JS_USA_UNEMPLOYMENT_RATE_URL,
JS_USA_EIA_CRUDE_URL,
JS_USA_INITIAL_JOBLESS_URL,
JS_USA_CORE_PCE_PRICE_URL,
JS_USA_CPI_MONTHLY_URL,
JS_USA_LMCI_URL,
JS_USA_ADP_NONFARM_URL,
JS_USA_GDP_MONTHLY_URL,
)
# Eastmoney - US - Pending Home Sales MoM
def macro_usa_phs():
"""
US pending home sales month-over-month rate
http://data.eastmoney.com/cjsj/foreign_0_5.html
:return: US pending home sales MoM
:rtype: pandas.DataFrame
"""
url = "http://datainterface.eastmoney.com/EM_DataCenter/JS.aspx"
params = {
'type': 'GJZB',
'sty': 'HKZB',
'js': '({data:[(x)],pages:(pc)})',
'p': '1',
'ps': '2000',
'mkt': '0',
'stat': '5',
'pageNo': '1',
'pageNum': '1',
'_': '1625474966006'
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text[1:-1])
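# Each record arrives as a single comma-separated string; split it into columns before
# assigning the column names below.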
temp_df = pd.DataFrame([item.split(',') for item in data_json['data']])
temp_df.columns = [
'时间',
'前值',
'现值',
'发布日期',
]
temp_df['前值'] = pd.to_numeric(temp_df['前值'])
temp_df['现值'] = pd.to_numeric(temp_df['现值'])
return temp_df
# Jin10 Data Center - Economic Indicators - US - Economic Conditions - US GDP
def macro_usa_gdp_monthly():
"""
US gross domestic product (GDP) report, data available from 20080228 to the present
https://datacenter.jin10.com/reportType/dc_usa_gdp
:return: pandas.Series
2008-02-28 0.6
2008-03-27 0.6
2008-04-30 0.9
2008-06-26 1
2008-07-31 1.9
...
2019-06-27 3.1
2019-07-26 2.1
2019-08-29 2
2019-09-26 2
2019-10-30 0
"""
t = time.time()
res = requests.get(
JS_USA_GDP_MONTHLY_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国国内生产总值(GDP)"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "53",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "gdp"
temp_df = temp_df.astype("float")
return temp_df
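# A minimal usage sketch (an assumption, not part of the upstream module): fetch the GDP
# series and print the latest releases. The __main__ guard keeps imports free of network
# side effects; datacenter.jin10.com must be reachable for the call to succeed.
if __name__ == "__main__":
    macro_usa_gdp_monthly_se = macro_usa_gdp_monthly()
    print(macro_usa_gdp_monthly_se.tail())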
# Jin10 Data Center - Economic Indicators - US - Price Levels - US CPI MoM report
def macro_usa_cpi_monthly():
"""
US CPI month-over-month report, data available from 19700101 to the present
https://datacenter.jin10.com/reportType/dc_usa_cpi
https://cdn.jin10.com/dc/reports/dc_usa_cpi_all.js?v=1578741110
:return: US CPI MoM report - latest value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
JS_USA_CPI_MONTHLY_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国居民消费价格指数(CPI)(月环比)"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "9",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "cpi_monthly"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - US - Price Levels - US Core CPI MoM report
def macro_usa_core_cpi_monthly():
"""
US core CPI month-over-month report, data available from 19700101 to the present
https://datacenter.jin10.com/reportType/dc_usa_core_cpi
https://cdn.jin10.com/dc/reports/dc_usa_core_cpi_all.js?v=1578740570
:return: US core CPI MoM report - latest value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_core_cpi_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国核心CPI月率报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "6",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_core_cpi"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - US - Price Levels - US Personal Spending MoM report
def macro_usa_personal_spending():
"""
US personal spending month-over-month report, data available from 19700101 to the present
https://datacenter.jin10.com/reportType/dc_usa_personal_spending
https://cdn.jin10.com/dc/reports/dc_usa_personal_spending_all.js?v=1578741327
:return: US personal spending MoM report - latest value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_personal_spending_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国个人支出月率报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "35",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_personal_spending"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - US - Price Levels - US Retail Sales MoM report
def macro_usa_retail_sales():
"""
US retail sales month-over-month report, data available from 19920301 to the present
https://datacenter.jin10.com/reportType/dc_usa_retail_sales
https://cdn.jin10.com/dc/reports/dc_usa_retail_sales_all.js?v=1578741528
:return: US retail sales MoM report - latest value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_retail_sales_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国零售销售月率报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "39",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_retail_sales"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - US - Price Levels - US Import Price Index report
def macro_usa_import_price():
"""
US import price index report, data available from 19890201 to the present
https://datacenter.jin10.com/reportType/dc_usa_import_price
https://cdn.jin10.com/dc/reports/dc_usa_import_price_all.js?v=1578741716
:return: US import price index report - latest value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_import_price_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国进口物价指数"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "18",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_import_price"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - US - Price Levels - US Export Price Index report
def macro_usa_export_price():
"""
US export price index report, data available from 19890201 to the present
https://datacenter.jin10.com/reportType/dc_usa_export_price
https://cdn.jin10.com/dc/reports/dc_usa_export_price_all.js?v=1578741832
:return: US export price index report - latest value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_export_price_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国出口价格指数"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "79",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_export_price"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - US - Labor Market - LMCI
def macro_usa_lmci():
"""
Fed Labor Market Conditions Index (LMCI) report, data available from 20141006 to the present
https://datacenter.jin10.com/reportType/dc_usa_lmci
https://cdn.jin10.com/dc/reports/dc_usa_lmci_all.js?v=1578742043
:return: Fed Labor Market Conditions Index report - latest value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
JS_USA_LMCI_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美联储劳动力市场状况指数"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "93",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "lmci"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - US - Labor Market - Unemployment - US Unemployment Rate report
def macro_usa_unemployment_rate():
"""
US unemployment rate report, data available from 19700101 to the present
https://datacenter.jin10.com/reportType/dc_usa_unemployment_rate
https://cdn.jin10.com/dc/reports/dc_usa_unemployment_rate_all.js?v=1578821511
:return: US unemployment rate report - latest value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
JS_USA_UNEMPLOYMENT_RATE_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国失业率"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"category": "ec",
"attr_id": "47",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df = temp_df.astype("float")
temp_df.name = "unemployment_rate"
return temp_df
# Jin10 Data Center - Economic Indicators - US - Labor Market - Unemployment - US Challenger Job Cuts report
def macro_usa_job_cuts():
"""
US Challenger job cuts report, data available from 19940201 to the present
https://datacenter.jin10.com/reportType/dc_usa_job_cuts
https://cdn.jin10.com/dc/reports/dc_usa_job_cuts_all.js?v=1578742262
:return: US Challenger job cuts report - latest value (10k persons)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_job_cuts_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国挑战者企业裁员人数报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(万人)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"category": "ec",
"attr_id": "78",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df = temp_df.astype("float")
temp_df.name = "usa_job_cuts"
return temp_df
# Jin10 Data Center - Economic Indicators - US - Labor Market - Employment - US Nonfarm Payrolls report
def macro_usa_non_farm():
"""
US nonfarm payrolls report, data available from 19700102 to the present
https://datacenter.jin10.com/reportType/dc_nonfarm_payrolls
https://cdn.jin10.com/dc/reports/dc_nonfarm_payrolls_all.js?v=1578742490
:return: US nonfarm payrolls report - latest value (10k persons)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
JS_USA_NON_FARM_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国非农就业人数"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(万人)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"category": "ec",
"attr_id": "33",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df = temp_df.astype("float")
temp_df.name = "non_farm"
return temp_df
# Jin10 Data Center - Economic Indicators - US - Labor Market - Employment - US ADP Employment report
def macro_usa_adp_employment():
"""
US ADP employment report, data available from 20010601 to the present
https://datacenter.jin10.com/reportType/dc_adp_nonfarm_employment
https://cdn.jin10.com/dc/reports/dc_adp_nonfarm_employment_all.js?v=1578742564
:return: US ADP employment report - latest value (10k persons)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
JS_USA_ADP_NONFARM_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国ADP就业人数(万人)"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(万人)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"category": "ec",
"attr_id": "1",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df = temp_df.astype("float")
temp_df.name = "adp"
return temp_df
# Jin10 Data Center - Economic Indicators - US - Labor Market - Consumer Income and Spending - US Core PCE Price Index YoY report
def macro_usa_core_pce_price():
"""
US core PCE price index year-over-year report, data available from 19700101 to the present
https://datacenter.jin10.com/reportType/dc_usa_core_pce_price
https://cdn.jin10.com/dc/reports/dc_usa_core_pce_price_all.js?v=1578742641
:return: US core PCE price index YoY report - latest value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
JS_USA_CORE_PCE_PRICE_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国核心PCE物价指数年率"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"category": "ec",
"attr_id": "80",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df = temp_df.astype("float")
temp_df.name = "core_pce_price"
return temp_df
# Jin10 Data Center - Economic Indicators - US - Labor Market - Consumer Income and Spending - US Real Personal Consumption Expenditures QoQ (preliminary) report
def macro_usa_real_consumer_spending():
"""
US real personal consumption expenditures quarter-over-quarter preliminary report, data available from 20131107 to the present
https://datacenter.jin10.com/reportType/dc_usa_real_consumer_spending
https://cdn.jin10.com/dc/reports/dc_usa_real_consumer_spending_all.js?v=1578742802
:return: US real personal consumption expenditures QoQ preliminary report - latest value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_real_consumer_spending_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国实际个人消费支出季率初值报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"category": "ec",
"attr_id": "81",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df = temp_df.astype("float")
temp_df.name = "usa_real_consumer_spending"
return temp_df
# Jin10 Data Center - Economic Indicators - US - Trade - US Trade Balance report
def macro_usa_trade_balance():
"""
US trade balance report, data available from 19700101 to the present
https://datacenter.jin10.com/reportType/dc_usa_trade_balance
https://cdn.jin10.com/dc/reports/dc_usa_trade_balance_all.js?v=1578742911
:return: US trade balance report - latest value (100 million USD)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_trade_balance_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国贸易帐报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(亿美元)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"category": "ec",
"attr_id": "42",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df = temp_df.astype("float")
temp_df.name = "usa_trade_balance"
return temp_df
# Jin10 Data Center - Economic Indicators - US - Trade - US Current Account report
def macro_usa_current_account():
"""
US current account report, data available from 20080317 to the present
https://datacenter.jin10.com/reportType/dc_usa_current_account
https://cdn.jin10.com/dc/reports/dc_usa_current_account_all.js?v=1578743012
:return: US current account report - latest value (100 million USD)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_current_account_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国经常账报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(亿美元)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"category": "ec",
"attr_id": "12",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df = temp_df.astype("float")
temp_df.name = "usa_current_account"
return temp_df
# Jin10 Data Center - Economic Indicators - US - Industry Indicators - Manufacturing - Baker Hughes Rig Count report
def macro_usa_rig_count():
"""
Baker Hughes rig count report, data available from 20080317 to the present
https://datacenter.jin10.com/reportType/dc_rig_count_summary
https://cdn.jin10.com/dc/reports/dc_rig_count_summary_all.js?v=1578743203
:return: Baker Hughes rig count report - current week
:rtype: pandas.Series
"""
t = time.time()
params = {
"_": t
}
res = requests.get("https://cdn.jin10.com/data_center/reports/baker.json", params=params)
temp_df = pd.DataFrame(res.json().get("values")).T
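# Each cell of the transposed table holds a [rig count, weekly change] pair; the lines
# below split those pairs into separate count ("钻井数") and change ("变化") columns.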
big_df = pd.DataFrame()
big_df["钻井总数_钻井数"] = temp_df["钻井总数"].apply(lambda x: x[0])
big_df["钻井总数_变化"] = temp_df["钻井总数"].apply(lambda x: x[1])
big_df["美国石油钻井_钻井数"] = temp_df["美国石油钻井"].apply(lambda x: x[0])
big_df["美国石油钻井_变化"] = temp_df["美国石油钻井"].apply(lambda x: x[1])
big_df["混合钻井_钻井数"] = temp_df["混合钻井"].apply(lambda x: x[0])
big_df["混合钻井_变化"] = temp_df["混合钻井"].apply(lambda x: x[1])
big_df["美国天然气钻井_钻井数"] = temp_df["美国天然气钻井"].apply(lambda x: x[0])
big_df["美国天然气钻井_变化"] = temp_df["美国天然气钻井"].apply(lambda x: x[1])
big_df = big_df.astype("float")
return big_df
# Jin10 Data Center - Economic Indicators - US - Industry Indicators - Manufacturing - US Producer Price Index (PPI) report
def macro_usa_ppi():
"""
US producer price index (PPI) report, data available from 20080226 to the present
https://datacenter.jin10.com/reportType/dc_usa_ppi
https://cdn.jin10.com/dc/reports/dc_usa_ppi_all.js?v=1578743628
:return: US producer price index (PPI) report - latest value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_ppi_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国生产者物价指数(PPI)报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "37",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_ppi"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - US - Industry Indicators - Manufacturing - US Core Producer Price Index (PPI) report
def macro_usa_core_ppi():
"""
US core producer price index (PPI) report, data available from 20080318 to the present
https://datacenter.jin10.com/reportType/dc_usa_core_ppi
https://cdn.jin10.com/dc/reports/dc_usa_core_ppi_all.js?v=1578743709
:return: US core producer price index (PPI) report - latest value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_core_ppi_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国核心生产者物价指数(PPI)报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "7",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_core_ppi"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - US - Industry Indicators - Manufacturing - US API Crude Oil Stock report
def macro_usa_api_crude_stock():
"""
US API crude oil stock report, data available from 20120328 to the present
https://datacenter.jin10.com/reportType/dc_usa_api_crude_stock
https://cdn.jin10.com/dc/reports/dc_usa_api_crude_stock_all.js?v=1578743859
:return: US API crude oil stock report - latest value (10k barrels)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_api_crude_stock_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国API原油库存报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(万桶)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "69",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_api_crude_stock"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - US - Industry Indicators - Manufacturing - US Markit Manufacturing PMI (flash) report
def macro_usa_pmi():
"""
US Markit manufacturing PMI flash report, data available from 20120601 to the present
https://datacenter.jin10.com/reportType/dc_usa_pmi
https://cdn.jin10.com/dc/reports/dc_usa_pmi_all.js?v=1578743969
:return: US Markit manufacturing PMI flash report - latest value
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_pmi_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国Markit制造业PMI报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "74",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_pmi"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic indicators - USA - Industry indicators - Manufacturing - US ISM manufacturing PMI report
def macro_usa_ism_pmi():
"""
US ISM manufacturing PMI report; data available from 1970-01-01 to the present
https://datacenter.jin10.com/reportType/dc_usa_ism_pmi
https://cdn.jin10.com/dc/reports/dc_usa_ism_pmi_all.js?v=1578744071
:return: US ISM manufacturing PMI report - current value
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_ism_pmi_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国ISM制造业PMI报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "28",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_ism_pmi"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic indicators - USA - Industry indicators - Industrial - US industrial production MoM report
def macro_usa_industrial_production():
"""
US industrial production MoM report; data available from 1970-01-01 to the present
https://datacenter.jin10.com/reportType/dc_usa_industrial_production
https://cdn.jin10.com/dc/reports/dc_usa_industrial_production_all.js?v=1578744188
:return: US industrial production MoM report - current value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_industrial_production_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国工业产出月率报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "20",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_industrial_production"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic indicators - USA - Industry indicators - Industrial - US durable goods orders MoM report
def macro_usa_durable_goods_orders():
"""
US durable goods orders MoM report; data available from 2008-02-27 to the present
https://datacenter.jin10.com/reportType/dc_usa_durable_goods_orders
https://cdn.jin10.com/dc/reports/dc_usa_durable_goods_orders_all.js?v=1578744295
:return: US durable goods orders MoM report - current value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_durable_goods_orders_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国耐用品订单月率报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "13",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_durable_goods_orders"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic indicators - USA - Industry indicators - Industrial - US factory orders MoM report
def macro_usa_factory_orders():
"""
US factory orders MoM report; data available from 1992-04-01 to the present
https://datacenter.jin10.com/reportType/dc_usa_factory_orders
https://cdn.jin10.com/dc/reports/dc_usa_factory_orders_all.js?v=1578744385
:return: US factory orders MoM report - current value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_factory_orders_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国工厂订单月率报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "16",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_factory_orders"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic indicators - USA - Industry indicators - Services - US Markit services PMI flash report
def macro_usa_services_pmi():
"""
US Markit services PMI flash report; data available from 2012-07-01 to the present
https://datacenter.jin10.com/reportType/dc_usa_services_pmi
https://cdn.jin10.com/dc/reports/dc_usa_services_pmi_all.js?v=1578744503
:return: US Markit services PMI flash report - current value
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_services_pmi_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国Markit服务业PMI初值报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "89",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_services_pmi"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic indicators - USA - Industry indicators - Services - US business inventories MoM report
def macro_usa_business_inventories():
"""
US business inventories MoM report; data available from 1992-03-01 to the present
https://datacenter.jin10.com/reportType/dc_usa_business_inventories
https://cdn.jin10.com/dc/reports/dc_usa_business_inventories_all.js?v=1578744618
:return: US business inventories MoM report - current value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_business_inventories_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国商业库存月率报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "4",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_business_inventories"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic indicators - USA - Industry indicators - Services - US ISM non-manufacturing PMI report
def macro_usa_ism_non_pmi():
"""
US ISM non-manufacturing PMI report; data available from 1997-08-01 to the present
https://datacenter.jin10.com/reportType/dc_usa_ism_non_pmi
https://cdn.jin10.com/dc/reports/dc_usa_ism_non_pmi_all.js?v=1578744693
:return: US ISM non-manufacturing PMI report - current value
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_ism_non_pmi_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国ISM非制造业PMI报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "29",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_ism_non_pmi"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic indicators - USA - Industry indicators - Real estate - US NAHB housing market index report
def macro_usa_nahb_house_market_index():
"""
US NAHB housing market index report; data available from 1985-02-01 to the present
https://datacenter.jin10.com/reportType/dc_usa_nahb_house_market_index
https://cdn.jin10.com/dc/reports/dc_usa_nahb_house_market_index_all.js?v=1578744817
:return: US NAHB housing market index report - current value
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_nahb_house_market_index_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国NAHB房产市场指数报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "31",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_nahb_house_market_index"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic indicators - USA - Industry indicators - Real estate - US housing starts (annualized total) report
def macro_usa_house_starts():
"""
US housing starts (annualized total) report; data available from 1970-01-01 to the present
https://datacenter.jin10.com/reportType/dc_usa_house_starts
https://cdn.jin10.com/dc/reports/dc_usa_house_starts_all.js?v=1578747388
:return: US housing starts (annualized total) report - current value (unit: 10,000 units)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_house_starts_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国新屋开工总数年化报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(万户)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "17",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_house_starts"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic indicators - USA - Industry indicators - Real estate - US new home sales (annualized total) report
def macro_usa_new_home_sales():
"""
US new home sales (annualized total) report; data available from 1970-01-01 to the present
https://datacenter.jin10.com/reportType/dc_usa_new_home_sales
https://cdn.jin10.com/dc/reports/dc_usa_new_home_sales_all.js?v=1578747501
:return: US new home sales (annualized total) report - current value (unit: 10,000 units)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_new_home_sales_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国新屋销售总数年化报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
# -*- coding: utf-8 -*-
import re
import warnings
from datetime import timedelta
from itertools import product
import pytest
import numpy as np
import pandas as pd
from pandas import (CategoricalIndex, DataFrame, Index, MultiIndex,
compat, date_range, period_range)
from pandas.compat import PY3, long, lrange, lzip, range, u, PYPY
from pandas.errors import PerformanceWarning, UnsortedIndexError
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.indexes.base import InvalidIndexError
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas._libs.tslib import Timestamp
import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal, assert_copy
from .common import Base
class TestMultiIndex(Base):
_holder = MultiIndex
_compat_props = ['shape', 'ndim', 'size']
def setup_method(self, method):
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
self.index_names = ['first', 'second']
self.indices = dict(index=MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels
], names=self.index_names,
verify_integrity=False))
self.setup_indices()
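# For reference, the fixture built above is equivalent to
# MultiIndex.from_tuples([('foo', 'one'), ('foo', 'two'), ('bar', 'one'),
#                         ('baz', 'two'), ('qux', 'one'), ('qux', 'two')],
#                        names=['first', 'second'])
# which many of the tests below rely on implicitly.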
def create_index(self):
return self.index
def test_can_hold_identifiers(self):
idx = self.create_index()
key = idx[0]
assert idx._can_hold_identifiers_and_holds_name(key) is True
def test_boolean_context_compat2(self):
# boolean context compat
# GH7897
i1 = MultiIndex.from_tuples([('A', 1), ('A', 2)])
i2 = MultiIndex.from_tuples([('A', 1), ('A', 3)])
common = i1.intersection(i2)
def f():
if common:
pass
tm.assert_raises_regex(ValueError, 'The truth value of a', f)
def test_labels_dtypes(self):
# GH 8456
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
assert i.labels[0].dtype == 'int8'
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(40)])
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(400)])
assert i.labels[1].dtype == 'int16'
i = MultiIndex.from_product([['a'], range(40000)])
assert i.labels[1].dtype == 'int32'
i = pd.MultiIndex.from_product([['a'], range(1000)])
assert (i.labels[0] >= 0).all()
assert (i.labels[1] >= 0).all()
def test_where(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
def f():
i.where(True)
pytest.raises(NotImplementedError, f)
def test_where_array_like(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
klasses = [list, tuple, np.array, pd.Series]
cond = [False, True]
for klass in klasses:
def f():
return i.where(klass(cond))
pytest.raises(NotImplementedError, f)
def test_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(m.repeat(reps), expected)
with tm.assert_produces_warning(FutureWarning):
result = m.repeat(n=reps)
tm.assert_index_equal(result, expected)
def test_numpy_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(np.repeat(m, reps), expected)
msg = "the 'axis' parameter is not supported"
tm.assert_raises_regex(
ValueError, msg, np.repeat, m, reps, axis=1)
def test_set_name_methods(self):
# so long as these are synonyms, we don't need to test set_names
assert self.index.rename == self.index.set_names
new_names = [name + "SUFFIX" for name in self.index_names]
ind = self.index.set_names(new_names)
assert self.index.names == self.index_names
assert ind.names == new_names
with tm.assert_raises_regex(ValueError, "^Length"):
ind.set_names(new_names + new_names)
new_names2 = [name + "SUFFIX2" for name in new_names]
res = ind.set_names(new_names2, inplace=True)
assert res is None
assert ind.names == new_names2
# set names for specific level (# GH7792)
ind = self.index.set_names(new_names[0], level=0)
assert self.index.names == self.index_names
assert ind.names == [new_names[0], self.index_names[1]]
res = ind.set_names(new_names2[0], level=0, inplace=True)
assert res is None
assert ind.names == [new_names2[0], self.index_names[1]]
# set names for multiple levels
ind = self.index.set_names(new_names, level=[0, 1])
assert self.index.names == self.index_names
assert ind.names == new_names
res = ind.set_names(new_names2, level=[0, 1], inplace=True)
assert res is None
assert ind.names == new_names2
@pytest.mark.parametrize('inplace', [True, False])
def test_set_names_with_nlevel_1(self, inplace):
# GH 21149
# Ensure that .set_names for MultiIndex with
# nlevels == 1 does not raise any errors
expected = pd.MultiIndex(levels=[[0, 1]],
labels=[[0, 1]],
names=['first'])
m = pd.MultiIndex.from_product([[0, 1]])
result = m.set_names('first', level=0, inplace=inplace)
if inplace:
result = m
tm.assert_index_equal(result, expected)
def test_set_levels_labels_directly(self):
# setting levels/labels directly raises AttributeError
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
with pytest.raises(AttributeError):
self.index.levels = new_levels
with pytest.raises(AttributeError):
self.index.labels = new_labels
def test_set_levels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
def assert_matching(actual, expected, check_dtype=False):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp)
tm.assert_numpy_array_equal(act, exp, check_dtype=check_dtype)
# level changing [w/o mutation]
ind2 = self.index.set_levels(new_levels)
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
# level changing specific level [w/o mutation]
ind2 = self.index.set_levels(new_levels[0], level=0)
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.set_levels(new_levels[1], level=1)
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/o mutation]
ind2 = self.index.set_levels(new_levels, level=[0, 1])
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# illegal level changing should not change levels
# GH 13754
original_index = self.index.copy()
for inplace in [True, False]:
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_levels(['c'], level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_labels([0, 1, 2, 3, 4, 5], level=0,
inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Levels"):
self.index.set_levels('c', level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Labels"):
self.index.set_labels(1, level=0, inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
def test_set_labels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
def assert_matching(actual, expected):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp, dtype=np.int8)
tm.assert_numpy_array_equal(act, exp)
# label changing [w/o mutation]
ind2 = self.index.set_labels(new_labels)
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
# label changing specific level [w/o mutation]
ind2 = self.index.set_labels(new_labels[0], level=0)
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.set_labels(new_labels[1], level=1)
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/o mutation]
ind2 = self.index.set_labels(new_labels, level=[0, 1])
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing for levels of different magnitude of categories
ind = pd.MultiIndex.from_tuples([(0, i) for i in range(130)])
new_labels = range(129, -1, -1)
expected = pd.MultiIndex.from_tuples(
[(0, i) for i in new_labels])
# [w/o mutation]
result = ind.set_labels(labels=new_labels, level=1)
assert result.equals(expected)
# [w/ mutation]
result = ind.copy()
result.set_labels(labels=new_labels, level=1, inplace=True)
assert result.equals(expected)
def test_set_levels_labels_names_bad_input(self):
levels, labels = self.index.levels, self.index.labels
names = self.index.names
with tm.assert_raises_regex(ValueError, 'Length of levels'):
self.index.set_levels([levels[0]])
with tm.assert_raises_regex(ValueError, 'Length of labels'):
self.index.set_labels([labels[0]])
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names([names[0]])
# should not accept scalar data; should demand list-like instead
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0])
# should not accept scalar data; should demand list-like instead
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0])
# should not accept scalar data; should demand list-like instead
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_names(names[0])
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_levels(levels, level=0)
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_labels(labels, level=0)
# should have equal lengths
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names(names[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'string'):
self.index.set_names(names, level=0)
def test_set_levels_categorical(self):
# GH13854
index = MultiIndex.from_arrays([list("xyzx"), [0, 1, 2, 3]])
for ordered in [False, True]:
cidx = CategoricalIndex(list("bac"), ordered=ordered)
result = index.set_levels(cidx, 0)
expected = MultiIndex(levels=[cidx, [0, 1, 2, 3]],
labels=index.labels)
tm.assert_index_equal(result, expected)
result_lvl = result.get_level_values(0)
expected_lvl = CategoricalIndex(list("bacb"),
categories=cidx.categories,
ordered=cidx.ordered)
tm.assert_index_equal(result_lvl, expected_lvl)
def test_metadata_immutable(self):
levels, labels = self.index.levels, self.index.labels
# shouldn't be able to set at either the top level or base level
mutable_regex = re.compile('does not support mutable operations')
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0] = levels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0][0] = levels[0][0]
# ditto for labels
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0] = labels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0][0] = labels[0][0]
# and for names
names = self.index.names
with tm.assert_raises_regex(TypeError, mutable_regex):
names[0] = names[0]
def test_inplace_mutation_resets_values(self):
levels = [['a', 'b', 'c'], [4]]
levels2 = [[1, 2, 3], ['a']]
labels = [[0, 1, 0, 2, 2, 0], [0, 0, 0, 0, 0, 0]]
mi1 = MultiIndex(levels=levels, labels=labels)
mi2 = MultiIndex(levels=levels2, labels=labels)
vals = mi1.values.copy()
vals2 = mi2.values.copy()
assert mi1._tuples is not None
# Make sure level setting works
new_vals = mi1.set_levels(levels2).values
tm.assert_almost_equal(vals2, new_vals)
# Non-inplace doesn't kill _tuples [implementation detail]
tm.assert_almost_equal(mi1._tuples, vals)
# ...and values is still same too
tm.assert_almost_equal(mi1.values, vals)
# Inplace should kill _tuples
mi1.set_levels(levels2, inplace=True)
tm.assert_almost_equal(mi1.values, vals2)
# Make sure label setting works too
labels2 = [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]
exp_values = np.empty((6,), dtype=object)
exp_values[:] = [(long(1), 'a')] * 6
# Must be 1d array of tuples
assert exp_values.shape == (6,)
new_values = mi2.set_labels(labels2).values
# Not inplace shouldn't change
tm.assert_almost_equal(mi2._tuples, vals2)
# Should have correct values
tm.assert_almost_equal(exp_values, new_values)
# ...and again setting inplace should kill _tuples, etc
mi2.set_labels(labels2, inplace=True)
tm.assert_almost_equal(mi2.values, new_values)
def test_copy_in_constructor(self):
levels = np.array(["a", "b", "c"])
labels = np.array([1, 1, 2, 0, 0, 1, 1])
val = labels[0]
mi = MultiIndex(levels=[levels, levels], labels=[labels, labels],
copy=True)
assert mi.labels[0][0] == val
labels[0] = 15
assert mi.labels[0][0] == val
val = levels[0]
levels[0] = "PANDA"
assert mi.levels[0][0] == val
def test_set_value_keeps_names(self):
# motivating example from #3742
lev1 = ['hans', 'hans', 'hans', 'grethe', 'grethe', 'grethe']
lev2 = ['1', '2', '3'] * 2
idx = pd.MultiIndex.from_arrays([lev1, lev2], names=['Name', 'Number'])
df = pd.DataFrame(
np.random.randn(6, 4),
columns=['one', 'two', 'three', 'four'],
index=idx)
df = df.sort_index()
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
df.at[('grethe', '4'), 'one'] = 99.34
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
def test_copy_names(self):
# Check that adding a "names" parameter to the copy is honored
# GH14302
multi_idx = pd.Index([(1, 2), (3, 4)], names=['MyName1', 'MyName2'])
multi_idx1 = multi_idx.copy()
assert multi_idx.equals(multi_idx1)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx1.names == ['MyName1', 'MyName2']
multi_idx2 = multi_idx.copy(names=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx2)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx2.names == ['NewName1', 'NewName2']
multi_idx3 = multi_idx.copy(name=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx3)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx3.names == ['NewName1', 'NewName2']
def test_names(self):
# names are assigned in setup
names = self.index_names
level_names = [level.name for level in self.index.levels]
assert names == level_names
# setting bad names on existing
index = self.index
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names",
list(index.names) + ["third"])
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names", [])
# initializing with bad names (should always be equivalent)
major_axis, minor_axis = self.index.levels
major_labels, minor_labels = self.index.labels
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first'])
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first', 'second', 'third'])
# names are assigned
index.names = ["a", "b"]
ind_names = list(index.names)
level_names = [level.name for level in index.levels]
assert ind_names == level_names
def test_astype(self):
expected = self.index.copy()
actual = self.index.astype('O')
assert_copy(actual.levels, expected.levels)
assert_copy(actual.labels, expected.labels)
self.check_level_names(actual, expected.names)
with tm.assert_raises_regex(TypeError, "^Setting.*dtype.*object"):
self.index.astype(np.dtype(int))
@pytest.mark.parametrize('ordered', [True, False])
def test_astype_category(self, ordered):
# GH 18630
msg = '> 1 ndim Categorical are not supported at this time'
with tm.assert_raises_regex(NotImplementedError, msg):
self.index.astype(CategoricalDtype(ordered=ordered))
if ordered is False:
# dtype='category' defaults to ordered=False, so only test once
with tm.assert_raises_regex(NotImplementedError, msg):
self.index.astype('category')
def test_constructor_single_level(self):
result = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]], names=['first'])
assert isinstance(result, MultiIndex)
expected = Index(['foo', 'bar', 'baz', 'qux'], name='first')
tm.assert_index_equal(result.levels[0], expected)
assert result.names == ['first']
def test_constructor_no_levels(self):
tm.assert_raises_regex(ValueError, "non-zero number "
"of levels/labels",
MultiIndex, levels=[], labels=[])
both_re = re.compile('Must pass both levels and labels')
with tm.assert_raises_regex(TypeError, both_re):
MultiIndex(levels=[])
with tm.assert_raises_regex(TypeError, both_re):
MultiIndex(labels=[])
def test_constructor_mismatched_label_levels(self):
labels = [np.array([1]), np.array([2]), np.array([3])]
levels = ["a"]
tm.assert_raises_regex(ValueError, "Length of levels and labels "
"must be the same", MultiIndex,
levels=levels, labels=labels)
length_error = re.compile('>= length of level')
label_error = re.compile(r'Unequal label lengths: \[4, 2\]')
# important to check that it's looking at the right thing.
with tm.assert_raises_regex(ValueError, length_error):
MultiIndex(levels=[['a'], ['b']],
labels=[[0, 1, 2, 3], [0, 3, 4, 1]])
with tm.assert_raises_regex(ValueError, label_error):
MultiIndex(levels=[['a'], ['b']], labels=[[0, 0, 0, 0], [0, 0]])
# external API
with tm.assert_raises_regex(ValueError, length_error):
self.index.copy().set_levels([['a'], ['b']])
with tm.assert_raises_regex(ValueError, label_error):
self.index.copy().set_labels([[0, 0, 0, 0], [0, 0]])
def test_constructor_nonhashable_names(self):
# GH 20527
levels = [[1, 2], [u'one', u'two']]
labels = [[0, 0, 1, 1], [0, 1, 0, 1]]
names = ((['foo'], ['bar']))
message = "MultiIndex.name must be a hashable type"
tm.assert_raises_regex(TypeError, message,
MultiIndex, levels=levels,
labels=labels, names=names)
# With .rename()
mi = MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=('foo', 'bar'))
renamed = [['foor'], ['barr']]
tm.assert_raises_regex(TypeError, message, mi.rename, names=renamed)
# With .set_names()
tm.assert_raises_regex(TypeError, message, mi.set_names, names=renamed)
@pytest.mark.parametrize('names', [['a', 'b', 'a'], ['1', '1', '2'],
['1', 'a', '1']])
def test_duplicate_level_names(self, names):
# GH18872
pytest.raises(ValueError, pd.MultiIndex.from_product,
[[0, 1]] * 3, names=names)
# With .rename()
mi = pd.MultiIndex.from_product([[0, 1]] * 3)
tm.assert_raises_regex(ValueError, "Duplicated level name:",
mi.rename, names)
# With .rename(., level=)
mi.rename(names[0], level=1, inplace=True)
tm.assert_raises_regex(ValueError, "Duplicated level name:",
mi.rename, names[:2], level=[0, 2])
def assert_multiindex_copied(self, copy, original):
# Levels should be (at least) shallow-copied
tm.assert_copy(copy.levels, original.levels)
tm.assert_almost_equal(copy.labels, original.labels)
# Labels don't matter which way they are copied
tm.assert_almost_equal(copy.labels, original.labels)
assert copy.labels is not original.labels
# Names don't matter which way they are copied
assert copy.names == original.names
assert copy.names is not original.names
# Sort order should be copied
assert copy.sortorder == original.sortorder
def test_copy(self):
i_copy = self.index.copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_shallow_copy(self):
i_copy = self.index._shallow_copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_view(self):
i_view = self.index.view()
self.assert_multiindex_copied(i_view, self.index)
def check_level_names(self, index, names):
assert [level.name for level in index.levels] == list(names)
def test_changing_names(self):
# names should be applied to levels
level_names = [level.name for level in self.index.levels]
self.check_level_names(self.index, self.index.names)
view = self.index.view()
copy = self.index.copy()
shallow_copy = self.index._shallow_copy()
# changing names should change level names on object
new_names = [name + "a" for name in self.index.names]
self.index.names = new_names
self.check_level_names(self.index, new_names)
# but not on copies
self.check_level_names(view, level_names)
self.check_level_names(copy, level_names)
self.check_level_names(shallow_copy, level_names)
# and copies shouldn't change original
shallow_copy.names = [name + "c" for name in shallow_copy.names]
self.check_level_names(self.index, new_names)
def test_get_level_number_integer(self):
self.index.names = [1, 0]
assert self.index._get_level_number(1) == 0
assert self.index._get_level_number(0) == 1
pytest.raises(IndexError, self.index._get_level_number, 2)
tm.assert_raises_regex(KeyError, 'Level fourth not found',
self.index._get_level_number, 'fourth')
def test_from_arrays(self):
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
# list of arrays as input
result = MultiIndex.from_arrays(arrays, names=self.index.names)
tm.assert_index_equal(result, self.index)
# infer correctly
result = MultiIndex.from_arrays([[pd.NaT, Timestamp('20130101')],
['a', 'b']])
assert result.levels[0].equals(Index([Timestamp('20130101')]))
assert result.levels[1].equals(Index(['a', 'b']))
def test_from_arrays_iterator(self):
# GH 18434
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
# iterator as input
result = MultiIndex.from_arrays(iter(arrays), names=self.index.names)
tm.assert_index_equal(result, self.index)
# invalid iterator input
with tm.assert_raises_regex(
TypeError, "Input must be a list / sequence of array-likes."):
MultiIndex.from_arrays(0)
def test_from_arrays_index_series_datetimetz(self):
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3,
tz='Asia/Tokyo')
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_timedelta(self):
idx1 = pd.timedelta_range('1 days', freq='D', periods=3)
idx2 = pd.timedelta_range('2 hours', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_period(self):
idx1 = pd.period_range('2011-01-01', freq='D', periods=3)
idx2 = pd.period_range('2015-01-01', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_datetimelike_mixed(self):
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3)
idx3 = pd.timedelta_range('1 days', freq='D', periods=3)
idx4 = pd.period_range('2011-01-01', freq='D', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2, idx3, idx4])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
tm.assert_index_equal(result.get_level_values(2), idx3)
tm.assert_index_equal(result.get_level_values(3), idx4)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1),
pd.Series(idx2),
pd.Series(idx3),
pd.Series(idx4)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result2.get_level_values(2), idx3)
tm.assert_index_equal(result2.get_level_values(3), idx4)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_categorical(self):
# GH13743
idx1 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=False)
idx2 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=True)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
result3 = pd.MultiIndex.from_arrays([idx1.values, idx2.values])
tm.assert_index_equal(result3.get_level_values(0), idx1)
tm.assert_index_equal(result3.get_level_values(1), idx2)
def test_from_arrays_empty(self):
# 0 levels
with tm.assert_raises_regex(
ValueError, "Must pass non-zero number of levels/labels"):
MultiIndex.from_arrays(arrays=[])
# 1 level
result = MultiIndex.from_arrays(arrays=[[]], names=['A'])
assert isinstance(result, MultiIndex)
expected = Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
# N levels
for N in [2, 3]:
arrays = [[]] * N
names = list('ABC')[:N]
result = MultiIndex.from_arrays(arrays=arrays, names=names)
expected = MultiIndex(levels=[[]] * N, labels=[[]] * N,
names=names)
tm.assert_index_equal(result, expected)
def test_from_arrays_invalid_input(self):
invalid_inputs = [1, [1], [1, 2], [[1], 2],
'a', ['a'], ['a', 'b'], [['a'], 'b']]
for i in invalid_inputs:
pytest.raises(TypeError, MultiIndex.from_arrays, arrays=i)
def test_from_arrays_different_lengths(self):
# see gh-13599
idx1 = [1, 2, 3]
idx2 = ['a', 'b']
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
idx1 = []
idx2 = ['a', 'b']
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
idx1 = [1, 2, 3]
idx2 = []
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
def test_from_product(self):
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
result = MultiIndex.from_product([first, second], names=names)
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
tm.assert_index_equal(result, expected)
def test_from_product_iterator(self):
# GH 18434
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
# iterator as input
result = MultiIndex.from_product(iter([first, second]), names=names)
tm.assert_index_equal(result, expected)
# Invalid non-iterable input
with tm.assert_raises_regex(
TypeError, "Input must be a list / sequence of iterables."):
MultiIndex.from_product(0)
def test_from_product_empty(self):
# 0 levels
with tm.assert_raises_regex(
ValueError, "Must pass non-zero number of levels/labels"):
MultiIndex.from_product([])
# 1 level
result = MultiIndex.from_product([[]], names=['A'])
expected = pd.Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
# 2 levels
l1 = [[], ['foo', 'bar', 'baz'], []]
l2 = [[], [], ['a', 'b', 'c']]
names = ['A', 'B']
for first, second in zip(l1, l2):
result = MultiIndex.from_product([first, second], names=names)
expected = MultiIndex(levels=[first, second],
labels=[[], []], names=names)
tm.assert_index_equal(result, expected)
# GH12258
names = ['A', 'B', 'C']
for N in range(4):
lvl2 = lrange(N)
result = MultiIndex.from_product([[], lvl2, []], names=names)
expected = MultiIndex(levels=[[], lvl2, []],
labels=[[], [], []], names=names)
tm.assert_index_equal(result, expected)
def test_from_product_invalid_input(self):
invalid_inputs = [1, [1], [1, 2], [[1], 2],
'a', ['a'], ['a', 'b'], [['a'], 'b']]
for i in invalid_inputs:
pytest.raises(TypeError, MultiIndex.from_product, iterables=i)
def test_from_product_datetimeindex(self):
dt_index = date_range('2000-01-01', periods=2)
mi = pd.MultiIndex.from_product([[1, 2], dt_index])
etalon = construct_1d_object_array_from_listlike([(1, pd.Timestamp(
'2000-01-01')), (1, pd.Timestamp('2000-01-02')), (2, pd.Timestamp(
'2000-01-01')), (2, pd.Timestamp('2000-01-02'))])
tm.assert_numpy_array_equal(mi.values, etalon)
def test_from_product_index_series_categorical(self):
# GH13743
first = ['foo', 'bar']
for ordered in [False, True]:
idx = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=ordered)
expected = pd.CategoricalIndex(list("abcaab") + list("abcaab"),
categories=list("bac"),
ordered=ordered)
for arr in [idx, pd.Series(idx), idx.values]:
result = pd.MultiIndex.from_product([first, arr])
tm.assert_index_equal(result.get_level_values(1), expected)
def test_values_boxed(self):
tuples = [(1, pd.Timestamp('2000-01-01')), (2, pd.NaT),
(3, pd.Timestamp('2000-01-03')),
(1, pd.Timestamp('2000-01-04')),
(2, pd.Timestamp('2000-01-02')),
(3, pd.Timestamp('2000-01-03'))]
result = pd.MultiIndex.from_tuples(tuples)
expected = construct_1d_object_array_from_listlike(tuples)
tm.assert_numpy_array_equal(result.values, expected)
# Check that code branches for boxed values produce identical results
tm.assert_numpy_array_equal(result.values[:4], result[:4].values)
def test_values_multiindex_datetimeindex(self):
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(10 ** 18, 10 ** 18 + 5)
naive = pd.DatetimeIndex(ints)
aware = pd.DatetimeIndex(ints, tz='US/Central')
idx = pd.MultiIndex.from_arrays([naive, aware])
result = idx.values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive)
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware)
# n_lev > n_lab
result = idx[:2].values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive[:2])
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware[:2])
def test_values_multiindex_periodindex(self):
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(2007, 2012)
pidx = pd.PeriodIndex(ints, freq='D')
idx = pd.MultiIndex.from_arrays([ints, pidx])
result = idx.values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints))
inner = pd.PeriodIndex([x[1] for x in result])
tm.assert_index_equal(inner, pidx)
"""
Auxiliary functions
"""
import warnings
warnings.filterwarnings("ignore")
import os
import json
import joblib
import numpy as np
import pandas as pd
from itertools import permutations, combinations, product
from numba import njit, prange
from tqdm import tqdm
import networkx as nx
import multiprocessing as mp
from math import factorial
from copy import copy
from matplotlib import pyplot as plt
import seaborn as sns
from scipy import sparse
import resource
import csv
from networkx.algorithms.distance_measures import diameter
from networkx.algorithms.components import is_weakly_connected, is_strongly_connected, strongly_connected_components
from networkx.algorithms.centrality import degree_centrality, betweenness_centrality
from networkx.convert_matrix import to_numpy_array
from networkx.algorithms.swap import double_edge_swap
from collections import namedtuple
n_combs = lambda n, k: factorial(n) // (factorial(n - k) * factorial(k))
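# e.g. n_combs(5, 3) == 10, the number of unordered node triples in a 5-node network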
def read_ecoli_network(path):
f = open(path)
line = f.readline()
while line.startswith('#'):
line = f.readline()
df = pd.read_csv(f, sep="\t", header=None)
df.loc[-1] = line.split("\t")
df.index = df.index + 1
df = df.sort_index()
f.close()
return df
def get_actual_parametrization(source, check_input=True, verbose=False):
cfg = source if type(source) is dict else json.load(open(source, "r"))
if check_input:
assert cfg["NETWORK_TO_SEARCH_IN"] in ["ecoli", "test", "yeast", "ecoli", "gs0.01", "gs0.1", "gs1"]
if verbose:
for param, value in cfg.items():
print(f"{param}: {value}")
return cfg
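# A minimal configuration sketch (only the keys this module actually reads are shown;
# real config files may carry more, and the motif name must be a key of
# motifs_collection.json, so it is left as a placeholder here):
#
#   {
#       "NETWORK_TO_SEARCH_IN": "ecoli",
#       "MOTIF_TO_SEARCH_FOR": "<key from motifs_collection.json>",
#       "SELFLOOPS_INCLUDED": false,
#       "N_CORES_TO_USE": -1
#   }
#
# get_actual_parametrization accepts either such a dict or a path to a JSON file with
# the same structure.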
def update_cfg(path, param, value, verbose=False):
cfg = get_actual_parametrization(path, check_input=False, verbose=False)
cfg[param] = value
cfg = get_actual_parametrization(cfg, verbose=verbose)
json.dump(cfg, open(path, "w"))
return cfg
def get_interaction_matrix(config_file):
cwd = os.path.abspath(os.path.join(os.getcwd(), os.pardir))
network = config_file["NETWORK_TO_SEARCH_IN"]
interaction_matrix = joblib.load(
os.path.join(cwd, "networks", network, f"interaction_matrix.gz")
)
return interaction_matrix
def build_motif_from_string(string):
return np.array(list(map(int, string.split()))).reshape(3, 3)
def get_equivalents(core_pattern):
pattern_variants = []
for permutation in permutations(range(3)):
variant = core_pattern[permutation, :]
variant = variant[:, permutation]
for prev_variant in pattern_variants:
if (variant - prev_variant == np.zeros((3, 3))).all():
break
else:
pattern_variants.append(variant)
return pattern_variants
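# Minimal sketch: the directed 3-cycle below has only 2 distinct relabelings
# (not 3! = 6) because cyclic node permutations map it onto itself.
def _example_count_equivalents():
    cycle = build_motif_from_string("0 1 0 0 0 1 1 0 0")
    return len(get_equivalents(cycle))  # expected: 2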
def print_equivalents(config_file):
m = build_motif_from_string(json.load(open("./motifs_collection.json", "r"))[config_file["MOTIF_TO_SEARCH_FOR"]])
if config_file["SELFLOOPS_INCLUDED"]: m += np.diag([1]*3)
equivalents = get_equivalents(m)
print(f"""Equivalent forms for {config_file["MOTIF_TO_SEARCH_FOR"]}{" with selfloops" if config_file["SELFLOOPS_INCLUDED"] else ""}\
({len(equivalents)} total):""")
for x in equivalents:
print(x)
print()
def get_triad_codes(path=None):
motifs = json.load(open("../motifs_collection.json", "r"))
salt = np.array([2**i for i in range(6)])
mapping = {x: i for i, x in enumerate(motifs.keys())}
codes = {}
for motif in motifs.keys():
form = build_motif_from_string(motifs[motif])
isoforms = get_equivalents(form)
for isoform in isoforms:
mask = np.concatenate([np.diag(isoform, k=i) for i in [-2, -1, 1, 2]])
code = mask @ np.array([2**i for i in range(6)])
codes[code] = mapping[motif]
xcodes = [-1 for _ in range(sum(salt)+1)]
for code, motif in codes.items():
xcodes[code] = motif
return xcodes, {i: x for x, i in mapping.items()}
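# Worked sketch of the encoding used above: the 6 off-diagonal cells of a triad,
# read along the diagonals k = -2, -1, 1, 2 and dotted with [1, 2, 4, 8, 16, 32],
# give one integer code per adjacency pattern, so lookup is a plain list index.
def _example_triad_code():
    triad = np.array([[0, 0, 0],
                      [1, 0, 0],
                      [1, 1, 0]])  # feed-forward-loop-like pattern (assumed convention)
    mask = np.concatenate([np.diag(triad, k=i) for i in [-2, -1, 1, 2]])
    return int(mask @ np.array([2 ** i for i in range(6)]))  # 7 for this pattern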
@njit(cache=True)
def get_motifs(interaction_matrix, combs, codes, n):
triads = [[(-1, -1, -1)] for _ in range(n)]
salt = np.array([2**i for i in range(6)]).astype(np.float64)
n_combinations = len(combs)
for i in prange(n_combinations):
c = combs[i]
cl = np.array(c)
triad = interaction_matrix[cl, :][:, cl]
mask = [0]
for k in [-2, -1, 1, 2]:
mask += list(np.diag(triad, k=k))
mask = np.array(mask[1:]).astype(np.float64)
code = int(mask @ salt)
idx = codes[code]
if idx == -1:
pass
else:
triads[idx] += [c]
return triads
def motif_search(config_file, interaction_matrix, batch_size, dump=False, verbose=False):
network_name = config_file["NETWORK_TO_SEARCH_IN"]
codes, mapping = get_triad_codes()
N_CORES = mp.cpu_count() if config_file["N_CORES_TO_USE"] == -1 else config_file["N_CORES_TO_USE"]
def connected_triads_generator(interaction_matrix):
        # the original string comparison could never be True; use scipy's sparse check
        if sparse.issparse(interaction_matrix):
            interaction_matrix = sparse.csr_matrix.toarray(interaction_matrix)
interaction_matrix_adj = interaction_matrix - np.diag(np.diag(interaction_matrix))
tg_idxs, tf_idxs = np.where(interaction_matrix_adj != 0)
links = pd.DataFrame(index=range(len(tf_idxs)), columns=["tf", "tg"])
links.tf = tf_idxs
links.tg = tg_idxs
links_tf = links.set_index("tf", drop=False)[["tg"]]
cascades = links.join(links_tf[["tg"]], on="tg", how="inner", rsuffix="_final")
cascades = cascades[cascades.tf != cascades.tg_final]
for cascade in cascades.values:
yield tuple(cascade)
grouper = links.groupby("tg")
counter = grouper["tf"].count()
for tg in counter[counter > 1].index:
tf_pairs = combinations(links[links.tg == tg].tf.values, 2)
for tf_1, tf_2 in tf_pairs:
yield tf_1, tf_2, tg
grouper = links.groupby("tf")
counter = grouper["tg"].count()
for tf in counter[counter > 1].index:
tg_pairs = combinations(links[links.tf == tf].tg.values, 2)
for tg_1, tg_2 in tg_pairs:
yield tf, tg_1, tg_2
triads = connected_triads_generator(interaction_matrix)
def batch_generator(triads):
batch = []
counter = 0
for triad in triads:
batch.append(triad)
counter += 1
if counter == batch_size:
yield batch
batch = []
counter = 0
yield batch
def processor(splitted_triads):
def gen_to_queue(input_q, splitted_triads):
for batch in splitted_triads:
input_q.put(batch)
for _ in range(N_CORES):
input_q.put(None)
def process(input_q, output_q):
while True:
batch = input_q.get()
if batch is None:
output_q.put(None)
break
output_q.put(get_motifs(interaction_matrix, batch, codes, len(mapping)))
input_q = mp.Queue(maxsize = N_CORES * 2)
output_q = mp.Queue(maxsize = N_CORES * 2)
gen_pool = mp.Pool(1, initializer=gen_to_queue, initargs=(input_q, splitted_triads))
pool = mp.Pool(N_CORES, initializer=process, initargs=(input_q, output_q))
finished_workers = 0
while True:
result = output_q.get()
if result is None:
finished_workers += 1
if finished_workers == N_CORES:
break
else:
yield result
input_q = None
output_q = None
gen_pool.close()
gen_pool.join()
pool.close()
pool.join()
splitted_triads = batch_generator(triads)
motifs_generator = processor(splitted_triads)
motifs = [[] for _ in range(len(mapping))]
for batch in tqdm(motifs_generator) if verbose else motifs_generator:
for i in range(len(mapping)):
if batch[i][1:] != []:
for triad in batch[i][1:]:
motifs[i].append("_".join(map(str, sorted(triad))))
motifs = {mapping[i]: list(set(motifs[i])) for i in range(len(mapping))}
counter = {x: len(y) for x, y in motifs.items()}
if dump:
joblib.dump(motifs, f"./networks/{network_name}/motifs.gz")
json.dump(counter, open(f"./networks/{network_name}/counter.json", "w"))
return motifs, counter
def count_triads_nx(interaction_matrix):
G = nx.DiGraph(interaction_matrix.T)
return nx.algorithms.triads.triadic_census(G)
def get_metrics_report(interaction_matrix):
Report = namedtuple(
"report",
["degree_seq", "avg_degree", "diameter_strong", "diameter_weak",
"largest_component_frac", "degree_centrality", "betweenness_centrality"]
)
G = nx.DiGraph(interaction_matrix.T)
degree_seq = pd.Series(np.array([x[1] for x in G.degree]))
avg_degree = degree_seq.mean()
diameter_weak = diameter(G.to_undirected()) if is_weakly_connected(G) else np.inf
if is_strongly_connected(G):
diameter_strong = diameter(G)
largest_component_frac = 1
else:
diameter_strong = np.inf
strong_components = [(c, len(c)) for c in strongly_connected_components(G)]
strong_components = sorted(strong_components, key=lambda x: x[1], reverse=True)
largest_component_frac = strong_components[0][1]/interaction_matrix.shape[0]
dc = pd.Series(degree_centrality(G))
bc = pd.Series(betweenness_centrality(G))
report = Report(*[degree_seq, avg_degree, diameter_strong, diameter_weak, largest_component_frac, dc, bc])
return report
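# Hedged usage sketch on a tiny 3-node chain (matrix[target, tf] convention assumed,
# since the graph above is built from the transposed matrix); diameters fall back to
# np.inf when the graph is not strongly/weakly connected.
def _example_metrics_report():
    m = np.array([[0, 0, 0],
                  [1, 0, 0],
                  [0, 1, 0]])
    rep = get_metrics_report(m)
    return rep.avg_degree, rep.diameter_weak, rep.diameter_strong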
def get_loops(matrix):
m = matrix + matrix.T
x = sorted([sorted([x, y]) for x, y in zip(*np.where(m == 2))])
y = [x[k] for k in range(len(x)) if k % 2 == 0]
return y
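# Minimal sketch: get_loops reports each reciprocal (two-node) feedback loop once.
def _example_get_loops():
    m = np.array([[0, 1, 0],
                  [1, 0, 0],
                  [1, 0, 0]])
    return get_loops(m)  # expected: [[0, 1]]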
@njit
def get_shuffled_matrix(interaction_matrix, nswaps):
shuffled = interaction_matrix.copy()
tf_nodes = np.where(shuffled.sum(axis=0) != 0)[0]
for i in range(nswaps):
tf_1, tf_2 = np.random.choice(tf_nodes, size=2, replace=True)
tg = shuffled[:, np.array([tf_1, tf_2])]
x = np.where((tg[:, 0] == 1) & (tg[:, 1] == 0))[0]
if x.shape[0] > 0:
tg_1 = np.random.choice(x)
else:
continue
y = np.where((tg[:, 1] == 1) & (tg[:, 0] == 0))[0]
if y.shape[0] > 0:
tg_2 = np.random.choice(y)
else:
continue
s = shuffled[np.array([tg_1, tg_2]), :][:, np.array([tf_1, tf_2])]
e1 = np.diag(np.array([1, 1]))
e2 = e1[::-1]
if (s == e1).all():
shuffled[tg_1, tf_1] = 0
shuffled[tg_1, tf_2] = 1
shuffled[tg_2, tf_1] = 1
shuffled[tg_2, tf_2] = 0
else:
shuffled[tg_1, tf_1] = 1
shuffled[tg_1, tf_2] = 0
shuffled[tg_2, tf_1] = 0
shuffled[tg_2, tf_2] = 1
return shuffled
def corruption_score(shuffled_matrix, interaction_matrix):
i, j = np.where(interaction_matrix == 1)
return shuffled_matrix[i, j].sum()/interaction_matrix[i, j].sum()
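# Hedged sketch: degree-preserving edge swapping followed by a corruption check.
# A corruption score near 1 means the shuffle barely changed the network; near 0
# means most of the original edges were rewired.
def _example_shuffle_and_score(interaction_matrix, nswaps=1000):
    shuffled = get_shuffled_matrix(interaction_matrix, nswaps)
    assert shuffled.sum() == interaction_matrix.sum()  # total edge count is preserved
    return corruption_score(shuffled, interaction_matrix)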
def plot_distr(counters_shuffled, counter_orig, label, highlight):
df = | pd.DataFrame(columns=["motif", "abundance", "network"]) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 2 09:20:37 2021
Additional utilities used in multiple scripts for shread_dash.py
@author: buriona,tclarkin
"""
import time
import pandas as pd
import numpy as np
import plotly.graph_objects as go
import dash
from database import db
import datetime as dt
from datetime import timezone
from requests import get as r_get
from requests.exceptions import ReadTimeout
from io import StringIO
# Function for importing data
def import_snotel(site_triplet, start_date, end_date, vars=["WTEQ","SNWD","PREC","TAVG"],dtype="dv",verbose=False):
"""Download NRCS SNOTEL data
Parameters
---------
site_triplet: three part SNOTEL triplet (e.g., 713_CO_SNTL)
start_date: datetime
end_date: datetime
vars: array of variables for import (tested with WTEQ, SNWD, PREC, TAVG..other options may be available)
dtype: str (only daily, dv, supported)
verbose: boolean
True : enable print during function run
Returns
-------
dataframe
"""
# Set filepath extension for dtype
if dtype == "dv":
ext = "DAILY"
if dtype == "iv":
ext = "DAILY"
# TODO: basinaverage data
# TODO: soil moisture
# Create output index and dataframe
dates = | pd.date_range(start_date, end_date, freq="D", tz='UTC') | pandas.date_range |
#!/usr/bin/python
# encoding: utf-8
"""
@author: Ian
@file: data_explore.py
@time: 2019-05-08 10:21
"""
import pandas as pd
from feature_selector import FeatureSelector
from mayiutils.datasets.data_preprocessing import DataExplore as de
import math
if __name__ == '__main__':
mode = 5
if mode == 5:
"""
特征工程
"""
mzdf = pd.read_csv('../data/mz_all_claim.csv', encoding='gbk', index_col=0,
parse_dates=['出生日期', '就诊结帐费用发生日期'])
mzdf['出生月'] = mzdf['出生日期'].dt.month
mzdf['就诊结帐费用发生月'] = mzdf['就诊结帐费用发生日期'].dt.month
        # the original line overwrote the month feature created just above; a separate
        # weekday feature appears to be intended (the column name below is an assumption)
        mzdf['就诊结帐费用发生weekday'] = mzdf['就诊结帐费用发生日期'].dt.weekday
del mzdf['主被保险人客户号']
del mzdf['出险人客户号']
del mzdf['出生日期']
del mzdf['收据号']
del mzdf['医院代码']
del mzdf['就诊结帐费用发生日期']
del mzdf['费用合计']
del mzdf['疾病代码']
mzdf.index = mzdf['总案号_分案号']
del mzdf['总案号_分案号']
del mzdf['费用金额']
del mzdf['自费金额']
del mzdf['部分自付金额']
del mzdf['医保支付金额']
del mzdf['自费总金额']
del mzdf['费用金额mean']
fs = FeatureSelector(data=mzdf)
fs.identify_missing(missing_threshold=0.6)
print(fs.record_missing)
fs.identify_single_unique()
print(fs.record_single_unique)
fs.identify_collinear(correlation_threshold=0.975)
print(fs.record_collinear)
"""
"""
train_removed = fs.remove(methods=['missing', 'single_unique', 'collinear'])
train_removed.info()
# train_removed.to_csv('../data/mz_train_data.csv', encoding='gbk', index=True)
if mode == 4:
"""
事件压成赔案
把费用和相对应的就诊类型名称相加
统计每个赔案包含的事件数
统计每个赔案的去医院数量
统计每个赔案的诊断数量
统计每个赔案的出险人数量
统计每个赔案去的不同等级的医院数量 暂时没做!
各种费用占总费用比
各种费用的log(去掉原费用)
"""
mzdf = pd.read_csv('../data/mz_all_event.csv', encoding='gbk', index_col=0,
parse_dates=['出生日期', '就诊结帐费用发生日期'])
flist = [
'费用金额', '自费金额', '部分自付金额', '医保支付金额',
'费用项目名称_中成药费', '费用项目名称_中草药', '费用项目名称_其他费', '费用项目名称_化验费',
'费用项目名称_床位费', '费用项目名称_手术费', '费用项目名称_护理费', '费用项目名称_挂号费',
'费用项目名称_材料费', '费用项目名称_检查费', '费用项目名称_治疗费', '费用项目名称_西药费',
'费用项目名称_诊疗费'
]
train_removedg = mzdf.groupby(['总案号_分案号'])[flist].sum()
train_removedg1 = mzdf.groupby(['总案号_分案号'])['收据号'].count()
train_removedg1.columns = ['event_num']
train_removedg1.name = 'event_num'
print(train_removedg1.columns)
print(train_removedg1.head())
        # count the number of distinct hospitals visited
def t(arr):
return arr.unique().shape[0]
train_removedg2 = mzdf.groupby(['总案号_分案号'])['医院等级'].agg(t)
train_removedg2.columns = ['hos_num']
train_removedg2.name = 'hos_num'
        # count the number of distinct diagnoses
train_removedg3 = mzdf.groupby(['总案号_分案号'])['疾病代码'].agg(t)
train_removedg3.columns = ['疾病代码_num']
train_removedg3.name = '疾病代码_num'
        # count the number of distinct insured persons
train_removedg4 = mzdf.groupby(['总案号_分案号'])['出险人客户号'].agg(t)
train_removedg4.columns = ['出险人客户号_num']
train_removedg4.name = '出险人客户号_num'
mzdf = mzdf.drop_duplicates(subset=['总案号_分案号'], keep='first')
for i in flist:
del mzdf[i]
mzdf = pd.merge(mzdf, train_removedg, how='left', left_on=['总案号_分案号'], right_index=True)
mzdf = pd.merge(mzdf, train_removedg1, how='left', left_on=['总案号_分案号'], right_index=True)
mzdf = pd.merge(mzdf, train_removedg2, how='left', left_on=['总案号_分案号'], right_index=True)
mzdf = pd.merge(mzdf, train_removedg3, how='left', left_on=['总案号_分案号'], right_index=True)
mzdf = pd.merge(mzdf, train_removedg4, how='left', left_on=['总案号_分案号'], right_index=True)
        # Feature construction
        # out-of-pocket amount share
        mzdf['自费总金额'] = mzdf['自费金额'] + mzdf['部分自付金额']
        # ratio of total out-of-pocket amount to the total fee
        mzdf['自费总金额占比'] = mzdf['自费总金额'] / mzdf['费用金额']
        # share paid by medical insurance
        mzdf['医保支付金额占比'] = mzdf['医保支付金额'] / mzdf['费用金额']
        # average fee amount per event
mzdf['费用金额mean'] = mzdf['费用金额'] / mzdf['event_num']
# log
def tlog(x):
if x < 1:
x = 0
if x != 0:
x = math.log10(x)
return x
mzdf['费用金额log'] = mzdf['费用金额'].apply(tlog)
mzdf['自费金额log'] = mzdf['自费金额'].apply(tlog)
mzdf['部分自付金额log'] = mzdf['部分自付金额'].apply(tlog)
mzdf['医保支付金额log'] = mzdf['医保支付金额'].apply(tlog)
mzdf['自费总金额log'] = mzdf['自费总金额'].apply(tlog)
mzdf['费用金额meanlog'] = mzdf['费用金额mean'].apply(tlog)
print(mzdf.shape) # (267303, 74)
mzdf.info()
"""
Int64Index: 152420 entries, 0 to 301570
Data columns (total 74 columns):
主被保险人客户号 152420 non-null int64
出险人客户号 152420 non-null int64
年龄 152420 non-null int64
出生日期 152420 non-null datetime64[ns]
收据号 152420 non-null object
医院代码 152420 non-null object
医院等级 152420 non-null int64
就诊结帐费用发生日期 152420 non-null datetime64[ns]
费用合计 152420 non-null float64
疾病代码 152420 non-null object
总案号_分案号 152420 non-null object
保单号_82200946504 152420 non-null int64
保单号_82200946505 152420 non-null int64
保单号_82200946506 152420 non-null int64
生效日期_2016-06-01 00:00:00 152420 non-null int64
生效日期_2017-06-01 00:00:00 152420 non-null int64
生效日期_2018-06-01 00:00:00 152420 non-null int64
人员属性_主被保险人 152420 non-null int64
人员属性_连带被保险人 152420 non-null int64
证件类型_0 152420 non-null int64
证件类型_1 152420 non-null int64
证件类型_8 152420 non-null int64
性别_女 152420 non-null int64
性别_男 152420 non-null int64
出险原因_意外 152420 non-null int64
出险原因_疾病 152420 non-null int64
险种代码_MIK01 152420 non-null int64
险种代码_NIK01 152420 non-null int64
险种代码_NIK02 152420 non-null int64
险种代码_NIK11 152420 non-null int64
就诊类型名称_一般门诊 152420 non-null int64
就诊类型名称_中医药 152420 non-null int64
就诊类型名称_其他约定 152420 non-null int64
就诊类型名称_接种疫苗 152420 non-null int64
就诊类型名称_普通生育门诊 152420 non-null int64
就诊类型名称_牙科治疗 152420 non-null int64
就诊类型名称_精神疾病门诊 152420 non-null int64
就诊类型名称_统筹住院 152420 non-null int64
就诊类型名称_统筹门诊 152420 non-null int64
就诊类型名称_药房购药 152420 non-null int64
就诊类型名称_门诊就诊 152420 non-null int64
就诊类型名称_门诊意外就诊 152420 non-null int64
就诊类型名称_预防性检查 152420 non-null int64
费用金额 152420 non-null float64
自费金额 152420 non-null float64
部分自付金额 152420 non-null float64
医保支付金额 152420 non-null float64
费用项目名称_中成药费 152420 non-null int64
费用项目名称_中草药 152420 non-null int64
费用项目名称_其他费 152420 non-null int64
费用项目名称_化验费 152420 non-null int64
费用项目名称_床位费 152420 non-null int64
费用项目名称_手术费 152420 non-null int64
费用项目名称_护理费 152420 non-null int64
费用项目名称_挂号费 152420 non-null int64
费用项目名称_材料费 152420 non-null int64
费用项目名称_检查费 152420 non-null int64
费用项目名称_治疗费 152420 non-null int64
费用项目名称_西药费 152420 non-null int64
费用项目名称_诊疗费 152420 non-null int64
event_num 152420 non-null int64
hos_num 152420 non-null int64
疾病代码_num 152420 non-null int64
出险人客户号_num 152420 non-null int64
自费总金额 152420 non-null float64
自费总金额占比 152420 non-null float64
医保支付金额占比 152420 non-null float64
费用金额mean 152420 non-null float64
费用金额log 152420 non-null float64
自费金额log 152420 non-null float64
部分自付金额log 152420 non-null float64
医保支付金额log 152420 non-null float64
自费总金额log 152420 non-null float64
费用金额meanlog 152420 non-null float64
dtypes: datetime64[ns](2), float64(15), int64(53), object(4)
memory usage: 87.2+ MB
"""
mzdf.to_csv('../data/mz_all_claim.csv', encoding='gbk', index=True)
if mode == 3:
"""
收据压成事件
同一时间、同一医院、同一科室(没有科室的话,就同一诊断)
只要把费用和相对应的就诊类型名称相加即可
"""
mzdf = pd.read_csv('../data/mz_all_receipt.csv', encoding='gbk', index_col=0, parse_dates=['出生日期', '就诊结帐费用发生日期'])
flist = [
'费用金额', '自费金额', '部分自付金额', '医保支付金额',
'费用项目名称_中成药费', '费用项目名称_中草药', '费用项目名称_其他费', '费用项目名称_化验费',
'费用项目名称_床位费', '费用项目名称_手术费', '费用项目名称_护理费', '费用项目名称_挂号费',
'费用项目名称_材料费', '费用项目名称_检查费', '费用项目名称_治疗费', '费用项目名称_西药费',
'费用项目名称_诊疗费'
]
train_removedg = mzdf.groupby(['总案号_分案号', '医院代码', '疾病代码', '就诊结帐费用发生日期'])[flist].sum()
mzdf = mzdf.drop_duplicates(subset=['总案号_分案号', '医院代码', '疾病代码', '就诊结帐费用发生日期'], keep='first')
print(mzdf.shape) # (267303, 60)
for i in flist:
del mzdf[i]
mzdf = pd.merge(mzdf, train_removedg, how='left', left_on=['总案号_分案号', '医院代码', '疾病代码', '就诊结帐费用发生日期'], right_index=True)
mzdf.to_csv('../data/mz_all_event.csv', encoding='gbk', index=True)
if mode == 2:
"""
明细压成收据
只要把费用和相对应的就诊类型名称相加即可
"""
mzdf = pd.read_csv('../data/mz_all.csv', index_col=0, encoding='gbk', parse_dates=['生效日期', '出生日期', '就诊结帐费用发生日期'])
mzdf1 = pd.read_csv('../data/mz_all(1).csv', encoding='gbk', parse_dates=['生效日期', '出生日期', '就诊结帐费用发生日期'])
mzdf['费用项目名称'] = mzdf1['费用项目名称']
del mzdf1
del mzdf['ROWNUM']
        # drop name columns, since the corresponding code columns are kept
del mzdf['主被保险人']
del mzdf['出险人姓名']
del mzdf['险种名称']
del mzdf['医院名称']
del mzdf['费用项目代码']
del mzdf['疾病名称']
        # Consolidate visit-type labels. Use Series.replace instead of chained
        # indexing, which can fail silently with a SettingWithCopyWarning.
        visit_type_map = {
            '牙科医疗': '牙科治疗', '牙齿护理': '牙科治疗', '牙科护理': '牙科治疗',
            '牙科保健': '牙科治疗', '紧急牙科治疗': '牙科治疗',
            '普通生育门诊': '生育',
            '门诊疾病就诊': '门诊就诊', '统筹门诊': '门诊就诊', '一般门诊': '门诊就诊',
            '其他约定1门诊': '门诊就诊', '门诊意外就诊': '门诊就诊', '门诊意外首次就诊': '门诊就诊',
            '住院': '住院就诊', '统筹住院': '住院就诊',
            '其他约定1': '其他约定', '统筹约定': '其他约定',
            '中医药': '药房购药',
        }
        mzdf['就诊类型名称'] = mzdf['就诊类型名称'].replace(visit_type_map)
        print(mzdf['就诊类型名称'].value_counts())
        mzdf['费用项目名称'] = mzdf['费用项目名称'].replace({'中成药': '中成药费'})
print(mzdf['费用项目名称'].value_counts())
mzdf = de.build_one_hot_features(mzdf, ['保单号', '生效日期', '人员属性', '证件类型', '性别', '出险原因', '险种代码', '就诊类型名称', '费用项目名称'])
mzdf.info()
flist = [
'费用金额', '自费金额', '部分自付金额', '医保支付金额',
'费用项目名称_中成药费', '费用项目名称_中草药', '费用项目名称_其他费', '费用项目名称_化验费',
'费用项目名称_床位费', '费用项目名称_手术费', '费用项目名称_护理费', '费用项目名称_挂号费',
'费用项目名称_材料费', '费用项目名称_检查费', '费用项目名称_治疗费', '费用项目名称_西药费',
'费用项目名称_诊疗费'
]
train_removedg = mzdf.groupby(['总案号_分案号', '收据号'])[flist].sum()
mzdf = mzdf.drop_duplicates(subset=['总案号_分案号', '收据号'], keep='first')
print(mzdf.shape)#(657371, 60)
for i in flist:
del mzdf[i]
mzdf = pd.merge(mzdf, train_removedg, how='left', left_on=['总案号_分案号', '收据号'], right_index=True)
print(mzdf[mzdf['费用合计']-mzdf['费用金额'] > 0.001])
mzdf.to_csv('../data/mz_all_receipt.csv', encoding='gbk', index=True)
if mode == 1:
"""
明细数据
"""
mzdf4 = pd.read_csv('../data/mz4.csv').iloc[:, 2:]
mzdf5 = | pd.read_csv('../data/mz5.csv') | pandas.read_csv |
import subprocess
from datetime import datetime
import pandas as pd
def sacct_jobs(account_query, d_from, d_to='', debugging=False,
write_txt='', sacct_file='', serialize_frame=''):
"""Ingest job record information from slurm via sacct and return DataFrame.
Parameters
-------
account_query: str
String query to be sent to sacct via -A flag.
d_from: date str
Beginning of the query period, e.g. '2019-04-01T00:00:00'
d_to: optional, date str
End of the query period,
e.g. '2020-01-01T00:00:00' Defaults to now if empty.
debugging: boolean, optional
Boolean for reporting progress to stdout. Default False.
write_txt: str, optional
Writes the results of the raw sacct query to given file.
If empty, no file is created. Defaults to the empty string.
sacct_file: str, optional
Loads a raw query from file.
If empty, query is rerun. Defaults to the empty string.
serialize_frame: str, optional
Pickle the resulting DataFrame.
If empty, pickling is skipped. Defaults to the empty string.
Returns
-------
DataFrame
Returns a standard pandas DataFrame, or None if no jobs found.
"""
# d_to boilerplate
if d_to == '':
now = datetime.now()
d_to = now.strftime('%Y-%m-%dT%H:%M:%S')
headers = ['jobid', 'user', 'account', 'submit', 'start', 'end',
'ncpus', 'nnodes', 'reqmem', 'timelimit', 'state',
'reqgres', 'reqtres', 'priority', 'partition']
data = ''
if sacct_file == '':
if account_query != '':
base_cmd = ['sacct', '-aX', '-A', account_query, '-S',
d_from, '-E', d_to, '-p', '--delimiter',
'"|"', '-n', '--units=M']
else:
base_cmd = ['sacct', '-aX', '-S', d_from, '-E', d_to,
'-p', '--delimiter', '"|"', '-n', '--units=M']
base_cmd.append('-o')
base_cmd.append(','.join(headers)+'%36')
data = subprocess.check_output(base_cmd).decode('UTF-8')
if write_txt != '':
with open(write_txt, 'w') as f_id:
f_id.write('%s' % data)
else:
with open(sacct_file, 'r') as f_id:
data = f_id.read()
if debugging:
print('Done sacct query')
job_frame = pd.DataFrame([x.split('"|"') for x in data.split('\n')])
job_frame = job_frame.iloc[:, :-1] # Due to split implementation...
job_frame = job_frame.iloc[:-1, :] # Due to split implementation...
# Edge case before things start to happen...
if job_frame.empty:
return None
job_frame.columns = headers
# Align sacct to elasticsearch implementation
job_frame['reqcpus'] = pd.to_numeric(job_frame['ncpus'])
job_frame['nnodes'] = pd.to_numeric(job_frame['nnodes'])
job_frame['submit'] = pd.to_datetime(job_frame['submit'])
job_frame['start'] = pd.to_datetime(job_frame['start'], errors='coerce')
job_frame['end'] = pd.to_datetime(job_frame['end'], errors='coerce')
# Fix jobs with day limits and convert to timedelta
job_frame.update(job_frame.timelimit.loc[lambda x: x.str.contains('-')]
.str.replace('-', ' days '))
job_frame['timelimit'] = | pd.to_timedelta(job_frame['timelimit'], errors='coerce') | pandas.to_timedelta |
# Copyright 2018, <NAME>, <NAME>
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# filter future warnings
import warnings
warnings.simplefilter("ignore", category=FutureWarning)
from epee import *
import numpy as np
import pandas as pd
import argparse
import logging
import time
import os
import itertools
import multiprocessing
from time import localtime, strftime
# set tensorflow verbosity
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
parser = argparse.ArgumentParser()
parser.add_argument("-a", "--conditiona", help="RNA-seq data for Condition A",
type=str, required=True)
parser.add_argument("-b", "--conditionb", help="RNA-seq data for Condition B",
type=str, required=True)
parser.add_argument("-na", "--networka", help="Network for condition A",
type=str, required=True)
parser.add_argument("-nb", "--networkb", help="Network for condition B",
type=str, required=True)
# DEFAULTS
parser.add_argument("-o", "--output", help="output directory", type=str,
default='')
parser.add_argument("-reg1", "--lregularization", help="lasso regularization \
parameter", type=float, default=0.01)
parser.add_argument("-reg2", "--gregularization", help="graph contrained \
regularization parameter", type=float, default=0.01)
parser.add_argument("-s", "--step", help="optimizer learning-rate",
type=float, default=0.0001)
parser.add_argument("-c", "--conditioning", help="Weight for the interactions \
not known", type=bool, default=True)
parser.add_argument("-r", "--runs", help="Number of independent runs", type=int,
default=20)
parser.add_argument("-i", "--iterations", help="Number of iterations",
type=int, default=100000)
parser.add_argument("-ag", "--aggregation", help="""
Method for aggregating runs. Default: "sum"
Valid options: {"mean", "median", "sum"} """,
type=str, default='sum')
parser.add_argument("-n", "--normalize", help="""
Weight normalization strategy. Default:"minmax"
Valid options: {"minmax", "log", "log10", "no"} """,
type=str, default='minmax')
parser.add_argument("-m", "--model", help="""
Model regularization choice. Default: "epee-gcl"
Valid options: {"epee-gcl","epee-l","no-penalty" """,
type=str, default='epee-gcl')
parser.add_argument("-v", "--verbose",
help="logging info levels 10, 20, or 30",
type=int, default=10)
# OPTIONAL SETTINGS
parser.add_argument("-eval", "--evaluate",
help="Evaluation mode available for Th1, Th2, Th17, \
Bmem, COAD, and AML",
type=str, default=None)
parser.add_argument("-pr", "--prefix",
help="Add prefix to the log",
type=str, default=strftime('%Y%m%d'))
# OPTIONAL FLAGS
parser.add_argument("-w", "--store_weights",
help="Store all the inferred weights",
action='store_true')
parser.add_argument("-mp", "--multiprocess",
help="multiprocess the calculation of perturb and \
regulator scores", action='store_true')
# NULL FLAG
parser.add_argument("-null", "--null",
help="Generate null scores by label permutation",
action='store_true')
# NULL SETTINGS
parser.add_argument("-d", "--seed", help="Starting seed number",
type=int, default=0)
parser.add_argument("-p", "--perturb", help="True label perturb scores. Required when running permutations for null model",
type=str, default=None)
parser.add_argument("-sg", "--shuffle_genes",
help="Generate null scores by gene permutation",
action='store_true')
def get_scores(sel):
"""To get perturb and regulator score"""
y1, w1, w1_df, y2, w2, w2_df, count = sel
# Calculate perturb scores
genescore_runi = get_perturb_scores(Y1, y1, X1, w1,
Y2, y2, X2, w2, S1, S2)
genescore_runi.columns = ['gene', 'set{}'.format(count)]
if args.null:
regscore_runi, diff_regs = get_diff_regulatory_activity(
actual_perturb['gene'][:1000],
w1_df, w2_df, top_regs=20)
else:
regscore_runi, diff_regs = get_diff_regulatory_activity(
genescore_runi['gene'][:1000],
w1_df, w2_df, top_regs=20)
regscore_runi.columns = ['gene', 'set{}'.format(count)]
return (genescore_runi, regscore_runi)
def run_epee():
"""To run EPEE with specified inputs."""
logging.info('SAMPLES: Y1: {} | Y2: {}'.format(Y1.shape[1], Y2.shape[1]))
logging.info('Tensorflow: {}'.format(tf.__version__))
logging.info('GENES: {}'.format(Y1.shape[0]))
logging.info('TFs: {}'.format(S1.shape[1]))
logging.info('MODEL LEARNING STARTED')
genescore_df = pd.DataFrame()
regscore_df = pd.DataFrame()
loss_runs = []
y1_s = []
y2_s = []
w1_s = []
w2_s = []
w1S1_s = []
w2S2_s = []
for rid in range(args.runs):
start = time.time()
logging.debug('Tensorflow: {}'.format(tf.__version__))
logging.debug('MODEL: {} learning Y1'.format(rid))
y1, w1, loss_arr1 = run_model(np.array(Y1), np.array(X1),
np.array(S1),
l_reg=args.lregularization,
g_reg=args.gregularization,
step=args.step,
itr=args.iterations,
log_itr=round(args.iterations/20),
seed=rid+args.seed,
model=args.model,
val=condition_val)
logging.debug('MODEL: {} learning Y2'.format(rid))
y2, w2, loss_arr2 = run_model(np.array(Y2), np.array(X2),
np.array(S2),
l_reg=args.lregularization,
g_reg=args.gregularization,
step=args.step,
itr=args.iterations,
log_itr=round(args.iterations/20),
seed=rid+args.seed,
model=args.model,
val=condition_val)
loss_runs.append((rid, loss_arr1[-1], loss_arr2[-1]))
# Calculate w1S1 and w2S2
w1_s1 = np.multiply(w1, S1)
w2_s2 = np.multiply(w2, S2)
w1_df = get_weights_df(w1_s1, Y1.index, X1.index)
w2_df = get_weights_df(w2_s2, Y2.index, X2.index)
w1o_df = get_weights_df(w1, Y1.index, X1.index)
w2o_df = get_weights_df(w2, Y2.index, X2.index)
# Store dataframes
y1_s.append(y1)
y2_s.append(y2)
w1_s.append(w1)
w2_s.append(w2)
w1S1_s.append(w1_df)
w2S2_s.append(w2_df)
# Output inferred weights if args.store_weights is True and args.null is False
if args.store_weights and not args.null:
w1o_df.to_csv('{}/model/w1_{}.txt'.format(outdir, rid),
sep='\t')
w2o_df.to_csv('{}/model/w2_{}.txt'.format(outdir, rid),
sep='\t')
if rid == 0:
S1.to_csv('{}/model/S1_input.txt'.format(outdir),
sep='\t')
S2.to_csv('{}/model/S2_input.txt'.format(outdir),
sep='\t')
X1.to_csv('{}/model/X1_input.txt'.format(outdir),
sep='\t')
X2.to_csv('{}/model/X2_input.txt'.format(outdir),
sep='\t')
Y1.to_csv('{}/model/Y1_input.txt'.format(outdir),
sep='\t')
Y2.to_csv('{}/model/Y2_input.txt'.format(outdir),
sep='\t')
end = time.time()
logging.info('MODEL: {} RUNTIME: {} mins'.format(rid,
round((end-start)/60, 2)))
# For each pairs of inferred weights calculate perturb and regulator scores
# logging.info('CALCULATE PERTURB AND REGULATOR SCORES')
logging.info('SCORES: pairwise comparision of all Y1 and Y2 models')
list_runs = list(range(args.runs))
pairs = list(itertools.product(list_runs, list_runs))
score_inputs = []
for count, p in enumerate(pairs):
m1, m2 = p
score_inputs.append((y1_s[m1], w1_s[m1], w1S1_s[m1],
y2_s[m2], w2_s[m2], w2S2_s[m2],
count))
if args.multiprocess:
cpu_count = multiprocessing.cpu_count()
p = multiprocessing.Pool(int(cpu_count/2))
out = p.map(get_scores, score_inputs)
else:
out = []
for i in score_inputs:
i_out = get_scores(i)
out.append(i_out)
for count, scores in enumerate(out):
genescore_runi, regscore_runi = scores
if count == 0:
genescore_df = genescore_runi.copy()
regscore_df = regscore_runi.copy()
else:
# if np.all(genescore_runi.index == genescore_df.index):
# genescore_df[genescore_runi.columns[1]] = genescore_runi.iloc[:, 1]
# else:
genescore_df = pd.merge(genescore_df, genescore_runi, on='gene')
# if np.all(regscore_runi.index == regscore_df.index):
# regscore_df[regscore_runi.columns[1]] = regscore_runi.iloc[:, 1]
# else:
regscore_df = pd.merge(regscore_df, regscore_runi, on='gene')
sum_genescore_df = get_summary_scoresdf(genescore_df, args.aggregation)
sum_regscore_df = get_summary_scoresdf(regscore_df, args.aggregation)
if args.null:
sum_regscore_df.to_csv('{}/null/regulator_scores_{}.txt'.format(
outdir, args.seed),
sep='\t')
sum_genescore_df.to_csv('{}/null/perturb_scores_{}.txt'.format(
outdir, args.seed),
sep='\t')
regscore_df.to_csv('{}/null/all_regulator_scores_{}.txt'.format(
outdir, args.seed),
sep='\t')
genescore_df.to_csv('{}/null/all_perturb_scores_{}.txt'.format(
outdir, args.seed),
sep='\t')
else:
sum_regscore_df.to_csv('{}/scores/regulator_scores.txt'.format(
outdir), sep='\t')
sum_genescore_df.to_csv('{}/scores/perturb_scores.txt'.format(
outdir), sep='\t')
regscore_df.to_csv('{}/scores/all_regulator_scores.txt'.format(
outdir), sep='\t')
genescore_df.to_csv('{}/scores/all_perturb_scores.txt'.format(
outdir), sep='\t')
loss_df = pd.DataFrame(loss_runs)
loss1_df = | pd.DataFrame(loss_arr1) | pandas.DataFrame |
from PIL import Image
from sklearn import datasets
import numpy as np
import pandas as pd
# For repeatable results from keras
from numpy.random import seed
seed(1)
from sklearn.model_selection import train_test_split,cross_validate,GridSearchCV, cross_val_predict,cross_val_score
from sklearn.metrics import f1_score,confusion_matrix,classification_report,roc_curve, auc, mean_squared_error,make_scorer,mean_absolute_error,r2_score
from sklearn.svm import SVR
import matplotlib.pyplot as plt
import statsmodels
from scipy.stats import zscore,norm
import itertools
import statsmodels.api as sm
import statsmodels.formula.api as smf
import seaborn as sns
from sklearn.datasets import fetch_openml
mnist_dataset = fetch_openml('mnist_784')
import os
import pickle  # used by load_pickle / save_pickle below
import keras
from keras.models import Model
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Input, Add, Dense, Activation,Dropout, BatchNormalization, Flatten, Conv2D, MaxPooling2D,Conv3D, MaxPooling3D
from keras import regularizers
from keras import backend as K
from keras.optimizers import Adam, SGD
from keras.callbacks import EarlyStopping
def create_train_data(X,y,testing_dataset_size=0.3):
training_dataset_size = 1-testing_dataset_size
X_train, X_test, y_train, y_test = train_test_split(X,y, test_size=testing_dataset_size,shuffle=True,random_state=42)
print("Dataset is randomly split into %s%% test dataset and %s%% train dataset"%(testing_dataset_size*100,training_dataset_size*100))
print("Total number of subjects in training dataset: %s"%len(X_train))
print("Total number of subjects in testing dataset: %s"%len(X_test))
return X_train, X_test, y_train, y_test
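# Hedged usage sketch with a small bundled sklearn dataset (purely illustrative):
def _example_split():
    iris = datasets.load_iris()
    return create_train_data(iris.data, iris.target, testing_dataset_size=0.2)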
def tune_params(est,parameters,train_df,train_y,cross_val=5,njobs=-1,verbose=2):
scorer = make_scorer(mean_squared_error, greater_is_better=False)
est_grid = GridSearchCV(est, parameters,cv=cross_val,n_jobs=njobs,verbose=verbose,scoring=scorer)
est_grid.fit(train_df, train_y)
return est_grid.best_estimator_
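# Hedged sketch: a small SVR grid; the parameter values are arbitrary examples,
# not recommended settings.
def _example_tune_svr(train_df, train_y):
    grid = {"C": [0.1, 1, 10], "epsilon": [0.01, 0.1], "kernel": ["rbf", "linear"]}
    return tune_params(SVR(), grid, train_df, train_y, cross_val=3)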
def plot_regression_results(ax, y_true, y_pred, title, scores):
"""Scatter plot of the predicted vs true targets."""
ax.plot([y_true.min()-1, y_true.max()+1],[y_true.min()-1, y_true.max()+1],'--r', linewidth=2)
ax.scatter(y_true, y_pred, alpha=0.7)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
ax.spines['left'].set_position(('outward', y_true.max()+1))
ax.spines['bottom'].set_position(('outward', y_true.max()+1))
ax.set_xlim([y_true.min(), y_true.max()])
ax.set_ylim([y_true.min(), y_true.max()])
ax.set_xlabel('Actual Value')
ax.set_ylabel('Predicted Value')
extra = plt.Rectangle((0, 0), 0, 0, fc="w", fill=False,edgecolor='none', linewidth=0)
ax.legend([extra], [scores], loc='upper left')
    ax.set_title(title)
def load_pickle(filename):
model = pickle.load(open(filename, 'rb'))
return model
def save_pickle(model,filename):
pickle.dump(model,open(filename, 'wb'))
def concat_images(imga, imgb):
"""
Combines two color image ndarrays side-by-side.
"""
ha,wa = imga.shape[:2]
hb,wb = imgb.shape[:2]
max_height = np.max([ha, hb])
total_width = wa+wb
new_img = np.zeros(shape=(max_height, total_width))
new_img[:ha,:wa]=imga
new_img[:hb,wa:wa+wb]=imgb
return new_img
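# Minimal sketch: two equally sized grayscale images placed side by side.
def _example_concat():
    a = np.zeros((28, 28))
    b = np.ones((28, 28))
    return concat_images(a, b).shape  # expected: (28, 56)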
def extract_image(num,mnist_dataset,resize,new_shape,algo='svr'):
"""
Used for data simulation.Returns the image
num: string. The string of number and a digit is replaced with an actual image.
"""
target = mnist_dataset.target
mnist_image = mnist_dataset.data
img_shape = (int(np.sqrt(mnist_image.shape[-1])) , int(np.sqrt(mnist_image.shape[-1])))
image= []
for j in range(len(num)):
raw_img = mnist_image[np.argwhere(target == num[j])[0]][0]
raw_img = raw_img.reshape(img_shape)
new_img = Image.fromarray(raw_img)
img = new_img.resize(size=(new_shape[0],new_shape[1]))
img = np.asarray(img)
if j==0:
output = img
else:
output = concat_images(output, img)
if algo == 'svr':
output_ = output.flatten()
# if not cnn:
# output_ = output.flatten()
if algo == 'svr':
output = output_
image.append(output)
return np.concatenate(image)
def visualize_dataset(M,z,idx,shape):
"""
M: multi-dimensional mediator image
z:
idx: the index of image to visualize
shape: tuple of mediator shape
"""
import matplotlib.pyplot as plt
x,y = shape[0],shape[1]*4
plt.imshow(M[idx].reshape(x,y))
plt.title("The z value for this image is %s"%str(z[idx])[2:6])
plt.show()
def simulate_dataset(num_subs, resize,new_shape,visualize,algo='svr',alpha=0,std=1):
"""
Simulate the dataset.
num_subs :int; number of subjects;
resize: bool; if you want to resize the images;
new_shape: list of int; new shape of the images
"""
#Simulate X with fixed
X = np.random.normal(0,1,num_subs)
a0,e0 = -0.1,np.random.normal(0,std,num_subs)
m = a0 + alpha*X + e0
z = norm.cdf(m)
#Simulate images m
M = []
floating_pt = [str(i)[2:6].zfill(4) for i in z] # makes sure that there are 4 digits after decimal point
for num in floating_pt:
image = extract_image(num,mnist_dataset,resize,new_shape,algo)
M.append(image)
M = np.array(M) #multi-dimensional
if algo != 'svr':
M = M[:,:,:,np.newaxis] # for keras to add an extra dimension
#simulate Y ratings
beta,gamma,b0 = 4,5,6 #4,5,6
error = np.random.normal(0,std,num_subs)
Y = b0 + gamma*X + beta*m + error
df = | pd.DataFrame() | pandas.DataFrame |
from collections import OrderedDict
from datetime import datetime, timedelta
import numpy as np
import numpy.ma as ma
import pytest
from pandas._libs import iNaT, lib
from pandas.core.dtypes.common import is_categorical_dtype, is_datetime64tz_dtype
from pandas.core.dtypes.dtypes import (
CategoricalDtype,
DatetimeTZDtype,
IntervalDtype,
PeriodDtype,
)
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Index,
Interval,
IntervalIndex,
MultiIndex,
NaT,
Period,
Series,
Timestamp,
date_range,
isna,
period_range,
timedelta_range,
)
import pandas._testing as tm
from pandas.core.arrays import IntervalArray, period_array
class TestSeriesConstructors:
@pytest.mark.parametrize(
"constructor,check_index_type",
[
# NOTE: some overlap with test_constructor_empty but that test does not
# test for None or an empty generator.
# test_constructor_pass_none tests None but only with the index also
# passed.
(lambda: Series(), True),
(lambda: Series(None), True),
(lambda: Series({}), True),
(lambda: | Series(()) | pandas.Series |
import math
import traceback
import click
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
import os
import numpy as np
import tensorflow as tf
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator
"""
plot the performance of core from TensorBoard Log History
"""
DEFAULT_SIZE_GUIDANCE = {
"scalars": 0,
"tensors": 0,
}
flatui = ["#9b59b6", "#3498db", "#95a5a6", "#e74c3c",
"#34495e", "#2ecc71", "#e67e22", "#f1c40f"]
material = ["#E91E63", "#FFC107", "#9C27B0", "#3F51B5",
"#2196F3", "#009688", "#795548", "#607D8B"]
sns.set(style="white", font_scale=1.0, rc={
"lines.linewidth": 1.2}, palette=sns.color_palette(flatui))
def plot_data(data, x_axis='num episodes', y_axis="average reward", hue="algorithm", smooth=1, ax=None, **kwargs):
if smooth > 1:
"""
smooth data with moving window average.
that is,
smoothed_y[t] = average(y[t-k], y[t-k+1], ..., y[t+k-1], y[t+k])
where the "smooth" param is width of that window (2k+1)
"""
y = np.ones(smooth)
for datum in data:
x = np.asarray(datum[y_axis])
z = np.ones(len(x))
smoothed_x = np.convolve(x, y, 'same') / np.convolve(z, y, 'same')
datum[y_axis] = smoothed_x
if isinstance(data, list):
data = pd.concat(data, ignore_index=True, sort=True)
sns.lineplot(data=data, x=x_axis, y=y_axis,
hue=hue, ci='sd', ax=ax, **kwargs)
# ax.legend(loc='best').set_draggable(True)
"""Spining up style"""
ax.legend(loc='upper center', ncol=9, handlelength=1, frameon=False,
mode="expand", borderaxespad=0.02, prop={'size': 8})
xscale = np.max(np.asarray(data[x_axis])) > 5e3
if xscale:
# Just some formatting niceness: x-axis scale in scientific notation if max x is large
ax.ticklabel_format(style='sci', axis='x', scilimits=(0, 0))
plt.tight_layout(pad=1.2)
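# Minimal sketch of the moving-window smoothing applied above: same-mode
# convolutions of the signal and of an all-ones vector give a length-preserving
# running mean with correct normalization at the edges.
def _example_smooth(values, smooth=5):
    y = np.ones(smooth)
    z = np.ones(len(values))
    return np.convolve(values, y, "same") / np.convolve(z, y, "same")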
def load_event_scalars(log_path):
feature = log_path.split(os.sep)[-1]
print(f"Processing logfile: {os.path.abspath(log_path)}")
if feature.find("_") != -1:
feature = feature.split("_")[-1]
df = | pd.DataFrame() | pandas.DataFrame |
import datetime
import pathlib
import pickle
from io import BytesIO
from unittest.mock import MagicMock, patch
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pytest
import yaml
from sklearn.base import BaseEstimator, RegressorMixin
from sklearn.dummy import DummyClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.pipeline import Pipeline
from ml_tooling import Model
from ml_tooling.data import Dataset
from ml_tooling.logging import Log
from ml_tooling.metrics import Metrics, Metric
from ml_tooling.result import Result
from ml_tooling.search import Real
from ml_tooling.storage import FileStorage
from ml_tooling.transformers import DFStandardScaler, DFFeatureUnion
from ml_tooling.utils import MLToolingError, DatasetError
plt.switch_backend("agg")
class TestBaseClass:
def test_is_properties_works(
self, classifier: Model, regression: Model, pipeline_linear: Pipeline
):
assert classifier.is_regressor is False
assert classifier.is_classifier is True
assert regression.is_regressor is True
assert regression.is_classifier is False
assert classifier.is_pipeline is False
assert regression.is_pipeline is False
pipeline = Model(pipeline_linear)
assert pipeline.is_pipeline is True
def test_instantiate_model_with_non_estimator_pipeline_fails(self):
example_pipe = Pipeline([("scale", DFStandardScaler)])
with pytest.raises(
MLToolingError,
match="You passed a Pipeline without an estimator as the last step",
):
Model(example_pipe)
def test_instantiate_model_with_feature_pipeline_sets_estimator_correctly(self):
example_pipe = Pipeline([("scale", DFStandardScaler)])
clf = LinearRegression()
model = Model(clf, feature_pipeline=example_pipe)
expected = Pipeline([("features", example_pipe), ("estimator", clf)])
assert model.estimator.steps == expected.steps
def test_instantiate_model_with_other_object_fails(self):
with pytest.raises(
MLToolingError,
match="Expected a Pipeline or Estimator - got <class 'dict'>",
):
Model({})
def test_default_metric_getter_works_as_expected_classifier(self):
rf = Model(RandomForestClassifier(n_estimators=10))
assert rf.config.CLASSIFIER_METRIC == "accuracy"
assert rf.config.REGRESSION_METRIC == "r2"
assert rf.default_metric == "accuracy"
rf.default_metric = "fowlkes_mallows_score"
assert rf.config.CLASSIFIER_METRIC == "fowlkes_mallows_score"
assert rf.config.REGRESSION_METRIC == "r2"
assert rf.default_metric == "fowlkes_mallows_score"
rf.config.reset_config()
def test_default_metric_getter_works_as_expected_regressor(self):
linreg = Model(LinearRegression())
assert linreg.config.CLASSIFIER_METRIC == "accuracy"
assert linreg.config.REGRESSION_METRIC == "r2"
assert linreg.default_metric == "r2"
linreg.default_metric = "neg_mean_squared_error"
assert linreg.config.CLASSIFIER_METRIC == "accuracy"
assert linreg.config.REGRESSION_METRIC == "neg_mean_squared_error"
assert linreg.default_metric == "neg_mean_squared_error"
linreg.config.reset_config()
def test_default_metric_works_as_expected_without_pipeline(self):
rf = Model(RandomForestClassifier(n_estimators=10))
linreg = Model(LinearRegression())
assert "accuracy" == rf.default_metric
assert "r2" == linreg.default_metric
rf.config.CLASSIFIER_METRIC = "fowlkes_mallows_score"
linreg.config.REGRESSION_METRIC = "neg_mean_squared_error"
assert "fowlkes_mallows_score" == rf.default_metric
assert "neg_mean_squared_error" == linreg.default_metric
rf.config.reset_config()
linreg.config.reset_config()
def test_default_metric_works_as_expected_with_pipeline(
self, pipeline_logistic: Pipeline, pipeline_linear: Pipeline
):
logreg = Model(pipeline_logistic)
linreg = Model(pipeline_linear)
assert "accuracy" == logreg.default_metric
assert "r2" == linreg.default_metric
logreg.config.CLASSIFIER_METRIC = "fowlkes_mallows_score"
linreg.config.REGRESSION_METRIC = "neg_mean_squared_error"
assert "fowlkes_mallows_score" == logreg.default_metric
assert "neg_mean_squared_error" == linreg.default_metric
logreg.config.reset_config()
linreg.config.reset_config()
def test_regression_model_can_be_saved(
self, classifier: Model, tmp_path: pathlib.Path, train_iris_dataset
):
classifier.score_estimator(train_iris_dataset)
load_storage = FileStorage(tmp_path)
storage = FileStorage(tmp_path)
saved_model_path = classifier.save_estimator(storage)
assert saved_model_path.exists()
loaded_model = classifier.load_estimator(saved_model_path, storage=load_storage)
assert loaded_model.estimator.get_params() == classifier.estimator.get_params()
def test_regression_model_filename_is_generated_correctly(
self, classifier: Model, tmp_path: pathlib.Path, train_iris_dataset
):
storage = FileStorage(tmp_path)
saved_model_path = classifier.save_estimator(storage)
assert saved_model_path.exists()
assert datetime.datetime.strptime(
saved_model_path.stem, f"{classifier.estimator_name}_%Y_%m_%d_%H_%M_%S_%f"
)
def test_save_model_saves_pipeline_correctly(
self, pipeline_logistic: Pipeline, tmp_path: pathlib.Path, train_iris_dataset
):
model = Model(pipeline_logistic)
model.train_estimator(train_iris_dataset)
saved_model_path = model.save_estimator(FileStorage(tmp_path))
assert saved_model_path.exists()
@patch("ml_tooling.logging.log_estimator.get_git_hash")
def test_save_estimator_saves_logging_dir_correctly(
self, mock_hash: MagicMock, classifier: Model, tmp_path: pathlib.Path
):
mock_hash.return_value = "1234"
with classifier.log(str(tmp_path)):
expected_file = classifier.save_estimator(FileStorage(tmp_path))
assert expected_file.exists()
assert (
"LogisticRegression" in [str(file) for file in tmp_path.rglob("*.yaml")][0]
)
mock_hash.assert_called_once()
def test_save_estimator_with_prod_flag_saves_correctly(self, classifier: Model):
mock_storage = MagicMock()
classifier.save_estimator(mock_storage, prod=True)
mock_storage.save.assert_called_once_with(
classifier.estimator, "production_model.pkl", prod=True
)
def test_save_estimator_uses_default_storage_if_no_storage_is_passed(
self, tmp_path: pathlib.Path, classifier: Model
):
classifier.config.ESTIMATOR_DIR = tmp_path
classifier.save_estimator()
models = classifier.config.default_storage.get_list()
assert len(models) == 1
new_classifier = Model.load_estimator(models[0])
assert (
classifier.estimator.get_params() == new_classifier.estimator.get_params()
)
@patch("ml_tooling.baseclass.import_path")
def test_can_load_production_estimator(
self, mock_path: MagicMock, classifier: Model
):
buffer = BytesIO()
pickle.dump(classifier.estimator, buffer)
buffer.seek(0)
mock_path.return_value.__enter__.return_value = buffer
model = Model.load_production_estimator("test")
assert isinstance(model, Model)
assert isinstance(model.estimator, BaseEstimator)
def test_log_context_manager_works_as_expected(self, regression: Model):
assert regression.config.LOG is False
assert "runs" == regression.config.RUN_DIR.name
with regression.log("test"):
assert regression.config.LOG is True
assert "test" == regression.config.RUN_DIR.name
assert "runs" == regression.config.RUN_DIR.parent.name
assert regression.config.LOG is False
assert "runs" == regression.config.RUN_DIR.name
assert "test" not in regression.config.RUN_DIR.parts
def test_log_context_manager_logs_when_scoring_model(
self, tmp_path: pathlib.Path, train_iris_dataset
):
model = Model(LinearRegression())
runs = tmp_path / "runs"
with model.log(str(runs)):
result = model.score_estimator(train_iris_dataset)
for file in runs.rglob("LinearRegression_*"):
with file.open() as f:
log_result = yaml.safe_load(f)
assert result.metrics.score == log_result["metrics"]["r2"]
assert result.model.estimator_name == log_result["estimator_name"]
def test_test_models_logs_when_given_dir(
self, tmp_path: pathlib.Path, train_iris_dataset
):
test_models_log = tmp_path / "test_estimators"
Model.test_estimators(
train_iris_dataset,
[
RandomForestClassifier(n_estimators=10),
DummyClassifier(strategy="prior"),
],
log_dir=str(test_models_log),
metrics="accuracy",
)
for file in test_models_log.rglob("*.yaml"):
with file.open() as f:
result = yaml.safe_load(f)
model_name = result["model_name"]
assert model_name in {
"IrisData_RandomForestClassifier",
"IrisData_DummyClassifier",
}
def test_dump_serializes_correctly_without_pipeline(self, regression: Model):
serialized_model = regression.to_dict()
expected = [
{
"module": "sklearn.linear_model._base",
"classname": "LinearRegression",
"params": {
"copy_X": True,
"fit_intercept": True,
"n_jobs": None,
"normalize": False,
},
}
]
assert serialized_model == expected
def test_dump_serializes_correctly_with_pipeline(self, pipeline_linear: Pipeline):
serialized_model = Model(pipeline_linear).to_dict()
expected = [
{
"name": "scale",
"module": "sklearn.preprocessing._data",
"classname": "StandardScaler",
"params": {"copy": True, "with_mean": True, "with_std": True},
},
{
"name": "estimator",
"module": "sklearn.linear_model._base",
"classname": "LinearRegression",
"params": {
"copy_X": True,
"fit_intercept": True,
"n_jobs": None,
"normalize": False,
},
},
]
assert serialized_model == expected
def test_to_dict_serializes_correctly_with_feature_union(
self, feature_union_classifier: DFFeatureUnion
):
model = Model(feature_union_classifier)
result = model.to_dict()
assert len(result) == 2
union = result[0]
assert union["name"] == "features"
assert len(union["params"]) == 2
pipe1 = union["params"][0]
pipe2 = union["params"][1]
assert pipe1["name"] == "pipe1"
select1 = pipe1["params"][0]
scale1 = pipe1["params"][1]
assert select1["name"] == "select"
assert select1["classname"] == "Select"
assert select1["params"] == {
"columns": ["sepal length (cm)", "sepal width (cm)"]
}
assert scale1["name"] == "scale"
assert scale1["classname"] == "DFStandardScaler"
assert scale1["params"] == {"copy": True, "with_mean": True, "with_std": True}
assert pipe2["name"] == "pipe2"
select2 = pipe2["params"][0]
scale2 = pipe2["params"][1]
assert select2["name"] == "select"
assert select2["classname"] == "Select"
assert select2["params"] == {
"columns": ["petal length (cm)", "petal width (cm)"]
}
assert scale2["name"] == "scale"
assert scale2["classname"] == "DFStandardScaler"
assert scale2["params"] == {"copy": True, "with_mean": True, "with_std": True}
def test_from_yaml_serializes_correctly_with_feature_union(
self, feature_union_classifier: DFFeatureUnion, tmp_path: pathlib.Path
):
model = Model(feature_union_classifier)
result = model.to_dict()
log = Log(
name="test", metrics=Metrics.from_list(["accuracy"]), estimator=result
)
log.save_log(tmp_path)
new_model = Model.from_yaml(log.output_path)
assert len(new_model.estimator.steps[0][1].transformer_list) == 2
new_steps = new_model.estimator.steps
old_steps = model.estimator.steps
assert new_steps[0][0] == old_steps[0][0]
assert isinstance(new_steps[0][1], type(old_steps[0][1]))
new_union = new_steps[0][1].transformer_list
old_union = old_steps[0][1].transformer_list
assert len(new_union) == len(old_union)
for new_transform, old_transform in zip(new_union, old_union):
assert new_transform[1].steps[0][0] == old_transform[1].steps[0][0]
assert (
new_transform[1].steps[0][1].get_params()
== old_transform[1].steps[0][1].get_params()
)
def test_can_load_serialized_model_from_pipeline(
self, pipeline_linear: Pipeline, tmp_path: pathlib.Path
):
model = Model(pipeline_linear)
log = Log(
name="test",
estimator=model.to_dict(),
metrics=Metrics([Metric("accuracy", score=1.0)]),
)
log.save_log(tmp_path)
model2 = Model.from_yaml(log.output_path)
for model1, model2 in zip(model.estimator.steps, model2.estimator.steps):
assert model1[0] == model2[0]
assert model1[1].get_params() == model2[1].get_params()
def test_can_load_serialized_model_from_estimator(
self, classifier: Model, tmp_path: pathlib.Path
):
log = Log(
name="test",
estimator=classifier.to_dict(),
metrics=Metrics([Metric("accuracy", score=1.0)]),
)
log.save_log(tmp_path)
model2 = Model.from_yaml(log.output_path)
assert model2.estimator.get_params() == classifier.estimator.get_params()
class TestTrainEstimator:
def test_train_model_sets_result_to_none(
self, regression: Model, train_iris_dataset
):
assert regression.result is not None
regression.train_estimator(train_iris_dataset)
assert regression.result is None
def test_train_model_followed_by_score_model_returns_correctly(
self, pipeline_logistic: Pipeline, train_iris_dataset
):
model = Model(pipeline_logistic)
model.train_estimator(train_iris_dataset)
model.score_estimator(train_iris_dataset)
assert isinstance(model.result, Result)
def test_train_model_errors_correctly_when_not_scored(
self, pipeline_logistic: Pipeline, tmp_path: pathlib.Path, train_iris_dataset
):
model = Model(pipeline_logistic)
with pytest.raises(MLToolingError, match="You haven't scored the estimator"):
with model.log(str(tmp_path)):
model.train_estimator(train_iris_dataset)
model.save_estimator(FileStorage(tmp_path))
def test_can_score_estimator_with_no_y_value(self):
class DummyEstimator(BaseEstimator, RegressorMixin):
def __init__(self):
self.average = None
def fit(self, x, y=None):
self.average = np.mean(x, axis=0)
return self
def predict(self, x):
return self.average
class DummyData(Dataset):
def load_training_data(self):
return | pd.DataFrame({"col1": [1, 2, 3, 4], "col2": [4, 5, 6, 7]}) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 26 19:28:36 2019
@author: github.com/sahandv
"""
import sys
import gc
from tqdm import tqdm
import pandas as pd
import numpy as np
import re
from sciosci.assets import text_assets as kw
from sciosci.assets import keyword_dictionaries as kd
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
tqdm.pandas()
nltk.download('wordnet')
nltk.download('punkt')
# =============================================================================
# Read data and Initialize
# =============================================================================
year_from = 0
year_to = 2021
MAKE_SENTENCE_CORPUS = False
MAKE_SENTENCE_CORPUS_ADVANCED_KW = False
MAKE_SENTENCE_CORPUS_ADVANCED = False
MAKE_REGULAR_CORPUS = True
GET_WORD_FREQ_IN_SENTENCE = False
PROCESS_KEYWORDS = False
stops = ['a','an','we','result','however','yet','since','previously','although','propose','proposed','this','...']
nltk.download('stopwords')
stop_words = list(set(stopwords.words("english")))+stops
data_path_rel = '/mnt/16A4A9BCA4A99EAD/GoogleDrive/Data/Corpus/KPRIS/kpris_data.csv'
# data_path_rel = '/mnt/6016589416586D52/Users/z5204044/GoogleDrive/GoogleDrive/Data/Corpus/AI 4k/scopus_4k.csv'
# data_path_rel = '/mnt/6016589416586D52/Users/z5204044/GoogleDrive/GoogleDrive/Data/AI ALL 1900-2019 - reformat'
# data_path_rel = '/mnt/6016589416586D52/Users/z5204044/GoogleDrive/GoogleDrive/Data/Corpus/AI 300/merged - scopus_v2_relevant wos_v1_relevant - duplicate doi removed - abstract corrected - 05 Aug 2019.csv'
data_full_relevant = pd.read_csv(data_path_rel)
# data_full_relevant = data_full_relevant[['dc:title','authkeywords','abstract','year']]
# data_full_relevant.columns = ['TI','DE','AB','PY']
sample = data_full_relevant.sample(4)
root_dir = '/mnt/16A4A9BCA4A99EAD/GoogleDrive/Data/Corpus/KPRIS/'
subdir = 'clean/' # no_lemmatization_no_stopwords
gc.collect()
data_full_relevant['PY'] = 2018
data_full_relevant['AB'] = data_full_relevant['abstract']
data_full_relevant['TI'] = ''
data_full_relevant['DE'] = np.nan
data_full_relevant['ID'] = ''
data_full_relevant['SO'] = ''
#
data_wrong = data_full_relevant[data_full_relevant['AB'].str.contains("abstract available")].index
data_wrong = list(data_wrong)
data_full_relevant = data_full_relevant.drop(data_wrong,axis=0)
# =============================================================================
# Initial Pre-Processing :
# Following tags requires WoS format. Change them otherwise.
# =============================================================================
data_filtered = data_full_relevant.copy()
data_filtered = data_filtered[pd.notnull(data_filtered['PY'])]
data_filtered = data_filtered[data_filtered['PY'].astype('int')>year_from-1]
data_filtered = data_filtered[data_filtered['PY'].astype('int')<year_to]
# Remove rows without keywords / without an abstract
data_with_keywords = data_filtered[pd.notnull(data_filtered['DE'])]
data_with_abstract = data_filtered[pd.notnull(data_filtered['AB'])]
# Remove special chars and strings from abstracts
data_with_abstract['AB'] = data_with_abstract['AB'].progress_apply(lambda x: kw.find_and_remove_c(x) if pd.notnull(x) else np.nan).str.lower()
data_with_abstract['AB'] = data_with_abstract['AB'].progress_apply(lambda x: kw.find_and_remove_term(x,'et al.') if pd.notnull(x) else np.nan)
data_with_abstract['AB'] = data_with_abstract['AB'].progress_apply(lambda x: kw.find_and_remove_term(x,'eg.') if pd.notnull(x) else np.nan)
data_with_abstract['AB'] = data_with_abstract['AB'].progress_apply(lambda x: kw.find_and_remove_term(x,'ie.') if pd.notnull(x) else np.nan)
data_with_abstract['AB'] = data_with_abstract['AB'].progress_apply(lambda x: kw.find_and_remove_term(x,'vs.') if pd.notnull(x) else np.nan)
data_with_abstract['AB'] = data_with_abstract['AB'].progress_apply(lambda x: kw.find_and_remove_term(x,'ieee') if pd.notnull(x) else np.nan)
data_with_abstract['AB'] = data_with_abstract['AB'].progress_apply(lambda x: kw.find_and_remove_term(x,'fig.','figure') if pd.notnull(x) else np.nan)
# Remove numbers from abstracts to eliminate decimal points and other unnecessary data
# gc.collect()
abstracts = []
for abstract in tqdm(data_with_abstract['AB'].values.tolist()):
numbers = re.findall(r"[-+]?\d*\.\d+|\d+", abstract)
for number in numbers:
abstract = kw.find_and_remove_term(abstract,number)
abstracts.append(abstract)
data_with_abstract['AB'] = abstracts.copy()
del abstracts
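# A minimal alternative sketch for the number-removal loop above (not wired into the pipeline):
# the same number pattern can be stripped in a single pass with re.sub instead of calling
# kw.find_and_remove_term once per matched number.
def strip_numbers(text):
    return re.sub(r"[-+]?\d*\.\d+|\d+", " ", text)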
source_list = pd.DataFrame(data_with_abstract['SO'].values.tolist(),columns=['source'])
source_list.to_csv(root_dir+subdir+str(year_from)+'-'+str(year_to-1)+' corpus sources',index=False) # Save source list to disk for further use
year_list = pd.DataFrame(data_with_abstract['PY'].values.tolist(),columns=['year'])
year_list.to_csv(root_dir+subdir+str(year_from)+'-'+str(year_to-1)+' corpus years',index=False) # Save year indices to disk for further use
gc.collect()
# =============================================================================
# Sentence maker
# =============================================================================
if MAKE_SENTENCE_CORPUS is True:
thesaurus = pd.read_csv('data/thesaurus/thesaurus_for_ai_keyword_with_() (training).csv')
thesaurus = thesaurus.fillna('')
print("\nSentence maker and thesaurus matching. \nThis will take some time...")
data_with_abstract['AB_no_c'] = data_with_abstract['AB'].apply(lambda x: kw.find_and_remove_c(x) if pd.notnull(x) else np.nan)
sentence_corpus = []
for index,row in tqdm(data_with_abstract.iterrows(),total=data_with_abstract.shape[0]):
words = re.split('( |\\n|\.|\?|!|:|;|,|_|\[|\])',row['AB_no_c'].lower())
new_words = []
year = row['PY']
flag_word_removed = False
for w_idx,word in enumerate(words):
if flag_word_removed is True:
if word==' ':
flag_word_removed = False
continue
if word in thesaurus['alt'].values.tolist():
word_old = word
buffer_word = word
word = thesaurus[thesaurus['alt']==word]['original'].values.tolist()[0]
# print("changed '",word_old,"' to '",word,"'.")
new_words.append(word)
row = ''.join(new_words)
sentences = re.split('(\. |\? |\\n)',row)
sentences = [i+j for i,j in zip(sentences[0::2], sentences[1::2])] # re-attach each captured delimiter to the sentence that precedes it
for sentence_n in sentences:
sentence_corpus.append([index,sentence_n,year])
sentence_corpus = pd.DataFrame(sentence_corpus,columns=['article_index','sentence','year'])
sentence_corpus.to_csv(root_dir+subdir+str(year_from)+'-'+str(year_to-1)+' corpus sentences abstract-title',index=False,header=True)
gc.collect()
# =============================================================================
# Sentence maker -- Advanced --
# =============================================================================
if MAKE_SENTENCE_CORPUS_ADVANCED is True:
data_with_abstract['TI_AB'] = data_with_abstract.TI.map(str) + ". " + data_with_abstract.AB
data_fresh = data_with_abstract[['TI_AB','PY']].copy()
data_fresh['TI_AB'] = data_fresh['TI_AB'].str.lower()
del data_with_abstract
gc.collect()
data_tmp = data_fresh[1:10]
data_fresh[-2:-1]
print("\nSentence extraction")
sentences = []
years = []
indices = []
for index,row in tqdm(data_fresh.iterrows(),total=data_fresh.shape[0]):
abstract_str = row['TI_AB']
year = row['PY']
abstract_sentences = re.split('\. |\? |\\n',abstract_str)
length = len(abstract_sentences)
sentences.extend(abstract_sentences)
years.extend([year for x in range(length)])
indices.extend([index for x in range(length)])
print("\nTokenizing")
tmp = []
for sentence in tqdm(sentences):
tmp.append(word_tokenize(sentence))
sentences = tmp.copy()
del tmp
print("\nString pre processing for abstracts: lower and strip")
sentences = [list(map(str.lower, x)) for x in sentences]
sentences = [list(map(str.strip, x)) for x in sentences]
tmp = []
print("\nString pre processing for abstracts: lemmatize and stop word removal")
for string_list in tqdm(sentences, total=len(sentences)):
tmp_list = [kw.string_pre_processing(x,stemming_method='None',lemmatization='DEF',stop_word_removal=True,stop_words_extra=stops,verbose=False,download_nltk=False) for x in string_list]
tmp.append(tmp_list)
sentences = tmp.copy()
del tmp
gc.collect()
tmp = []
print("\nString pre processing for abstracts: null word removal")
for string_list in tqdm(sentences, total=len(sentences)):
tmp.append([x for x in string_list if x!=''])
sentences = tmp.copy()
del tmp
print("\nThesaurus matching")
sentences = kw.thesaurus_matching(sentences,thesaurus_file='data/thesaurus/thesaurus_for_ai_keyword_with_() (training).csv')
print("\nStitiching tokens")
tmp = []
for words in tqdm(sentences, total=len(sentences)):
tmp.append(' '.join(words))
sentences = tmp.copy()
del tmp
print("\nGB to US")
tmp = []
for sentence in tqdm(sentences, total=len(sentences)):
tmp.append(kw.replace_british_american(sentence,kd.gb2us))
sentences = tmp.copy()
del tmp
sentence_df = pd.DataFrame(indices,columns=['article_index'])
sentence_df['sentence'] = sentences
sentence_df['year'] = years
sentence_df.to_csv(root_dir+subdir+str(year_from)+'-'+str(year_to-1)+' corpus sentences abstract-title',index=False,header=True)
# =============================================================================
# Keyword Extractor
# =============================================================================
if MAKE_SENTENCE_CORPUS_ADVANCED_KW is True:
data_with_abstract['TI_AB'] = data_with_abstract.AB
data_fresh = data_with_abstract[['TI_AB','PY']].copy()
data_fresh['TI_AB'] = data_fresh['TI_AB'].str.lower()
del data_with_abstract
gc.collect()
data_tmp = data_fresh[1:10]
data_fresh[-2:-1]
print("\nSentence extraction")
sentences = []
years = []
indices = []
for index,row in tqdm(data_fresh.iterrows(),total=data_fresh.shape[0]):
abstract_str = row['TI_AB']
year = row['PY']
abstract_sentences = re.split('\\n',abstract_str)
length = len(abstract_sentences)
sentences.extend(abstract_sentences)
years.extend([year for x in range(length)])
indices.extend([index for x in range(length)])
print("\nTokenizing")
tmp = []
for sentence in tqdm(sentences):
tmp.append(word_tokenize(sentence))
sentences = tmp.copy()
del tmp
print("\nString pre processing for abstracts: lower and strip")
sentences = [list(map(str.lower, x)) for x in sentences]
sentences = [list(map(str.strip, x)) for x in sentences]
tmp = []
print("\nString pre processing for abstracts: lemmatize and stop word removal")
for string_list in tqdm(sentences, total=len(sentences)):
tmp_list = [kw.string_pre_processing(x,stemming_method='None',lemmatization='DEF',stop_word_removal=True,stop_words_extra=stops,verbose=False,download_nltk=False) for x in string_list]
tmp.append(tmp_list)
sentences = tmp.copy()
del tmp
gc.collect()
tmp = []
print("\nString pre processing ")
for string_list in tqdm(sentences, total=len(sentences)):
string_tmp = []
for token in string_list:
if token == '':
string_tmp.append(' | ')
else:
string_tmp.append(token)
tmp.append(string_tmp)
sentences = tmp.copy()
del tmp
tmp = []
print("\nString pre processing for abstracts: null word removal")
for string_list in tqdm(sentences, total=len(sentences)):
tmp.append([x for x in string_list if x!=''])
sentences = tmp.copy()
del tmp
print("\nThesaurus matching")
sentences = kw.thesaurus_matching(sentences,thesaurus_file='data/thesaurus/thesaurus_for_ai_keyword_with_() (testing).csv')
print("\nStitiching tokens")
tmp = []
for words in tqdm(sentences, total=len(sentences)):
tmp.append(' '.join(words))
sentences = tmp.copy()
del tmp
print("\nGB to US")
tmp = []
for sentence in tqdm(sentences, total=len(sentences)):
tmp.append(kw.replace_british_american(sentence,kd.gb2us))
sentences = tmp.copy()
del tmp
sentence_df = pd.DataFrame(indices,columns=['article_index'])
sentence_df['sentence'] = sentences
sentence_df['year'] = years
sentence_df.to_csv(root_dir+subdir+str(year_from)+'-'+str(year_to-1)+' corpus sentences abstract-title',index=False,header=True)
if MAKE_REGULAR_CORPUS is False:
sys.exit('Did not continue to create the regular corpus. If you want one, set MAKE_REGULAR_CORPUS to True in the init section.')
# =============================================================================
# Get word frequency in sentence corpus -- OPTIONAL
# =============================================================================
if GET_WORD_FREQ_IN_SENTENCE is True:
import pandas as pd
import numpy as np
from tqdm import tqdm
file = root_dir+subdir+str(year_from)+'-'+str(year_to-1)+' corpus abstract-title'#'/mnt/6016589416586D52/Users/z5204044/GoogleDrive/GoogleDrive/Data/corpus/AI ALL/1900-2019 corpus sentences abstract-title'
file = pd.read_csv(file)
size = 500000
unique = []
for data_start_point in tqdm(np.arange(0,file.shape[0],size)):
if data_start_point+size<file.shape[0]:
end_point = data_start_point+size
else:
end_point = file.shape[0]-1
# print(data_start_point,end_point)
str_split = list(file.sentence[data_start_point:end_point].str.split())
str_flat = pd.DataFrame([item for sublist in str_split for item in sublist])
str_flat.columns = ['words']
str_flat.head()
unique = unique+list(str_flat.words.unique())
unique = pd.DataFrame(unique)
unique.columns = ['words']
unique = list(unique.words.unique())
len(unique)
# =============================================================================
# Tokenize (Author Keywords and Abstracts+Titles)
# =============================================================================
abstracts = []
keywords = []
keywords_index = []
abstracts_pure = []
data_with_abstract['ID'] = ''
data_with_abstract['DE'] = ''
data_with_abstract['TI'] = ''
for index,paper in tqdm(data_with_abstract.iterrows(),total=data_with_abstract.shape[0]):
keywords_str = paper['DE']
keywords_index_str = paper['ID']
abstract_str = paper['AB']
title_str = paper['TI']
abstract_dic = word_tokenize(title_str+' '+abstract_str)
abstract_dic_pure = abstract_dic.copy()
if pd.notnull(paper['DE']):
keywords_dic = word_tokenize(keywords_str)
keywords.append(keywords_str.split(';'))
abstract_dic.extend(keywords_dic)
else:
keywords.append([])
if pd.notnull(paper['ID']):
keywords_index.append(keywords_index_str.split(';'))
else:
keywords_index.append([])
abstracts.append(abstract_dic)
abstracts_pure.append(abstract_dic_pure)
# Add to main df. Not necessary
data_with_abstract['AB_split'] = abstracts_pure
data_with_abstract['AB_KW_split'] = abstracts
# =============================================================================
# Strip and lower case
# =============================================================================
abstracts_pure = [list(map(str.strip, x)) for x in abstracts_pure]
abstracts_pure = [list(map(str.lower, x)) for x in abstracts_pure]
abstracts = [list(map(str.strip, x)) for x in abstracts]
abstracts = [list(map(str.lower, x)) for x in abstracts]
keywords = [list(map(str.strip, x)) for x in keywords]
keywords = [list(map(str.lower, x)) for x in keywords]
keywords_index = [list(map(str.strip, x)) for x in keywords_index]
keywords_index = [list(map(str.lower, x)) for x in keywords_index]
# =============================================================================
# Pre Process
# =============================================================================
tmp_data = []
print("\nString pre processing for ababstracts_purestracts")
for string_list in tqdm(abstracts, total=len(abstracts)):
tmp_list = [kw.string_pre_processing(x,stemming_method='None',lemmatization='DEF',stop_word_removal=True,stop_words_extra=stops,verbose=False,download_nltk=False) for x in string_list]
tmp_data.append(tmp_list)
abstracts = tmp_data.copy()
del tmp_data
tmp_data = []
for string_list in tqdm(abstracts_pure, total=len(abstracts_pure)):
tmp_list = [kw.string_pre_processing(x,stemming_method='None',lemmatization=False,stop_word_removal=True,stop_words_extra=stops,verbose=False,download_nltk=False) for x in string_list]
tmp_data.append(tmp_list)
abstracts_pure = tmp_data.copy()
del tmp_data
if PROCESS_KEYWORDS is True:
print("\nString pre processing for keywords")
tmp_data = []
for string_list in tqdm(keywords, total=len(keywords)):
tmp_list = []
for string in string_list:
tmp_sub_list = string.split()
tmp_list.append(' '.join([kw.string_pre_processing(x,stemming_method='None',lemmatization=False,stop_word_removal=True,stop_words_extra=stops,verbose=False,download_nltk=False) for x in tmp_sub_list]))
tmp_data.append(tmp_list)
keywords = tmp_data.copy()
del tmp_data
tmp_data = []
for string_list in tqdm(keywords_index, total=len(keywords_index)):
tmp_list = []
for string in string_list:
tmp_sub_list = string.split()
tmp_list.append(' '.join([kw.string_pre_processing(x,stemming_method='None',lemmatization=False,stop_word_removal=True,stop_words_extra=stops,verbose=False,download_nltk=False) for x in tmp_sub_list]))
tmp_data.append(tmp_list)
keywords_index = tmp_data.copy()
del tmp_data
#tmp_data = []
#for string_list in tqdm(keywords, total=len(keywords)):
# tmp_list = []
# for sub_string_list in string_list:
# tmp_list.append(' '.join(sub_string_list))
# tmp_data.append(tmp_list)
#keywords = tmp_data.copy()
#del tmp_data
# =============================================================================
# Clean-up dead words
# =============================================================================
tmp_data = []
for string_list in tqdm(abstracts, total=len(abstracts)):
tmp_data.append([x for x in string_list if x!=''])
abstracts = tmp_data.copy()
del tmp_data
tmp_data = []
for string_list in tqdm(abstracts_pure, total=len(abstracts_pure)):
tmp_data.append([x for x in string_list if x!=''])
abstracts_pure = tmp_data.copy()
del tmp_data
tmp_data = []
for string_list in tqdm(keywords, total=len(keywords)):
tmp_data.append([x for x in string_list if x!=''])
keywords = tmp_data.copy()
del tmp_data
tmp_data = []
for string_list in tqdm(keywords_index, total=len(keywords_index)):
tmp_data.append([x for x in string_list if x!=''])
keywords_index = tmp_data.copy()
del tmp_data
# =============================================================================
# Break-down abstracts again
# =============================================================================
tmp_data = []
for abstract in tqdm(abstracts):
words = []
for word in abstract:
words = words+word.split()
tmp_data.append(words)
abstracts = tmp_data.copy()
del tmp_data
tmp_data = []
for abstract in tqdm(abstracts_pure):
words = []
for word in abstract:
words = words+word.split()
tmp_data.append(words)
abstracts_pure = tmp_data.copy()
del tmp_data
# =============================================================================
# Thesaurus matching
# =============================================================================
print("\nThesaurus matching")
abstracts_backup = abstracts.copy()
abstracts_pure_backup = abstracts_pure.copy()
keywords_backup = keywords.copy()
keywords_index_backup = keywords_index.copy()
abstracts = abstracts_backup.copy()
abstracts_pure = abstracts_pure_backup.copy()
keywords = keywords_backup.copy()
keywords_index = keywords_index_backup.copy()
abstracts = kw.thesaurus_matching(abstracts,thesaurus_file='data/thesaurus/thesaurus_for_ai_keyword_with_() (testing).csv')
abstracts_pure = kw.thesaurus_matching(abstracts_pure,thesaurus_file='data/thesaurus/thesaurus_for_ai_keyword_with_() (testing).csv')
if PROCESS_KEYWORDS is True:
keywords = kw.thesaurus_matching(keywords)
keywords_index = kw.thesaurus_matching(keywords_index)
# =============================================================================
# Term to string corpus for co-word analysis
# =============================================================================
print("\nTerm to string corpus")
corpus_abstract = []
for words in tqdm(abstracts, total=len(abstracts)):
corpus_abstract.append(' '.join(words))
corpus_abstract_pure = []
for words in tqdm(abstracts_pure, total=len(abstracts_pure)):
corpus_abstract_pure.append(' '.join(words))
corpus_keywords = []
for words in tqdm(keywords, total=len(keywords)):
corpus_keywords.append(';'.join(words))
corpus_keywords_index = []
for words in tqdm(keywords_index, total=len(keywords_index)):
corpus_keywords_index.append(';'.join(words))
# =============================================================================
# Remove substrings :
# be careful with this one! It might remove parts of a string or half of a word
# =============================================================================
thesaurus = | pd.read_csv('data/thesaurus/to_remove.csv') | pandas.read_csv |
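# Why the warning above matters — a self-contained illustration (plain str.replace and re.sub
# stand in here for the kw removal helpers used elsewhere in this script):
demo_text = "the cation exchange is an education example"
naive_removed = demo_text.replace("cation", "")        # also clips "education" down to "edu"
safe_removed = re.sub(r"\bcation\b", "", demo_text)    # a word boundary leaves "education" intact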
from . import analysis
import numpy as np
import pandas as pd
def geneify(df, tx_df):
if df.columns.nlevels>1:
tx_df_multiindex=tx_df.copy()
tx_df_multiindex.columns=pd.MultiIndex.from_product([tx_df.columns]+[['']]*(len(df.columns.levels)-1))
else:
tx_df_multiindex=tx_df
df_withinfo=pd.concat([tx_df_multiindex,df], axis=1, join='inner').groupby('ENSG').sum()
return df_withinfo
def add_gene_info(df, genes_df, join='outer'):
if df.columns.nlevels>1:
genes_df_multiindex=genes_df.copy()
genes_df_multiindex.columns=pd.MultiIndex.from_product([genes_df.columns]+[['']]*(len(df.columns.levels)-1))
else:
genes_df_multiindex=genes_df
df_withinfo=pd.concat([genes_df_multiindex,df], axis=1, join=join)
return df_withinfo
def rna_stats(exons_stats_file, introns_stats_file, intergene_stats_file, tx_df, genes_df, chr_norm):
np.seterr(divide='ignore', invalid='ignore')
data_exons=pd.read_csv(exons_stats_file, delimiter=" ", header=None, names=['Count','ENST','uniqmapping','istrans'])
data_tx=data_exons.join(tx_df, on='ENST').drop('ENST',axis=1).groupby(['ENSG','uniqmapping','istrans']).sum().unstack(level=[1,2]).fillna(0)
data_intergenic=pd.read_csv(intergene_stats_file, delimiter=" ", header=None, names=['Count','ENSG','uniqmapping','istrans']).groupby(['ENSG','uniqmapping','istrans']).sum().unstack(level=[1,2]).fillna(0)
data_gb=pd.read_csv(introns_stats_file, delimiter=" ", header=None, names=['Count','ENSG','uniqmapping','istrans']).groupby(['ENSG','uniqmapping','istrans']).sum().unstack(level=[1,2]).fillna(0)
mydf={}
n=['exons','introns']
for nn, data_bygene in zip(n,[data_tx, data_gb]):
unq=data_bygene.loc[:, (slice(None),[1],slice(None))].sum(axis=1,level=2)
alls=data_bygene.sum(axis=1,level=2)
unq['NumContacts']=unq.sum(axis=1)
alls['NumContacts']=alls.sum(axis=1)
mydf[nn]=pd.concat([unq,alls], axis=1, keys=['unq','all']).fillna(0)
# x=add_gene_info(mydf, genes_df).sort_values(('unq','tot'), ascending=False)
x0=pd.concat([mydf[k] for k in n], keys=n, axis=1).fillna(0).reorder_levels([1,0,2],axis=1) #.sort_index(level=[0,1], axis=1)
x=add_gene_info(x0,genes_df[['chr']], join='inner')
mynorm=x['chr'].map(chr_norm).values
for k in ['unq','all']:
for kk in [-1,0,1,'NumContacts']:
x[(k,"all",kk)]=x[(k,'exons',kk)]+x[(k,'introns',kk)]
# for kk in [-1,0,1]:
for hh in ['exons','introns','all']:
x[(k,hh,-1)]=x[(k,hh,-1)].values/x[(k,hh,'NumContacts')].values*100 #ambiguous
Ntrans=x[(k,hh,1)].values
Ncis=x[(k,hh,0)].values
x[(k,hh,0)]=Ntrans/(Ntrans+Ncis)*100 #trans
x[(k,hh,1)]=(Ntrans/Ncis)*mynorm # trans/cis ratio normalized by chromosome length (trans_chr_score)
Nreads=x[(k,hh,'NumContacts')].sum()/1000000
x[(k,hh,'CPM')]=x[(k,hh,'NumContacts')]/Nreads
x[(k,'all','%exons')]=x[(k,'exons','NumContacts')].values/(x[(k,'exons','NumContacts')].values+x[(k,'introns','NumContacts')].values)*100
x.rename(columns={-1:'%ambiguousDNA',0:'%trans',1:'trans_chr_score'}, inplace=True, level=2)
x.sort_index(level=[1,2], axis=1, inplace=True)
data_all=x.xs('all', level=0, axis=1).sort_values(('all','NumContacts'), ascending=False).reindex(['NumContacts','CPM','%exons','%ambiguousDNA','%trans','trans_chr_score'], axis=1, level=1)
data_unq=x.xs('unq', level=0, axis=1).sort_values(('all','NumContacts'), ascending=False).reindex(['NumContacts','CPM','%exons','%ambiguousDNA','%trans','trans_chr_score'], axis=1, level=1)
data_intergenic.columns=data_intergenic.columns.droplevel([0,1])
ig_tot=data_intergenic.sum(axis=1)
# # for c in [-1,0,1]:
# # data_intergenic[c]=data_intergenic[c].values/data_intergenic['tot'].values*100
ig_Ntrans=data_intergenic[1].values
ig_Ncis=data_intergenic[0].values
ig_Nambig=data_intergenic[-1].values
data_intergenic[0]=ig_Ntrans/(ig_Ntrans+ig_Ncis)*100 #trans
data_intergenic[-1]=ig_Nambig/ig_tot # ambiguous
data_intergenic[1]=ig_tot
# # data_intergenic.drop([1], axis=1)
# # Nreads=x[(k,hh,'tot')].sum()/1000000
data_intergenic.rename(columns={-1:'%ambiguousDNA',0:'%trans',1:'N'}, inplace=True)
data_intergenic.columns.names=[None]
return data_unq, data_all, data_intergenic
def rna_stats2(stats_file, tx_df, genes_df, chr_norm, tx_start='ENST', gene_start='ENSG'):
np.seterr(divide='ignore', invalid='ignore')
data=pd.read_csv(stats_file, delimiter=" ", header=None, names=['Count','ENSN','uniqmapping','istrans'])
data_exons=data.loc[data['ENSN'].str.startswith(tx_start)].copy()
data_exons.rename(columns={"ENSN": "ENST"}, inplace=True)
# data_exons=pd.read_csv(exons_stats_file, delimiter=" ", header=None, names=['Count','ENST','uniqmapping','istrans'])
data_tx=data_exons.join(tx_df, on='ENST').drop('ENST',axis=1).groupby(['ENSG','uniqmapping','istrans']).sum().unstack(level=[1,2]).fillna(0)
data_intergenic=data.loc[data['ENSN'].str.startswith('*')].copy()
data_intergenic.rename(columns={"ENSN": "ENSG"}, inplace=True)
data_intergenic=data_intergenic.groupby(['ENSG','uniqmapping','istrans']).sum().unstack(level=[1,2]).fillna(0)
# pd.read_csv(intergene_stats_file, delimiter=" ", header=None, names=['Count','ENSG','uniqmapping','istrans']).groupby(['ENSG','uniqmapping','istrans']).sum().unstack(level=[1,2]).fillna(0)
data_gb=data.loc[data['ENSN'].str.startswith(gene_start)].copy()
data_gb.rename(columns={"ENSN": "ENSG"}, inplace=True)
data_gb=data_gb.groupby(['ENSG','uniqmapping','istrans']).sum().unstack(level=[1,2]).fillna(0)
# pd.read_csv(introns_stats_file, delimiter=" ", header=None, names=['Count','ENSG','uniqmapping','istrans'])
mydf={}
n=['exons','introns']
for nn, data_bygene in zip(n,[data_tx, data_gb]):
unq=data_bygene.loc[:, (slice(None),[1],slice(None))].sum(axis=1,level=2)
alls=data_bygene.sum(axis=1,level=2)
unq['NumContacts']=unq.sum(axis=1)
alls['NumContacts']=alls.sum(axis=1)
mydf[nn]=pd.concat([unq,alls], axis=1, keys=['unq','all']).fillna(0)
# x=add_gene_info(mydf, genes_df).sort_values(('unq','tot'), ascending=False)
x0=pd.concat([mydf[k] for k in n], keys=n, axis=1).fillna(0).reorder_levels([1,0,2],axis=1) #.sort_index(level=[0,1], axis=1)
x=add_gene_info(x0,genes_df[['chr']], join='inner')
mynorm=x['chr'].map(chr_norm).values
for k in ['unq','all']:
for kk in [-1,0,1,'NumContacts']:
x[(k,"all",kk)]=x[(k,'exons',kk)]+x[(k,'introns',kk)]
# for kk in [-1,0,1]:
for hh in ['exons','introns','all']:
x[(k,hh,-1)]=x[(k,hh,-1)].values/x[(k,hh,'NumContacts')].values*100 #ambiguous
Ntrans=x[(k,hh,1)].values
Ncis=x[(k,hh,0)].values
x[(k,hh,0)]=Ntrans/(Ntrans+Ncis)*100 #trans
x[(k,hh,1)]=(Ntrans/Ncis)*mynorm # trans/cis ratio normalized by chromosome length (trans_chr_score)
Nreads=x[(k,hh,'NumContacts')].sum()/1000000
x[(k,hh,'CPM')]=x[(k,hh,'NumContacts')]/Nreads
x[(k,'all','%exons')]=x[(k,'exons','NumContacts')].values/(x[(k,'exons','NumContacts')].values+x[(k,'introns','NumContacts')].values)*100
x.rename(columns={-1:'%ambiguousDNA',0:'%trans',1:'trans_chr_score'}, inplace=True, level=2)
x.sort_index(level=[1,2], axis=1, inplace=True)
data_all=x.xs('all', level=0, axis=1).sort_values(('all','NumContacts'), ascending=False).reindex(['NumContacts','CPM','%exons','%ambiguousDNA','%trans','trans_chr_score'], axis=1, level=1)
data_unq=x.xs('unq', level=0, axis=1).sort_values(('all','NumContacts'), ascending=False).reindex(['NumContacts','CPM','%exons','%ambiguousDNA','%trans','trans_chr_score'], axis=1, level=1)
data_intergenic.columns=data_intergenic.columns.droplevel([0,1])
ig_tot=data_intergenic.sum(axis=1)
# # for c in [-1,0,1]:
# # data_intergenic[c]=data_intergenic[c].values/data_intergenic['tot'].values*100
ig_Ntrans=data_intergenic[1].values
ig_Ncis=data_intergenic[0].values
ig_Nambig=data_intergenic[-1].values
data_intergenic[0]=ig_Ntrans/(ig_Ntrans+ig_Ncis)*100 #trans
data_intergenic[-1]=ig_Nambig/ig_tot # ambiguous
data_intergenic[1]=ig_tot
# # data_intergenic.drop([1], axis=1)
# # Nreads=x[(k,hh,'tot')].sum()/1000000
data_intergenic.rename(columns={-1:'%ambiguousDNA',0:'%trans',1:'N'}, inplace=True)
data_intergenic.columns.names=[None]
return data_unq, data_all, data_intergenic
def count_table(data, tx_df, genes_df, chr_df=None):
tx_dict=tx_df['ENSG'].to_dict()
for g in set(tx_df['ENSG'].values):
tx_dict[g]=g
tx_dict['*']='*'
len_df=pd.concat([tx_df['L'],genes_df['L']])
d=data.copy()
d.index=pd.MultiIndex.from_arrays([d.index.get_level_values(0).map(tx_dict), d.index.get_level_values(1)])
d=d.groupby(['ENSN','istrans']).sum()
d_total=d.groupby('ENSN').sum()
try:
d_cis=d.xs(0, level=1)
except KeyError:
d_cis=pd.DataFrame(np.zeros(len(d_total), int), index=d_total.index)
try:
d_trans=d.xs(1, level=1)
except KeyError:
d_trans=pd.DataFrame(np.zeros(len(d_total), int), index=d_total.index)
N=pd.concat([d_total,d_cis,d_trans], axis=1, sort=False).fillna(0).astype(int)
N.columns=['N','N_cis','N_trans']
N['CPM']=N['N']/N['N'].sum()*1000000
N['CPKM']=N['CPM']*1000/len_df.reindex(N.index)
if chr_df is not None:
chr_len_df=chr_df.copy()
chr_len_df['Ltrans']=chr_len_df['L'].sum()-chr_len_df['L']
# chr_len_cis_dict=chr_len_df['L'].to_dict()
# chr_len_trans_dict=chr_len_df['Ltrans'].to_dict()
chr_norm=(chr_len_df['L']/chr_len_df['Ltrans']).to_dict()
Lcis_over_Ltrans=genes_df['chr'].map(chr_norm)
N['tscore']=(N['N_trans']/N['N_cis']*Lcis_over_Ltrans.reindex(N.index)).values
N.index.name='ENSN'
N['name']=genes_df['name'].reindex(N.index)
N['type']=genes_df['type'].reindex(N.index)
N=N.reset_index().set_index(['ENSN','name','type'])
return N
def namify_count_table(ct, genes_df, chr_df):
# expects:
# chr_df=pd.read_csv(chrNameLengthFile, sep='\t', header=None, names=['chr','L'], index_col=0)
chr_len_df=chr_df.copy()
chr_len_df['Ltrans']=chr_len_df['L'].sum()-chr_len_df['L']
chr_len_cis_dict=chr_len_df['L'].to_dict()
chr_len_trans_dict=chr_len_df['Ltrans'].to_dict()
ct_byname=pd.concat([genes_df[['chr']],ct[['N','N_cis','N_trans','CPM','CPKM']].reset_index().set_index(['ENSN'])], axis=1, join='inner').set_index(['name','type'])
ct_byname['cis_density']=ct_byname['N_cis']/ct_byname['chr'].map(chr_len_cis_dict)
ct_byname['trans_density']=ct_byname['N_trans']/ct_byname['chr'].map(chr_len_trans_dict)
# ct_byname['NLoci']=np.ones(len(ct_byname), dtype=int)
ct_byname=ct_byname.groupby(['name','type']).sum()
ct_byname['tscore']=ct_byname['trans_density']/ct_byname['cis_density']
ct_byname=ct_byname.drop(['cis_density','trans_density'], axis=1)
return ct_byname
def summarize_counts(stats_file, tx_df, genes_df, chr_df, unq=True, tx_start='ENST', gene_start='ENSG'):
data=pd.read_csv(stats_file, delimiter=" ", header=None, names=['Count','ENSN','uniqmapping','istrans'], index_col=['ENSN','uniqmapping','istrans'])
if '*' in data.index.get_level_values(0):
data_intergenic=data.loc['*'].xs(-1, level=0).reindex([-1,0,1]).fillna(0)
d_intergenic=pd.DataFrame([data_intergenic.sum().iat[0],data_intergenic.iat[1,0],data_intergenic.iat[2,0]]).transpose()
d_intergenic.columns=['N','N_cis','N_trans']
d_intergenic.index=pd.Index(['*'], name='ENSN')
else:
d_intergenic=None
if unq:
data_unq=data.xs(1, level=1) #.drop('*', axis=0, level=1)
data_unq_exons=data_unq.loc[data_unq.index.get_level_values(0).str.startswith('ENST')]
data_unq_introns=data_unq.loc[data_unq.index.get_level_values(0).str.startswith('ENSG')]
else:
data_unq=data.groupby(['ENSN','istrans']).sum()
if '*' in data_unq.index.get_level_values(0):
data_unq=data_unq.drop('*', axis=0, level=0)
data_unq_exons=data_unq.loc[data_unq.index.get_level_values(0).str.startswith('ENST')]
data_unq_introns=data_unq.loc[data_unq.index.get_level_values(0).str.startswith('ENSG')]
if (len(data_unq_exons)>0) and (len(data_unq_introns)>0):
data_unq_all=[data_unq,data_unq_exons, data_unq_introns]
names=['all','exons','introns']
elif (len(data_unq_exons)>0):
data_unq_all=[data_unq_exons]
names=['all']
else:
data_unq_all=[data_unq_introns]
names=['all']
cts=[count_table(d, tx_df, genes_df, chr_df) for d in data_unq_all]
cts_name=[namify_count_table(ct,genes_df, chr_df) for ct in cts]
out={'ct':pd.concat([ct for ct in cts], axis=1, keys=names, sort=False), 'ct_name': pd.concat([ct_name for ct_name in cts_name], axis=1, keys=names, sort=False)}
out['ct'].loc[:,(slice(None),['N','N_cis','N_trans'])]=out['ct'].loc[:,(slice(None),['N','N_cis','N_trans'])].fillna(0).astype(int)
out['ct'].loc[:,(slice(None),['CPM','CPKM'])]=out['ct'].loc[:,(slice(None),['CPM','CPKM'])].fillna(0)
out['ct_name'].loc[:,(slice(None),['N','N_cis','N_trans'])]=out['ct_name'].loc[:,(slice(None),['N','N_cis','N_trans'])].fillna(0).astype(int)
out['ct_name'].loc[:,(slice(None),['CPM','CPKM'])]=out['ct_name'].loc[:,(slice(None),['CPM','CPKM'])].fillna(0)
# out['ct_name']=out['ct_name'].drop('')
return out['ct'], out['ct_name'], d_intergenic
def rna_stats2noDNA(stats_file, tx_df, genes_df, chr_norm):
np.seterr(divide='ignore', invalid='ignore')
data=pd.read_csv(stats_file, delimiter=" ", header=None, names=['Count','ENSN','uniqmapping','istrans'])
data_exons=data.loc[data['ENSN'].str.startswith('ENST')].copy()
data_exons.rename(columns={"ENSN": "ENST"}, inplace=True)
# data_exons=pd.read_csv(exons_stats_file, delimiter=" ", header=None, names=['Count','ENST','uniqmapping','istrans'])
data_tx=data_exons.join(tx_df, on='ENST').drop('ENST',axis=1).groupby(['ENSG','uniqmapping','istrans']).sum().unstack(level=[1,2]).fillna(0)
data_intergenic=data.loc[data['ENSN'].str.startswith('*')].copy()
data_intergenic.rename(columns={"ENSN": "ENSG"}, inplace=True)
data_intergenic=data_intergenic.groupby(['ENSG','uniqmapping','istrans']).sum().unstack(level=[1,2]).fillna(0)
# pd.read_csv(intergene_stats_file, delimiter=" ", header=None, names=['Count','ENSG','uniqmapping','istrans']).groupby(['ENSG','uniqmapping','istrans']).sum().unstack(level=[1,2]).fillna(0)
data_gb=data.loc[data['ENSN'].str.startswith('ENSG')].copy()
data_gb.rename(columns={"ENSN": "ENSG"}, inplace=True)
data_gb=data_gb.groupby(['ENSG','uniqmapping','istrans']).sum().unstack(level=[1,2]).fillna(0)
# pd.read_csv(introns_stats_file, delimiter=" ", header=None, names=['Count','ENSG','uniqmapping','istrans'])
mydf={}
n=['exons','introns']
for nn, data_bygene in zip(n,[data_tx, data_gb]):
unq=data_bygene.loc[:, (slice(None),[1],slice(None))].sum(axis=1,level=2)
alls=data_bygene.sum(axis=1,level=2)
unq['NumContacts']=unq.sum(axis=1)
alls['NumContacts']=alls.sum(axis=1)
mydf[nn]= | pd.concat([unq,alls], axis=1, keys=['unq','all']) | pandas.concat |
import os
import numpy as np
import pandas as pd
import time
import sys
import math
import pickle
from sklearn.utils import shuffle
import pathlib
from pathlib import Path
from multiprocessing import Pool
import preprocess
def run_cluster_calculate_norm_stats():
H5_PATH_PREFIX = str(Path.cwd().parent / 'parsed_files') + '/' #"/users/ak1774/scratch/esport/death_prediction/cluster_scripts/parse_job_out/parsed_files/"
H5_FILE_LIST = str(Path.cwd().parent / 'parsed_files' / 'all_h5_files.txt') #"/users/ak1774/scratch/esport/death_prediction/all_h5_files.txt"
now = time.time()
h5_files = get_h5_file_list(H5_PATH_PREFIX,H5_FILE_LIST)
h5_files = np.random.choice(h5_files,50)
data = load_data_chunk(h5_files,worker_id=None,num_workers=None)
#data = load_all_data(H5_PATH_PREFIX,H5_FILE_LIST)
print("Loading took: ", time.time()-now)
print("Data shape: ", data.shape)
sys.stdout.flush()
now = time.time()
norm_stats = calculate_normalization_stats(data)
print("Collecting min max took: ", time.time()-now)
sys.stdout.flush()
now = time.time()
with open("norm_stats.pickle", 'wb') as f:
pickle.dump(norm_stats, f, pickle.HIGHEST_PROTOCOL)
print("Pickleing took: ", time.time()-now)
sys.stdout.flush()
def run_cluster_randomize(data_type,worker_id,num_workers):
H5_PATH_PREFIX = str(Path.cwd().parent / 'parsed_files') + '/'
if data_type == "train":
#H5_PATH_PREFIX = "/users/ak1774/scratch/esport/death_prediction/cluster_scripts/parsed_files/"
H5_FILE_LIST = str(Path.cwd() / 'training_files.txt') #"/users/ak1774/scratch/esport/death_prediction/cluster_scripts/training_files.txt"
OUT_FOLDER = str(Path.cwd().parent / 'randomized_data' / 'train') + '/'
elif data_type == "test":
#H5_PATH_PREFIX = "/users/ak1774/scratch/esport/death_prediction/cluster_scripts/parse_job_out/parsed_files/"
H5_FILE_LIST = str(Path.cwd() / 'test_files.txt') #"/users/ak1774/scratch/esport/death_prediction/cluster_scripts/test_files.txt"
OUT_FOLDER = str(Path.cwd().parent / 'randomized_data' / 'test') + '/' #"/mnt/lustre/groups/cs-dclabs-2019/esport/death_prediction_data/randomized_data/test/"
elif data_type == "validation":
#H5_PATH_PREFIX = "/users/ak1774/scratch/esport/death_prediction/cluster_scripts/parse_job_out/parsed_files/"
H5_FILE_LIST = str(Path.cwd() / 'validation_files.txt') #"/users/ak1774/scratch/esport/death_prediction/cluster_scripts/validation_files.txt"
OUT_FOLDER = str(Path.cwd().parent / 'randomized_data' / 'validation') + '/' #"/mnt/lustre/groups/cs-dclabs-2019/esport/death_prediction_data/randomized_data/validation/"
WORKER_ID = worker_id #int(os.environ['SLURM_ARRAY_TASK_ID']) #TODO
NUM_WORKERS = num_workers #int(os.environ['SLURM_ARRAY_TASK_COUNT'])
now = time.time()
h5_files = get_h5_file_list(H5_PATH_PREFIX,H5_FILE_LIST)
data = load_data_chunk(h5_files,WORKER_ID,NUM_WORKERS)
# shuffle the data
data = shuffle(data)
now = time.time()
DATA_CHUNK_SIZE = 4000
num_chunks = int(data.shape[0] / DATA_CHUNK_SIZE)
rest = data.shape[0] - num_chunks * DATA_CHUNK_SIZE
# the first `rest` chunks each get one extra point; this way every point is used and the chunks all have a similar size
# NOTE I assume that a worker has at least DATA_CHUNK_SIZE*DATA_CHUNK_SIZE datapoints, otherwise this tactic can fail...
# Actually we would just throw away a little bit of data, which is fine...
current_index = 0
for i in range(num_chunks):
size = DATA_CHUNK_SIZE
if i < rest:
size += 1
data_chunk = data[current_index:current_index+size]
data_chunk.to_hdf(OUT_FOLDER + 'data_chunk_' + str(WORKER_ID) + "_" + str(i) + '.h5', key='data_chunk', mode='w', complevel = 9,complib='zlib')
current_index += size
print("Saving took: ", time.time()-now)
sys.stdout.flush()
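# Worked example of the chunking rule used above (hypothetical sizes, not part of the pipeline):
# when a worker holds at least DATA_CHUNK_SIZE*DATA_CHUNK_SIZE rows, the first `rest` chunks
# each absorb one extra row and nothing is dropped.
def chunk_sizes(n_rows, chunk_size=4000):
    num_chunks = n_rows // chunk_size
    rest = n_rows - num_chunks * chunk_size
    return [chunk_size + 1 if i < rest else chunk_size for i in range(num_chunks)]

assert sum(chunk_sizes(16000007)) == 16000007  # every row ends up in exactly one chunk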
def run_cluster_normalize(data_type = None,worker_id=0,num_workers=None):
if data_type == None:
H5_PATH_PREFIX = str(Path.cwd() / 'parsed_files') + '/' #"/users/ak1774/scratch/esport/death_prediction/cluster_scripts/parse_job_out/parsed_files/"
H5_FILE_LIST = str(Path.cwd() / 'parsed_files' / 'all_h5_files.txt') #"/users/ak1774/scratch/esport/death_prediction/all_h5_files.txt"
OUT_FOLDER = str(Path.cwd() / 'data_out') + '/'
elif data_type == "train":
H5_PATH_PREFIX = "/users/ak1774/scratch/esport/death_prediction/cluster_scripts/parse_job_out/parsed_files/"
H5_FILE_LIST = "/users/ak1774/scratch/esport/death_prediction/cluster_scripts/training_files.txt"
OUT_FOLDER = "/mnt/lustre/groups/cs-dclabs-2019/esport/death_prediction_data/randomized_data/train/"
elif data_type == "test":
H5_PATH_PREFIX = "/users/ak1774/scratch/esport/death_prediction/cluster_scripts/parse_job_out/parsed_files/"
H5_FILE_LIST = "/users/ak1774/scratch/esport/death_prediction/cluster_scripts/test_files.txt"
OUT_FOLDER = "/mnt/lustre/groups/cs-dclabs-2019/esport/death_prediction_data/randomized_data/test/"
elif data_type == "validation":
H5_PATH_PREFIX = "/users/ak1774/scratch/esport/death_prediction/cluster_scripts/parse_job_out/parsed_files/"
H5_FILE_LIST = "/users/ak1774/scratch/esport/death_prediction/cluster_scripts/validation_files.txt"
OUT_FOLDER = "/mnt/lustre/groups/cs-dclabs-2019/esport/death_prediction_data/randomized_data/validation/"
WORKER_ID = worker_id #int(os.environ['SLURM_ARRAY_TASK_ID'])
if num_workers is None:
NUM_WORKERS = os.cpu_count() #int(os.environ['SLURM_ARRAY_TASK_COUNT'])
else:
NUM_WORKERS = num_workers
now = time.time()
norm_stats = None
with open("norm_stats.pickle", 'rb') as f:
norm_stats = pickle.load(f)
print("Unpickleing took: ", time.time()-now)
sys.stdout.flush()
now = time.time()
h5_files = get_h5_file_list(H5_PATH_PREFIX,H5_FILE_LIST)
data_chunk = load_data_chunk(h5_files,WORKER_ID,NUM_WORKERS)
RECALCULATE_WHO_DIES_NEXT_LABELS = False
if RECALCULATE_WHO_DIES_NEXT_LABELS:
data_chunk = preprocess.create_who_dies_next_labels(data_chunk)
print("Loading took: ", time.time()-now)
print("Data shape: ", data_chunk.shape)
sys.stdout.flush()
now = time.time()
data = normalize_data(data_chunk,norm_stats)
print("Normalizing took: ", time.time()-now)
sys.stdout.flush()
# shuffle the data
data = shuffle(data)
now = time.time()
DATA_CHUNK_SIZE = 4000
num_chunks = int(data.shape[0] / DATA_CHUNK_SIZE)
rest = data.shape[0] - num_chunks * DATA_CHUNK_SIZE
# the first `rest` chunks each get one extra point; this way every point is used and the chunks all have a similar size
# NOTE I assume that a worker has at least DATA_CHUNK_SIZE*DATA_CHUNK_SIZE datapoints, otherwise this tactic can fail...
# Actually we would just throw away a little bit of data, which is fine...
current_index = 0
for i in range(num_chunks):
size = DATA_CHUNK_SIZE
if i < rest:
size += 1
data_chunk = data[current_index:current_index+size]
data_chunk.to_hdf(OUT_FOLDER + 'data_chunk_' + str(WORKER_ID) + "_" + str(i) + '.h5', key='data_chunk', mode='w', complevel = 9,complib='zlib')
current_index += size
print("Saving took: ", time.time()-now)
sys.stdout.flush()
def get_h5_file_list(path_prefix,h5_file_list_path):
with open(h5_file_list_path) as f:
h5_files = f.readlines()
h5_files = [x.strip() for x in h5_files]
h5_files = [x.replace("./",path_prefix) for x in h5_files]
return h5_files
def load_data_from_file(filename):
return load_data_chunk([filename],worker_id=None,num_workers=None)
def load_all_data(path_prefix,h5_file_list_path):
h5_files = get_h5_file_list(path_prefix,h5_file_list_path)
return load_data_chunk(h5_files,worker_id=None,num_workers=None)
# will load all data if worker id is None
# TODO BUG: this is horribly inefficient. Concatenate will reallocate the whole dataset at every call...
# Solution: allocate a large array beforehand... If we don't know how big we need it, use doubling whenever we run out? Maybe shrink it in the end.
def load_data_chunk(h5_files,worker_id=None,num_workers=None):
if worker_id is not None:
files_per_worker = int(math.ceil(float(len(h5_files)) / num_workers))
h5_files = h5_files[worker_id*files_per_worker : (worker_id+1)*files_per_worker]
# h5_files = h5_files[0:10] # debug only take the first 10
# read in all the data, and concatenate it into on big data frame
data = None
num_has_nan = 0
for i,filename in enumerate(h5_files):
if(i % 10) == 9:
print("Loading file: ",i)
sys.stdout.flush()
#if data is not None:
# data.info()
if data is None:
data = pd.read_hdf(filename)
if data.isnull().values.any():
data = None
num_has_nan += 1
continue
else:
new_data = | pd.read_hdf(filename) | pandas.read_hdf |
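# A sketch of one possible fix for the TODO above — an alternative to the preallocation/doubling
# idea: collect the per-file frames in a list and concatenate once at the end, so the growing
# DataFrame is never reallocated per file. Untested here; the NaN-skipping rule mirrors the loop above.
def load_data_chunk_concat_once(h5_files):
    frames = []
    for filename in h5_files:
        frame = pd.read_hdf(filename)
        if frame.isnull().values.any():
            continue  # skip files that contain NaNs, as the loop above does
        frames.append(frame)
    return pd.concat(frames, ignore_index=True) if frames else None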
import re
from typing import List, Tuple, Dict
from pandas import Series, DataFrame
def regex(text: str, pattern: str) -> List[str]:
"""グループ入り正規表現にマッチさせて、ヒットした場合はそれぞれの文字列の配列、そうでない場合は空配列を返す"""
output: List[str] = []
for m in re.finditer(pattern, text, re.MULTILINE):
for x in m.groups():
output.append(x)
return output
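# Hypothetical usage sketch for regex() above (not called anywhere; pattern and text are made up):
# every captured group from every match is collected, in order.
def _example_regex_usage() -> List[str]:
    return regex("width=10 height=20", r"(\w+)=(\d+)")  # -> ['width', '10', 'height', '20']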
def extract_numbers(series: Series, pair_data_patterns: List[str], single_data_patterns: List[str])\
-> Tuple[List[str], List[str]]:
"""ある列について、その各行に含まれる文字列から、数字を1つないし2つ抽出して、リストにまとめる。
数字が2つ→リストA・リストBにそれぞれの数字を追加
数字が1つ→リストA・リストBに同じ数字を追加
Parameters
----------
series ある列
pair_data_pattern 数字が2つ存在する場合のパターン
single_data_pattern 数字が1つ存在する場合のパターン
Returns
-------
分析後のリストA・リストB
"""
list_a: List[str] = []
list_b: List[str] = []
for data_line in series.values:
if data_line != data_line:  # NaN check (NaN != NaN)
list_a.append('')
list_b.append('')
continue
flg = False
# patterns where two numbers are present
for pair_data_pattern in pair_data_patterns:
result = regex(data_line, pair_data_pattern)
if len(result) >= 2:
list_a.append(result[0])
list_b.append(result[1])
flg = True
break
if flg:
continue
# 数字が1つ存在する場合のパターン
for single_data_pattern in single_data_patterns:
result = regex(data_line, single_data_pattern)
if len(result) >= 1:
list_a.append(result[0])
list_b.append(result[0])
flg = True
break
if flg:
continue
return list_a, list_b
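# Hypothetical usage sketch for extract_numbers() above (not called anywhere; the patterns are
# made up): a two-number row fills lists A and B with different values, a one-number row
# duplicates its value, and a NaN row yields empty strings in both lists.
def _example_extract_numbers() -> Tuple[List[str], List[str]]:
    demo = Series(["10-20", "15", float("nan")])
    return extract_numbers(demo, [r"(\d+)-(\d+)"], [r"(\d+)"])
    # -> (['10', '15', ''], ['20', '15', ''])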
def convert_columns(df: DataFrame, rename_columns: Dict[str, str], delete_columns: List[str]) -> DataFrame:
"""DataFrameのカラム名を変換する
Parameters
----------
df DataFrame
rename_columns リネームするカラム名
delete_columns 削除するカラム名
Returns
-------
加工後のDataFrame
"""
temp: List[Dict[str, any]] = []
for record in df.to_dict(orient='records'):
record: Dict[str, any] = record
record2: Dict[str, any] = {}
for key, val in record.items():
if key in delete_columns:
continue
if val != val:  # skip NaN cells
continue
if key in rename_columns:
record2[rename_columns[key]] = val
else:
record2[key] = val
temp.append(record2)
return | DataFrame.from_records(temp) | pandas.DataFrame.from_records |
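# Hypothetical usage sketch for convert_columns() above (not called anywhere): 'old' is renamed
# to 'new', 'drop_me' is removed, and the NaN 'keep' cell is simply left out of the record.
def _example_convert_columns() -> DataFrame:
    demo = DataFrame([{"old": 1, "drop_me": 2, "keep": float("nan")}])
    return convert_columns(demo, rename_columns={"old": "new"}, delete_columns=["drop_me"])
    # -> a single-row frame whose only column is 'new'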
# Get arxiv data
import json
import logging
import os
import pickle
from collections import Counter
from datetime import datetime
from io import BytesIO
from zipfile import ZipFile
import numpy as np
import pandas as pd
import requests
from kaggle.api.kaggle_api_extended import KaggleApi
from eurito_indicators import PROJECT_DIR
from eurito_indicators.pipeline.clustering_naming import make_doc_comm_lookup
from eurito_indicators.pipeline.processing_utils import covid_getter
GRID_PATH = f"{PROJECT_DIR}/inputs/data/grid"
CORD_META_PATH = f"{PROJECT_DIR}/inputs/data/metadata.csv.zip"
DISC_QUERY = f"{PROJECT_DIR}/inputs/data/arxiv_discipline.csv"
COV_PAPERS_PATH = f"{PROJECT_DIR}/inputs/data/arxiv_papers_covid.csv"
def get_arxiv_articles():
"""Get arxiv - and cord - articles"""
art = pd.read_csv(
f"{PROJECT_DIR}/inputs/data/arxiv_articles_v2.csv",
dtype={"id": str},
parse_dates=["created"],
)
art = art.rename(columns={"id": "article_id"})
art["month_year"] = [
datetime(x.year, x.month, 1) if not pd.isnull(x) else np.nan
for x in art["created"]
]
selected_columns = [
"article_id",
"created",
"month_year",
"title",
"journal_ref",
"doi",
"authors",
"abstract",
"mag_id",
"citation_count",
"article_source",
]
return art[selected_columns]
def get_arxiv_institutes():
"""Lookup between paper ids and org id"""
inst = pd.read_csv(
f"{PROJECT_DIR}/inputs/data/arxiv_article_institutes_updated.csv",
dtype={"article_id": str, "institute_id": str},
)
return inst
def get_article_categories():
"""Article categories"""
inst = pd.read_csv(
f"{PROJECT_DIR}/inputs/data/arxiv_article_categories.csv",
dtype={"article_id": str},
)
return inst
def get_arxiv_w2v():
with open(f"{PROJECT_DIR}/outputs/models/arxiv_w2v.p", "rb") as infile:
return pickle.load(infile)
def fetch_grid():
"""Fetch the grid data"""
if os.path.exists(GRID_PATH) is False:
logging.info("Collecting Grid data")
os.makedirs(GRID_PATH, exist_ok=True)
g = requests.get("https://ndownloader.figshare.com/files/28431024")
g_z = ZipFile(BytesIO(g.content))
g_z.extractall(GRID_PATH)
def fetch_cord_meta():
"""Fetch the cord metadata"""
if os.path.exists(CORD_META_PATH) is False:
logging.info("Fetching cord data")
api = KaggleApi()
api.authenticate()
api.dataset_download_file(
"allen-institute-for-ai/CORD-19-research-challenge",
"metadata.csv",
path=f"{PROJECT_DIR}/inputs/data",
)
def get_cord_metadata():
"""Gets the cord metadata"""
meta = | pd.read_csv(f"{PROJECT_DIR}/inputs/data/metadata.csv.zip", compression="zip") | pandas.read_csv |
import os
import argparse
import pandas as pd
from tqdm import tqdm
def get_args():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--data_path", type=str, default="./data")
args = parser.parse_args()
return args
def main():
args = get_args()
ages, img_paths = [], []
for filename in tqdm(os.listdir(os.path.join(args.data_path, 'AgeDB'))):
_, _, age, gender = filename.split('.')[0].split('_')
ages.append(age)
img_paths.append(f"AgeDB/{filename}")
outputs = dict(age=ages, path=img_paths)
output_dir = os.path.join(args.data_path, "meta")
os.makedirs(output_dir, exist_ok=True)
output_path = os.path.join(output_dir, "agedb.csv")
df = | pd.DataFrame(data=outputs) | pandas.DataFrame |
from ...common import requires as _requires
from ...io.geotable.utils import to_gdf, to_df
from warnings import warn as _Warn
import functools as _f
import sys as _sys
@_requires("geopandas")
def spatial_join(df1, df2, left_geom_col='geometry',
right_geom_col='geometry', **kwargs):
"""
Spatial join of two Pandas DataFrames. Calls out to Geopandas.
Parameters
----------
df1 : pandas.DataFrame
df2 : pandas.DataFrame
how : string, default 'inner'
The type of join:
* 'left': use keys from left_df; retain only
left_df geometry column
* 'right': use keys from right_df; retain only
right_df geometry column
* 'inner': use intersection of keys from both dfs;
retain only left_df geometry column
op : string, default 'intersection'
One of {'intersects', 'contains', 'within'}.
See http://toblerity.org/shapely/manual.html#binary-predicates.
lsuffix : string, default 'left'
Suffix to apply to overlapping column names
(left GeoDataFrame).
rsuffix : string, default 'right'
Suffix to apply to overlapping column names
(right GeoDataFrame).
"""
import geopandas as gpd
gdf1 = to_gdf(df1, geom_col=left_geom_col)
gdf2 = to_gdf(df2, geom_col=right_geom_col)
out = gpd.tools.sjoin(gdf1, gdf2, **kwargs)
return to_df(out)
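# Hypothetical usage sketch (not called anywhere): both inputs are plain DataFrames whose
# geometry columns hold shapely geometries; the remaining keyword arguments are forwarded to
# geopandas.tools.sjoin (how/op/lsuffix/rsuffix, as documented above).
def _example_spatial_join(points_df, polygons_df):
    return spatial_join(points_df, polygons_df,
                        left_geom_col='geometry', right_geom_col='geometry',
                        how='inner', op='within')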
try:
import pandas as _pd
@_requires("pandas")
@_f.wraps(_pd.merge)
def join(*args, **kwargs):
return | _pd.merge(*args, **kwargs) | pandas.merge |
# -*- coding: utf-8 -*-
import math
import random
import pandas as pd
import numpy as np
random.seed(0)
def rand(a, b):
return (b - a) * random.random() + a
def make_matrix(m, n, fill=0.0): # create an m-by-n matrix filled with the given value
mat = []
for i in range(m):
mat.append([fill] * n)
return mat
def sigmoid(x):
return 1.0 / (1.0 + math.exp(-x))
def sigmod_derivate(x):
return x * (1 - x)  # derivative of the sigmoid, expressed in terms of the sigmoid output x
def deal_maxmin(serialdataframe):
"归一化"
max_min_scaler = lambda x: (x - np.min(x)) / (np.max(x) - np.min(x))
for i in range(len(serialdataframe)):
if(serialdataframe.iloc[i][0] != 0): # only normalize when the column is not all zeros
return serialdataframe.apply(max_min_scaler)
return serialdataframe
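# Hypothetical usage sketch for deal_maxmin() above (not called anywhere): a non-constant
# column is rescaled to [0, 1]; a column that never leaves zero is returned unchanged.
def _example_deal_maxmin():
    scaled = deal_maxmin(pd.DataFrame({'x': [0, 5, 10]}))    # -> 0.0, 0.5, 1.0
    untouched = deal_maxmin(pd.DataFrame({'x': [0, 0, 0]}))  # returned as-is
    return scaled, untouched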
class Bpnn:
def setup(self, ni, nh, no):
self.input_n = ni + 1
self.hidden_n = nh
self.output_n = no
# init cells
self.input_cells = [1.0] * self.input_n
self.hidden_cells = [1.0] * self.hidden_n
self.output_cells = [1.0] * self.output_n
# initialize weights
self.input_weights = make_matrix(self.input_n, self.hidden_n)
self.output_weights = make_matrix(self.hidden_n, self.output_n)
# random activate
for i in range(self.input_n):
for h in range(self.hidden_n):
self.input_weights[i][h] = rand(-0.2, 0.2)
for h in range(self.hidden_n):
for o in range(self.output_n):
self.output_weights[h][o] = rand(-2.0, 2.0)
# init correction matrix
self.input_correction = make_matrix(self.input_n, self.hidden_n)
self.output_correction = make_matrix(self.hidden_n, self.output_n)
def predict(self, inputs):
# activate input layer
for i in range(self.input_n - 1):
self.input_cells[i] = inputs[i]
# activate hidden layer
for j in range(self.hidden_n):
total = 0.0
for i in range(self.input_n):
total += self.input_cells[i] * self.input_weights[i][j]
self.hidden_cells[j] = sigmoid(total)
# activate output layer
for k in range(self.output_n):
total = 0.0
for j in range(self.hidden_n):
total += self.hidden_cells[j] * self.output_weights[j][k]
self.output_cells[k] = sigmoid(total)
return self.output_cells[:]
def back_propagate(self, case, label, learn, correct):
# feed forward
self.predict(case)
# output layer error
output_deltas = [0.0] * self.output_n
for o in range(self.output_n):
error = label[o] - self.output_cells[o]
output_deltas[o] = sigmod_derivate(self.output_cells[o]) * error
# get hidden layer error
hidden_deltas = [0.0] * self.hidden_n
for h in range(self.hidden_n):
error = 0.0
for o in range(self.output_n):
error += output_deltas[o] * self.output_weights[h][o]
hidden_deltas[h] = sigmod_derivate(self.hidden_cells[h]) * error
# update output weights
for h in range(self.hidden_n):
for o in range(self.output_n):
change = output_deltas[o] * self.hidden_cells[h]
self.output_weights[h][o] += learn * change + correct * self.output_correction[h][o]
self.output_correction[h][o] = change
# update input weights
for i in range(self.input_n):
for h in range(self.hidden_n):
change = hidden_deltas[h] * self.input_cells[i]
self.input_weights[i][h] += learn * change + correct * self.input_correction[i][h]
self.input_correction[i][h] = change
# global (total) error
error = 0.0
for o in range(len(label)):
error += 0.5 * (label[o] - self.output_cells[o]) ** 2
return error
def train(self, cases, labels, limit=10000, learn=0.05, correct=0.1):
for i in range(limit):
error = 0.0
for i in range(len(cases)):
label = labels[i]
case = cases[i]
error += self.back_propagate(case, label, learn, correct)
def getAvgerror(numlist1, numlist2):
'得到两序列间的平均距离,这里用作求平均误差'
totalerror = 0
for i in range(len(numlist1)):
totalerror += abs(numlist1[i]-numlist2[i])
return totalerror/len(numlist1)
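# Tiny illustrative check for getAvgerror() above (not called anywhere): it is the mean
# absolute difference between two equally long sequences.
def _example_get_avg_error():
    return getAvgerror([1, 2, 3], [2, 2, 5])  # -> (1 + 0 + 2) / 3 == 1.0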
#
# def getParameter(cases, labels):
# '求出最佳参数:隐层神经元个数与学习率'
# bp = Bpnn()
# imputnum = len(cases[0])
# casetrain1 = []
# casetrain2 = [] # validation set, used for tuning
# labeltrain1 = []
# labeltrain2 = [] # validation set, used for tuning
# predict_results = []
# for i in range(len(cases)):
# if i < len(cases)*7/10: # 70/30 split
# casetrain1.append(cases[i])
# labeltrain1.append(labels[i])
# else:
# casetrain2.append(cases[i])
# labeltrain2.append(labels[i][0]*100)
#
# bestnnum = int(imputnum/2)
# bestlrate = 0.01
# bp.setup(imputnum, bestnnum, 1)
# bp.train(cases, labels, 5000, bestlrate, 0.1)
# for case in casetrain2:
# predict_results.append(bp.predict(case)[0]*100)
# minerror = getAvgerror(labeltrain2, predict_results)
#
# for nnum in range(int(imputnum/2), imputnum+1):# hidden-neuron count iterates from half the input size up to the input size
# for lrate in np.arange(0.01, 0.5+0.01, 0.01): # learning rate iterates from 0.01 in steps of 0.01
# predict_results = []
# bp.setup(imputnum, nnum, 1)
# bp.train(cases, labels, 5000, lrate, 0.1)
# for case in casetrain2:
# predict_results.append(bp.predict(case)[0] * 100)
# if getAvgerror(labeltrain2, predict_results) < minerror: # update if a smaller average error is found
# minerror = getAvgerror(labeltrain2, predict_results)
# bestnnum = nnum
# bestlrate = lrate
# print("正在调参,当前最小平均误差为{},神经元个数为{},学习率为{:.2f}".format(minerror, bestnnum, bestlrate))
# return bestnnum, bestlrate
def keyinTotalscore(traindata, testdata):
nn = Bpnn()
cases = []
labels = []
predict_list = []
predict_totalscores = []
# normalize the training and test sets together
rapid_frame = deal_maxmin(pd.concat([traindata[['rapid_times']], testdata[['rapid_times']]]))
deceleration_frame = deal_maxmin(pd.concat([traindata[['deceleration_times']], testdata[['deceleration_times']]]))
fatigueDriving_frame = deal_maxmin(pd.concat([traindata[['fatigueDriving_times']], testdata[['fatigueDriving_times']]]))
idlePreheating_frame = deal_maxmin(pd.concat([traindata[['idlePreheating_times']], testdata[['idlePreheating_times']]]))
overlongIdle_frame = deal_maxmin(pd.concat([traindata[['overlongIdle_times']], testdata[['overlongIdle_times']]]))
coastingEngineoff_frame = deal_maxmin(pd.concat([traindata[['coastingEngineoff_times']], testdata[['coastingEngineoff_times']]]))
speeding_frame = deal_maxmin(pd.concat([traindata[['speeding_times']], testdata[['speeding_times']]]))
changeRoad_frame = deal_maxmin(pd.concat([traindata[['changeRoad_times']], testdata[['changeRoad_times']]]))
safeMiles_frame = deal_maxmin(pd.concat([traindata[['safeMiles_flag']], testdata[['safeMiles_flag']]]))
speed_stabilization_frame = deal_maxmin(pd.concat([traindata[['speed_stabilization']], testdata[['speed_stabilization']]]))
lowVisibilitySpeeding_frame = deal_maxmin(pd.concat([traindata[['lowVisibilitySpeeding_times']], testdata[['lowVisibilitySpeeding_times']]]))
crossWind_frame = deal_maxmin(pd.concat([traindata[['crossWind_time']], testdata[['crossWind_time']]]))
highWind_frame = deal_maxmin(pd.concat([traindata[['highWind_time']], testdata[['highWind_time']]]))
adverseweatherSpeeding_frame = deal_maxmin(pd.concat([traindata[['adverseweatherSpeeding_times']], testdata[['adverseweatherSpeeding_times']]]))
average_speed_frame = deal_maxmin(pd.concat([traindata[['average_speed']], testdata[['average_speed']]]))
againstWind_frame = deal_maxmin(pd.concat([traindata[['againstWind_time']], testdata[['againstWind_time']]]))
diseconomicSpeed_frame = deal_maxmin(pd.concat([traindata[['diseconomicSpeed_rate']], testdata[['diseconomicSpeed_rate']]]))
# split back into training and test parts
rapid_times = rapid_frame[0:len(traindata)]
rapid_times2 = rapid_frame[len(traindata):len(traindata) + len(testdata)]
deceleration_times = deceleration_frame[0:len(traindata)]
deceleration_times2 = deceleration_frame[len(traindata):len(traindata) + len(testdata)]
fatigueDriving_times = fatigueDriving_frame[0:len(traindata)]
fatigueDriving_times2 = fatigueDriving_frame[len(traindata):len(traindata) + len(testdata)]
idlePreheating_times = idlePreheating_frame[0:len(traindata)]
idlePreheating_times2 = idlePreheating_frame[len(traindata):len(traindata) + len(testdata)]
overlongIdle_times = overlongIdle_frame[0:len(traindata)]
overlongIdle_times2 = overlongIdle_frame[len(traindata):len(traindata) + len(testdata)]
coastingEngineoff_times = coastingEngineoff_frame[0:len(traindata)]
coastingEngineoff_times2 = coastingEngineoff_frame[len(traindata):len(traindata) + len(testdata)]
speeding_times = speeding_frame[0:len(traindata)]
speeding_times2 = speeding_frame[len(traindata):len(traindata) + len(testdata)]
changeRoad_times = changeRoad_frame[0:len(traindata)]
changeRoad_times2 = changeRoad_frame[len(traindata):len(traindata) + len(testdata)]
safeMiles_flag = safeMiles_frame[0:len(traindata)]
safeMiles_flag2 = safeMiles_frame[len(traindata):len(traindata) + len(testdata)]
speed_stabilization = speed_stabilization_frame[0:len(traindata)].values.flatten().tolist() # special handling for speed stability: take the distance from 1
speed_stabilization = [1 - float(i) for i in speed_stabilization]
speed_stabilization = pd.DataFrame(speed_stabilization, columns=['speed_stabilization'])
speed_stabilization2 = speed_stabilization_frame[len(traindata):len(traindata) + len(testdata)].values.flatten().tolist() # special handling for speed stability: take the distance from 1
speed_stabilization2 = [1 - float(i) for i in speed_stabilization2]
speed_stabilization2 = pd.DataFrame(speed_stabilization2, columns=['speed_stabilization'])
lowVisibilitySpeeding_times = lowVisibilitySpeeding_frame[0:len(traindata)]
lowVisibilitySpeeding_times2 = lowVisibilitySpeeding_frame[len(traindata):len(traindata) + len(testdata)]
crossWind_time = crossWind_frame[0:len(traindata)]
crossWind_time2 = crossWind_frame[len(traindata):len(traindata) + len(testdata)]
highWind_time = highWind_frame[0:len(traindata)]
highWind_time2 = highWind_frame[len(traindata):len(traindata) + len(testdata)]
adverseweatherSpeeding_times = adverseweatherSpeeding_frame[0:len(traindata)]
adverseweatherSpeeding_times2 = adverseweatherSpeeding_frame[len(traindata):len(traindata) + len(testdata)]
average_speed = average_speed_frame[0:len(traindata)].values.flatten().tolist() # special handling for average speed: take the distance from 1
average_speed = [1 - float(i) for i in average_speed]
average_speed = pd.DataFrame(average_speed, columns=['average_speed'])
average_speed2 = average_speed_frame[len(traindata):len(traindata) + len(testdata)].values.flatten().tolist() # special handling for average speed: take the distance from 1
average_speed2 = [1 - float(i) for i in average_speed2]
average_speed2 = pd.DataFrame(average_speed2, columns=['average_speed'])
againstWind_time = againstWind_frame[0:len(traindata)]
againstWind_time2 = againstWind_frame[len(traindata):len(traindata) + len(testdata)]
diseconomicSpeed_rate = diseconomicSpeed_frame[0:len(traindata)]
diseconomicSpeed_rate2 = diseconomicSpeed_frame[len(traindata):len(traindata) + len(testdata)]
totalResults = traindata['totalscore']
# build the training cases and score labels
for i in range(len(traindata)):
templist = [rapid_times.iloc[i][0], deceleration_times.iloc[i][0], fatigueDriving_times.iloc[i][0],
idlePreheating_times.iloc[i][0], overlongIdle_times.iloc[i][0], coastingEngineoff_times.iloc[i][0], speeding_times.iloc[i][0],
changeRoad_times.iloc[i][0], safeMiles_flag.iloc[i][0], speed_stabilization.iloc[i][0], lowVisibilitySpeeding_times.iloc[i][0],
crossWind_time.iloc[i][0], highWind_time.iloc[i][0], adverseweatherSpeeding_times.iloc[i][0], average_speed.iloc[i][0],
againstWind_time.iloc[i][0], diseconomicSpeed_rate.iloc[i][0]]
cases.append(templist)
labels.append([totalResults[i] * 0.01])
# neuron_num, learn_rate = getParameter(cases, labels) # search for the best hyperparameters
nn.setup(17, 27, 1)
nn.train(cases, labels, 10000, 0.1, 0.1)
# build the test set feature rows
for i in range(len(testdata)):
predict_templist = [rapid_times2.iloc[i][0], deceleration_times2.iloc[i][0], fatigueDriving_times2.iloc[i][0],
idlePreheating_times2.iloc[i][0], overlongIdle_times2.iloc[i][0], coastingEngineoff_times2.iloc[i][0],
speeding_times2.iloc[i][0], changeRoad_times2.iloc[i][0], safeMiles_flag2.iloc[i][0], speed_stabilization2.iloc[i][0],
lowVisibilitySpeeding_times2.iloc[i][0], crossWind_time2.iloc[i][0], highWind_time2.iloc[i][0], adverseweatherSpeeding_times2.iloc[i][0],
average_speed2.iloc[i][0], againstWind_time2.iloc[i][0], diseconomicSpeed_rate2.iloc[i][0]]
predict_list.append(predict_templist)
# compute the overall score for each row
for case in predict_list:
predict_totalscores.append(nn.predict(case)[0] * 100)
print(predict_totalscores)
testdata['totalscorebyPredict'] = predict_totalscores
testdata.to_csv(testfile, encoding="utf_8_sig", index=0)
print("综合评分已计算完成并写入文件")
def keyinSafescore(traindata,testdata):
nn = Bpnn()
cases = []
labels = []
predict_list = []
predict_totalscores = []
    # Normalize the training and test sets together
rapid_frame = deal_maxmin(pd.concat([traindata[['rapid_times']], testdata[['rapid_times']]]))
deceleration_frame = deal_maxmin(pd.concat([traindata[['deceleration_times']], testdata[['deceleration_times']]]))
fatigueDriving_frame = deal_maxmin(pd.concat([traindata[['fatigueDriving_times']], testdata[['fatigueDriving_times']]]))
idlePreheating_frame = deal_maxmin(pd.concat([traindata[['idlePreheating_times']], testdata[['idlePreheating_times']]]))
overlongIdle_frame = deal_maxmin(pd.concat([traindata[['overlongIdle_times']], testdata[['overlongIdle_times']]]))
coastingEngineoff_frame = deal_maxmin(pd.concat([traindata[['coastingEngineoff_times']], testdata[['coastingEngineoff_times']]]))
speeding_frame = deal_maxmin(pd.concat([traindata[['speeding_times']], testdata[['speeding_times']]]))
changeRoad_frame = deal_maxmin(pd.concat([traindata[['changeRoad_times']], testdata[['changeRoad_times']]]))
safeMiles_frame = deal_maxmin(pd.concat([traindata[['safeMiles_flag']], testdata[['safeMiles_flag']]]))
speed_stabilization_frame = deal_maxmin(pd.concat([traindata[['speed_stabilization']], testdata[['speed_stabilization']]]))
lowVisibilitySpeeding_frame = deal_maxmin(pd.concat([traindata[['lowVisibilitySpeeding_times']], testdata[['lowVisibilitySpeeding_times']]]))
crossWind_frame = deal_maxmin(pd.concat([traindata[['crossWind_time']], testdata[['crossWind_time']]]))
highWind_frame = deal_maxmin(pd.concat([traindata[['highWind_time']], testdata[['highWind_time']]]))
adverseweatherSpeeding_frame = deal_maxmin(pd.concat([traindata[['adverseweatherSpeeding_times']], testdata[['adverseweatherSpeeding_times']]]))
average_speed_frame = deal_maxmin(pd.concat([traindata[['average_speed']], testdata[['average_speed']]]))
    againstWind_frame = deal_maxmin(pd.concat([traindata[['againstWind_time']], testdata[['againstWind_time']]]))
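# Illustrative note (added): `deal_maxmin` is defined elsewhere in this script; the call
# sites above suggest it performs column-wise min-max scaling over the concatenated
# train+test frame. A minimal sketch of that idea, under that assumption (hypothetical
# helper name, not the project's actual implementation):
#
#     def _minmax_scale_sketch(frame):
#         return (frame - frame.min()) / (frame.max() - frame.min())
#
#     scaled = _minmax_scale_sketch(pd.concat([traindata[['rapid_times']],
#                                              testdata[['rapid_times']]]))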
import os
import time
import csv
import torch
import torch.nn as nn
from mvcnn import Model
from args import get_parser
import torch.nn.functional as F
from dataset import MultiViewDataSet, preprocess
from torch.utils.data import DataLoader
# from helpers.logger import Logger
# import util
import numpy as np
from pathlib import Path
import pandas as pd
from tqdm import tqdm
from sklearn.metrics import classification_report, confusion_matrix, f1_score
# torch.use_deterministic_algorithms(True)
seed = 1
torch.manual_seed(seed)
import random
random.seed(seed)
np.random.seed(seed)
class Controller(object):
def __init__(self, args):
self.args = args
self.device = torch.device(args.device if torch.cuda.is_available() and args.device != 'cpu' else 'cpu')
self.model = nn.DataParallel(Model(args.model, args.pretrained, args.emb_dim, args.n_class))
self.model.to(self.device)
def train(self, train_loader, val_loader):
optimizer = torch.optim.Adam(self.model.parameters(), lr=self.args.lr)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=self.args.epoch)
best_acc, best_loss, patience_count, start_epoch = 0.0, 1e9, 0, 0
weights = train_loader.dataset.weights.to(self.device)
indices = torch.repeat_interleave(torch.arange(self.args.batch), self.args.views).to(self.device)
# logger = Logger(self.args.model_path)
if self.args.resume:
best_acc, start_epoch, optimizer = self.load()
for epoch in range(start_epoch, self.args.epoch):
epoch_loss = .0
total, correct = 0, 0
start = time.time()
self.model.train()
for x, yt in train_loader:
x, yt = x.to(self.device), yt.to(self.device)
xi, xm, yp = self.model(x)
if self.args.regime == 'supervised':
loss = Model.ce_loss(yp, yt, weights)
elif self.args.regime == 'contrastive':
loss = Model.jsd_loss(xi, xm, indices)
elif self.args.regime == 'hybrid':
loss = Model.ce_loss(yp, yt, weights) + Model.jsd_loss(xi, xm, indices)
epoch_loss += loss.item()
optimizer.zero_grad()
loss.backward()
optimizer.step()
if self.args.regime != 'contrastive':
_, yp = torch.max(yp.data, 1)
total += yt.size(0)
correct += (yp == yt).sum().item()
train_acc = 100 * correct / total if self.args.regime != 'contrastive' else .0
end = time.time()
self.model.eval()
val_acc, val_loss = self.eval(val_loader)
if self.args.regime != 'contrastive' and val_acc > best_acc:
best_acc = val_acc
if not os.path.exists(self.args.model_path):
os.mkdir(self.args.model_path)
torch.save(self.model.module.state_dict(), f'{self.args.model_path}/model-best.pth')
# torch.save(self.model.state_dict(), f'{self.args.model_path}/model-best.pth')
print(f'Epoch {epoch + 1}/{self.args.epoch} | Time: {end - start:.2f}s '
f'| Train Loss: {epoch_loss / len(train_loader): .4f} | Train Acc: {train_acc:.2f}% | '
f'Val Loss: {val_loss:.4f} | '
f'Val Acc: {val_acc:.2f}% | Best Acc: {best_acc:.2f}%')
# Log epoch to tensorboard
# See log using: tensorboard --logdir='args.model_path' --host localhost
# util.logEpoch(logger, self.model, epoch + 1, val_loss, val_acc)
if best_loss > val_loss:
best_loss = val_loss
patience_count = 0
if self.args.regime == 'contrastive':
if not os.path.exists(self.args.model_path):
os.mkdir(self.args.model_path)
# torch.save(self.model.state_dict(), f'{self.args.model_path}/model-best.pth')
torch.save(self.model.module.state_dict(), f'{self.args.model_path}/model-best.pth')
else:
patience_count += 1
if patience_count == self.args.patience:
print(f'Early stopping at epoch {epoch} ...')
break
scheduler.step()
# save model
if not os.path.exists(self.args.model_path):
os.mkdir(self.args.model_path)
# torch.save(self.model.state_dict(), f'{self.args.model_path}/model-last.pth')
torch.save(self.model.module.state_dict(), f'{self.args.model_path}/model-last.pth')
# save labels
labels = train_loader.dataset.classes
with open(f'{self.args.model_path}/labels.csv', 'w', newline='', encoding='utf-8') as csvfile:
writer = csv.writer(csvfile)
writer.writerow(labels)
# print out evaluation report
print("Validation report after training:")
try:
embeddings, predictions = self.encode(val_loader, self.args.model_path + "/model-last.pth")
            self.print_classification_report(val_loader, predictions, top_k=1)
except Exception as e:
print(e)
@torch.no_grad()
def eval(self, data_loader, load_model=False):
weights = data_loader.dataset.weights.to(self.device)
total, correct = 0, 0
total_loss = 0.0
if load_model:
self.load()
# test
for x, yt in tqdm(data_loader, desc="Evaluating model"):
x, yt = x.to(self.device), yt.to(self.device)
xi, xm, yp = self.model(x)
if self.args.regime == 'supervised':
loss = Model.ce_loss(yp, yt, weights)
elif self.args.regime == 'contrastive':
indices = torch.repeat_interleave(torch.arange(x.size(0)), self.args.views).to(self.device)
loss = Model.jsd_loss(xi, xm, indices)
elif self.args.regime == 'hybrid':
indices = torch.repeat_interleave(torch.arange(x.size(0)), self.args.views).to(self.device)
loss = Model.ce_loss(yp, yt, weights) + Model.jsd_loss(xi, xm, indices)
total_loss += loss.item()
if self.args.regime != 'contrastive':
_, yp = torch.max(yp.data, 1)
total += yt.size(0)
correct += (yp == yt).sum().item()
val_acc = 100 * correct / total if self.args.regime != 'contrastive' else .0
val_loss = total_loss / len(data_loader)
return val_acc, val_loss
@torch.no_grad()
def encode(self, data_loader, model_path):
try:
self.model.load_state_dict(torch.load(model_path))
except:
state_dict = torch.load(model_path)
from collections import OrderedDict
new_state_dict = OrderedDict()
for k, v in state_dict.items():
if 'module' not in k:
k = 'module.' + k
else:
k = k.replace('features.module.', 'module.features.')
new_state_dict[k] = v
self.model.load_state_dict(new_state_dict)
self.model.eval()
emb, pred = [], []
for x, __ in tqdm(data_loader, desc='Embedding...'):
x = x.to(self.device)
__, x, y = self.model(x)
emb.append(x)
pred.append(y)
x = torch.cat(emb, 0).detach().cpu().numpy()
y = F.softmax(torch.cat(pred, 0), dim=-1)
return x, y
def save_embeddings(self, data_loader, embs, classes):
names = [Path(item).parts[-2] for item in data_loader.dataset.x]
embedding_df = pd.DataFrame(list(zip(classes, names, embs)), columns=["class_name", "part_name", "vector"])
dest = Path(self.args.model_path) / (Path(self.args.model_path).parts[-1] + '_embeddings')
os.makedirs(dest, exist_ok=True)
for class_name in tqdm(data_loader.dataset.classes, desc='Saving embeddings...'):
class_embedding = embedding_df[embedding_df['class_name'] == class_name].to_numpy()
np.save(dest / (class_name + "_embeddings"), class_embedding)
def load(self): # Does not work
print('\n==> Loading checkpoint..')
model_path = self.args.model_path + "/model-last.pth"
assert os.path.isfile(model_path), f'Error: no checkpoint file found in {model_path}!'
checkpoint = torch.load(model_path)
best_acc = checkpoint['best_acc']
start_epoch = checkpoint['epoch']
self.model.load_state_dict(checkpoint['state_dict'])
optimizer = torch.optim.Adam(self.model.parameters(), lr=self.args.lr)
optimizer.load_state_dict(checkpoint['optimizer'])
return best_acc, start_epoch, optimizer
def print_classification_report(self, encode_loader, predictions, top_k):
import matplotlib.pyplot as plt
import seaborn as sn
gt_classes = [encode_loader.dataset.classes[item] for item in encode_loader.dataset.y]
gt_classes_idx = [item for item in encode_loader.dataset.y]
if top_k == 1:
pred_classes_idx = np.argmax(predictions.detach().cpu().numpy(), axis=1)
pred_classes = [encode_loader.dataset.classes[item] for item in pred_classes_idx]
label = encode_loader.dataset.classes
print(f"f1 micro precision: {f1_score(gt_classes, pred_classes, average='micro')}")
print(classification_report(gt_classes, pred_classes, labels=label))
cf = confusion_matrix(gt_classes, pred_classes, normalize='true', labels=label)
if not os.path.exists('logs/'):
os.makedirs('logs/')
plt.figure(figsize=(24, 18))
sn.heatmap(cf, annot=False, fmt='.2f', cmap='Blues', xticklabels=label, yticklabels=label)
plt.xticks(size='xx-large', rotation=45)
plt.yticks(size='xx-large', rotation=45)
plt.tight_layout()
plt.savefig(fname=f'logs/{Path(self.args.model_path).parts[-1]}.pdf', format='pdf')
plt.show()
else:
log = pd.DataFrame(columns=['f1 micro'])
for top in top_k:
print(f"Calculating for top-{top}")
final_predictions_idx = np.argmax(predictions.detach().cpu().numpy(), axis=1)
top_predictions = torch.topk(predictions, top).indices
for i in range(len(top_predictions)):
for pred in top_predictions[i]:
if pred == gt_classes_idx[i]:
final_predictions_idx[i] = pred
pred_classes = [encode_loader.dataset.classes[item] for item in final_predictions_idx]
label = encode_loader.dataset.classes
f1_micro = f1_score(gt_classes, pred_classes, average='micro')
print(f"Top-{top} f1 micro precision: {f1_micro}")
# print(classification_report(gt_classes, pred_classes, labels=label))
                log = log.append(pd.Series({'f1 micro': f1_micro}, name=top))
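                # Note on the loop above (illustrative): whenever the ground-truth class
                # appears anywhere in the model's top-k probabilities, the argmax prediction
                # is replaced by the ground truth before computing micro F1, so a sample
                # counts as correct if the true class is among the k best guesses.
                # A standalone sketch of the same idea (hypothetical tensor names):
                #     topk_idx = torch.topk(probs, k).indices              # (n_samples, k)
                #     hit = (topk_idx == targets.unsqueeze(1)).any(dim=1)
                #     topk_accuracy = hit.float().mean()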
"""
Get tweets from mongodb in a dataframe
"""
import pymongo
import os
import pandas as pd
import datetime
import numpy as np
connectionfile = open("connectionstring")
connectionstring = connectionfile.read()
connectionfile.close()
client = pymongo.MongoClient(connectionstring)
db = client["twitter"]
afilter = {"compound": {"$exists": True}}
def get_as_dataframe(stocks):
dfs = dict.fromkeys(stocks)
for stock in stocks:
df = (pd.DataFrame.from_records(db[stock].find(afilter)))
#ldf = df.loc[(df["compound"] >= 0.025)]
#gdf = df.loc[df["compound"] <= -0.025]
#df = pd.concat([ldf, gdf])
dfs[stock] = df
return dfs
def get_prices(stock):
afilter = {"volume": {"$exists": True}}
df = pd.DataFrame.from_records(db[stock].find(afilter))
df["date"] = df["date"].dt.tz_localize("UTC")
return df
def send_as_dataframe(dfs):
for stock, df in dfs.items():
        records = df.to_dict('records')
        for adict in records:
            db[stock].update_one({'_id': adict['_id']}, {'$set': adict})
def add_weights(dfs):
for stock, df in dfs.items():
df["weight"] = df['followers']
return dfs
def convert_times(dfs):
for stock, df in dfs.items():
        df["date"] = pd.to_datetime(df["date"], format="%a %b %d %H:%M:%S %z %Y")
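# Usage sketch (illustrative; assumes a reachable MongoDB behind the connection string read
# above, and collection names that actually exist in the "twitter" database; the tickers
# below are hypothetical):
#     dfs = get_as_dataframe(["AAPL", "TSLA"])
#     dfs = add_weights(dfs)
#     convert_times(dfs)            # parses the date column of each DataFrame in place
#     prices = get_prices("AAPL")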
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 3 12:51:57 2021
@author: Administrator
"""
import pandas as pd
import numpy as np
from pandas import DataFrame, Series
def apply(decorator):
def decorate(cls):
for attr in cls.__dict__:
if callable(getattr(cls, attr)):
setattr(cls, attr, decorator(getattr(cls, attr)))
return cls
return decorate
class TA:
__version__ = "1.2"
@classmethod
def SMA(cls, ohlc: DataFrame, period: int = 41, column: str = "close") -> Series:
"""
Simple moving average - rolling mean in pandas lingo. Also known as 'MA'.
The simple moving average (SMA) is the most basic of the moving averages used for trading.
"""
return pd.Series(
ohlc[column].rolling(window=period).mean(),
name="{0} period SMA".format(period),
)
@classmethod
def SMM(cls, ohlc: DataFrame, period: int = 9, column: str = "close") -> Series:
"""
Simple moving median, an alternative to moving average. SMA, when used to estimate the underlying trend in a time series,
is susceptible to rare events such as rapid shocks or other anomalies. A more robust estimate of the trend is the simple moving median over n time periods.
"""
return pd.Series(
ohlc[column].rolling(window=period).median(),
name="{0} period SMM".format(period),
)
@classmethod
def SSMA(
cls,
ohlc: DataFrame,
period: int = 9,
column: str = "close",
adjust: bool = True,
) -> Series:
"""
Smoothed simple moving average.
:param ohlc: data
:param period: range
:param column: open/close/high/low column of the DataFrame
:return: result Series
"""
return pd.Series(
ohlc[column]
.ewm(ignore_na=False, alpha=1.0 / period, min_periods=0, adjust=adjust)
.mean(),
name="{0} period SSMA".format(period),
)
@classmethod
def EMA(
cls,
ohlc: DataFrame,
period: int = 9,
column: str = "close",
adjust: bool = True,
) -> Series:
"""
Exponential Weighted Moving Average - Like all moving average indicators, they are much better suited for trending markets.
When the market is in a strong and sustained uptrend, the EMA indicator line will also show an uptrend and vice-versa for a down trend.
EMAs are commonly used in conjunction with other indicators to confirm significant market moves and to gauge their validity.
"""
return pd.Series(
ohlc[column].ewm(span=period, adjust=adjust).mean(),
name="{0} period EMA".format(period),
)
@classmethod
def DEMA(
cls,
ohlc: DataFrame,
period: int = 9,
column: str = "close",
adjust: bool = True,
) -> Series:
"""
Double Exponential Moving Average - attempts to remove the inherent lag associated to Moving Averages
by placing more weight on recent values. The name suggests this is achieved by applying a double exponential
smoothing which is not the case. The name double comes from the fact that the value of an EMA (Exponential Moving Average) is doubled.
To keep it in line with the actual data and to remove the lag the value 'EMA of EMA' is subtracted from the previously doubled EMA.
Because EMA(EMA) is used in the calculation, DEMA needs 2 * period -1 samples to start producing values in contrast to the period
samples needed by a regular EMA
"""
DEMA = (
2 * cls.EMA(ohlc, period)
- cls.EMA(ohlc, period).ewm(span=period, adjust=adjust).mean()
)
return pd.Series(DEMA, name="{0} period DEMA".format(period))
@classmethod
def TEMA(cls, ohlc: DataFrame, period: int = 9, adjust: bool = True) -> Series:
"""
Triple exponential moving average - attempts to remove the inherent lag associated to Moving Averages by placing more weight on recent values.
The name suggests this is achieved by applying a triple exponential smoothing which is not the case. The name triple comes from the fact that the
value of an EMA (Exponential Moving Average) is triple.
To keep it in line with the actual data and to remove the lag the value 'EMA of EMA' is subtracted 3 times from the previously tripled EMA.
Finally 'EMA of EMA of EMA' is added.
Because EMA(EMA(EMA)) is used in the calculation, TEMA needs 3 * period - 2 samples to start producing values in contrast to the period samples
needed by a regular EMA.
"""
triple_ema = 3 * cls.EMA(ohlc, period)
ema_ema_ema = (
cls.EMA(ohlc, period)
.ewm(ignore_na=False, span=period, adjust=adjust)
.mean()
.ewm(ignore_na=False, span=period, adjust=adjust)
.mean()
)
TEMA = (
triple_ema
- 3 * cls.EMA(ohlc, period).ewm(span=period, adjust=adjust).mean()
+ ema_ema_ema
)
return pd.Series(TEMA, name="{0} period TEMA".format(period))
@classmethod
def TRIMA(cls, ohlc: DataFrame, period: int = 18) -> Series:
"""
The Triangular Moving Average (TRIMA) [also known as TMA] represents an average of prices,
but places weight on the middle prices of the time period.
The calculations double-smooth the data using a window width that is one-half the length of the series.
source: https://www.thebalance.com/triangular-moving-average-tma-description-and-uses-1031203
"""
SMA = cls.SMA(ohlc, period).rolling(window=period).sum()
return pd.Series(SMA / period, name="{0} period TRIMA".format(period))
@classmethod
def TRIX(
cls,
ohlc: DataFrame,
period: int = 20,
column: str = "close",
adjust: bool = True,
) -> Series:
"""
The TRIX indicator calculates the rate of change of a triple exponential moving average.
The values oscillate around zero. Buy/sell signals are generated when the TRIX crosses above/below zero.
A (typically) 9 period exponential moving average of the TRIX can be used as a signal line.
A buy/sell signals are generated when the TRIX crosses above/below the signal line and is also above/below zero.
The TRIX was developed by <NAME>, publisher of Technical Analysis of Stocks & Commodities magazine,
and was introduced in Volume 1, Number 5 of that magazine.
"""
data = ohlc[column]
def _ema(data, period, adjust):
return pd.Series(data.ewm(span=period, adjust=adjust).mean())
m = _ema(_ema(_ema(data, period, adjust), period, adjust), period, adjust)
return pd.Series(100 * (m.diff() / m), name="{0} period TRIX".format(period))
@classmethod
def LWMA(cls, ohlc: DataFrame, period: int, column: str = "close") -> Series:
"""
Linear Weighted Moving Average
"""
raise NotImplementedError
@classmethod
def VAMA(cls, ohlcv: DataFrame, period: int = 8, column: str = "close") -> Series:
"""
Volume Adjusted Moving Average
"""
vp = ohlcv["volume"] * ohlcv[column]
volsum = ohlcv["volume"].rolling(window=period).mean()
volRatio = pd.Series(vp / volsum, name="VAMA")
cumSum = (volRatio * ohlcv[column]).rolling(window=period).sum()
cumDiv = volRatio.rolling(window=period).sum()
return pd.Series(cumSum / cumDiv, name="{0} period VAMA".format(period))
@classmethod
def VIDYA(
cls,
ohlcv: DataFrame,
period: int = 9,
smoothing_period: int = 12,
column: str = "close",
) -> Series:
""" Vidya (variable index dynamic average) indicator is a modification of the traditional Exponential Moving Average (EMA) indicator.
The main difference between EMA and Vidya is in the way the smoothing factor F is calculated.
In EMA the smoothing factor is a constant value F=2/(period+1);
in Vidya the smoothing factor is variable and depends on bar-to-bar price movements."""
raise NotImplementedError
@classmethod
def ER(cls, ohlc: DataFrame, period: int = 10, column: str = "close") -> Series:
"""The Kaufman Efficiency indicator is an oscillator indicator that oscillates between +100 and -100, where zero is the center point.
+100 is upward forex trending market and -100 is downwards trending markets."""
change = ohlc[column].diff(period).abs()
volatility = ohlc[column].diff().abs().rolling(window=period).sum()
return pd.Series(change / volatility, name="{0} period ER".format(period))
@classmethod
def KAMA(
cls,
ohlc: DataFrame,
er: int = 10,
ema_fast: int = 2,
ema_slow: int = 30,
period: int = 20,
column: str = "close",
) -> Series:
"""Developed by <NAME>, Kaufman's Adaptive Moving Average (KAMA) is a moving average designed to account for market noise or volatility.
Its main advantage is that it takes into consideration not just the direction, but the market volatility as well."""
er = cls.ER(ohlc, er)
fast_alpha = 2 / (ema_fast + 1)
slow_alpha = 2 / (ema_slow + 1)
sc = pd.Series(
(er * (fast_alpha - slow_alpha) + slow_alpha) ** 2,
name="smoothing_constant",
) ## smoothing constant
sma = pd.Series(
ohlc[column].rolling(period).mean(), name="SMA"
) ## first KAMA is SMA
kama = []
# Current KAMA = Prior KAMA + smoothing_constant * (Price - Prior KAMA)
for s, ma, price in zip(
sc.iteritems(), sma.shift().iteritems(), ohlc[column].iteritems()
):
try:
kama.append(kama[-1] + s[1] * (price[1] - kama[-1]))
except (IndexError, TypeError):
if pd.notnull(ma[1]):
kama.append(ma[1] + s[1] * (price[1] - ma[1]))
else:
kama.append(None)
sma["KAMA"] = pd.Series(
kama, index=sma.index, name="{0} period KAMA.".format(period)
) ## apply the kama list to existing index
return sma["KAMA"]
@classmethod
def ZLEMA(
cls,
ohlc: DataFrame,
period: int = 26,
adjust: bool = True,
column: str = "close",
) -> Series:
"""ZLEMA is an abbreviation of Zero Lag Exponential Moving Average. It was developed by <NAME> and <NAME>.
ZLEMA is a kind of Exponential moving average but its main idea is to eliminate the lag arising from the very nature of the moving averages
and other trend following indicators. As it follows price closer, it also provides better price averaging and responds better to price swings."""
        lag = int((period - 1) / 2)  # diff() requires an integer number of periods
ema = pd.Series(
(ohlc[column] + (ohlc[column].diff(lag))),
name="{0} period ZLEMA.".format(period),
)
zlema = pd.Series(
ema.ewm(span=period, adjust=adjust).mean(),
name="{0} period ZLEMA".format(period),
)
return zlema
@classmethod
def WMA(cls, ohlc: DataFrame, period: int = 9, column: str = "close") -> Series:
"""
WMA stands for weighted moving average. It helps to smooth the price curve for better trend identification.
It places even greater importance on recent data than the EMA does.
:period: Specifies the number of Periods used for WMA calculation
"""
d = (period * (period + 1)) / 2 # denominator
weights = np.arange(1, period + 1)
def linear(w):
def _compute(x):
return (w * x).sum() / d
return _compute
_close = ohlc[column].rolling(period, min_periods=period)
wma = _close.apply(linear(weights), raw=True)
return pd.Series(wma, name="{0} period WMA.".format(period))
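    # Worked example (illustrative): for period = 4 the weights are [1, 2, 3, 4] and the
    # denominator is d = 4 * 5 / 2 = 10, so each value is
    #     (1*x[t-3] + 2*x[t-2] + 3*x[t-1] + 4*x[t]) / 10,
    # i.e. the newest price in the window gets four times the weight of the oldest one.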
@classmethod
def HMA(cls, ohlc: DataFrame, period: int = 16) -> Series:
"""
HMA indicator is a common abbreviation of Hull Moving Average.
The average was developed by <NAME> and is used mainly to identify the current market trend.
Unlike SMA (simple moving average) the curve of Hull moving average is considerably smoother.
Moreover, because its aim is to minimize the lag between HMA and price it does follow the price activity much closer.
It is used especially for middle-term and long-term trading.
:period: Specifies the number of Periods used for WMA calculation
"""
import math
half_length = int(period / 2)
sqrt_length = int(math.sqrt(period))
wmaf = cls.WMA(ohlc, period=half_length)
wmas = cls.WMA(ohlc, period=period)
ohlc["deltawma"] = 2 * wmaf - wmas
hma = cls.WMA(ohlc, column="deltawma", period=sqrt_length)
return pd.Series(hma, name="{0} period HMA.".format(period))
@classmethod
def EVWMA(cls, ohlcv: DataFrame, period: int = 20) -> Series:
"""
The eVWMA can be looked at as an approximation to the
average price paid per share in the last n periods.
:period: Specifies the number of Periods used for eVWMA calculation
"""
vol_sum = (
ohlcv["volume"].rolling(window=period).sum()
) # floating shares in last N periods
x = (vol_sum - ohlcv["volume"]) / vol_sum
y = (ohlcv["volume"] * ohlcv["close"]) / vol_sum
evwma = [0]
# evwma = (evma[-1] * (vol_sum - volume)/vol_sum) + (volume * price / vol_sum)
for x, y in zip(x.fillna(0).iteritems(), y.iteritems()):
if x[1] == 0 or y[1] == 0:
evwma.append(0)
else:
evwma.append(evwma[-1] * x[1] + y[1])
return pd.Series(
evwma[1:], index=ohlcv.index, name="{0} period EVWMA.".format(period),
)
@classmethod
def VWAP(cls, ohlcv: DataFrame) -> Series:
"""
The volume weighted average price (VWAP) is a trading benchmark used especially in pension plans.
VWAP is calculated by adding up the dollars traded for every transaction (price multiplied by number of shares traded) and then dividing
by the total shares traded for the day.
"""
return pd.Series(
((ohlcv["volume"] * cls.TP(ohlcv)).cumsum()) / ohlcv["volume"].cumsum(),
name="VWAP.",
)
@classmethod
def SMMA(
cls,
ohlc: DataFrame,
period: int = 42,
column: str = "close",
adjust: bool = True,
) -> Series:
"""The SMMA (Smoothed Moving Average) gives recent prices an equal weighting to historic prices."""
return pd.Series(
ohlc[column].ewm(alpha=1 / period, adjust=adjust).mean(), name="SMMA"
)
@classmethod
def ALMA(
cls, ohlc: DataFrame, period: int = 9, sigma: int = 6, offset: int = 0.85
) -> Series:
"""Arnaud Legoux Moving Average."""
"""dataWindow = _.last(data, period)
size = _.size(dataWindow)
m = offset * (size - 1)
s = size / sigma
sum = 0
norm = 0
for i in [size-1..0] by -1
coeff = Math.exp(-1 * (i - m) * (i - m) / 2 * s * s)
sum = sum + dataWindow[i] * coeff
norm = norm + coeff
return sum / norm"""
raise NotImplementedError
@classmethod
def MAMA(cls, ohlc: DataFrame, period: int = 16) -> Series:
"""MESA Adaptive Moving Average"""
raise NotImplementedError
@classmethod
def FRAMA(cls, ohlc: DataFrame, period: int = 16, batch: int=10) -> Series:
"""Fractal Adaptive Moving Average
Source: http://www.stockspotter.com/Files/frama.pdf
Adopted from: https://www.quantopian.com/posts/frama-fractal-adaptive-moving-average-in-python
        :period: Specifies the number of periods used for FRAMA calculation
:batch: Specifies the size of batches used for FRAMA calculation
"""
        assert period % 2 == 0, "FRAMA period must be even"
c = ohlc.close.copy()
window = batch * 2
hh = c.rolling(batch).max()
ll = c.rolling(batch).min()
n1 = (hh - ll) / batch
n2 = n1.shift(batch)
hh2 = c.rolling(window).max()
ll2 = c.rolling(window).min()
n3 = (hh2 - ll2) / window
# calculate fractal dimension
D = (np.log(n1 + n2) - np.log(n3)) / np.log(2)
alp = np.exp(-4.6 * (D - 1))
alp = np.clip(alp, .01, 1).values
filt = c.values
for i, x in enumerate(alp):
cl = c.values[i]
if i < window:
continue
filt[i] = cl * x + (1 - x) * filt[i - 1]
return pd.Series(filt, index=ohlc.index, name="{0} period FRAMA.".format(period))
@classmethod
def MACD(
cls,
ohlc: DataFrame,
period_fast: int = 12,
period_slow: int = 26,
signal: int = 9,
column: str = "close",
adjust: bool = True,
) -> DataFrame:
"""
MACD, MACD Signal and MACD difference.
The MACD Line oscillates above and below the zero line, which is also known as the centerline.
These crossovers signal that the 12-day EMA has crossed the 26-day EMA. The direction, of course, depends on the direction of the moving average cross.
Positive MACD indicates that the 12-day EMA is above the 26-day EMA. Positive values increase as the shorter EMA diverges further from the longer EMA.
This means upside momentum is increasing. Negative MACD values indicates that the 12-day EMA is below the 26-day EMA.
Negative values increase as the shorter EMA diverges further below the longer EMA. This means downside momentum is increasing.
Signal line crossovers are the most common MACD signals. The signal line is a 9-day EMA of the MACD Line.
As a moving average of the indicator, it trails the MACD and makes it easier to spot MACD turns.
A bullish crossover occurs when the MACD turns up and crosses above the signal line.
A bearish crossover occurs when the MACD turns down and crosses below the signal line.
"""
EMA_fast = pd.Series(
ohlc[column].ewm(ignore_na=False, span=period_fast, adjust=adjust).mean(),
name="EMA_fast",
)
EMA_slow = pd.Series(
ohlc[column].ewm(ignore_na=False, span=period_slow, adjust=adjust).mean(),
name="EMA_slow",
)
MACD = pd.Series(EMA_fast - EMA_slow, name="MACD")
MACD_signal = pd.Series(
MACD.ewm(ignore_na=False, span=signal, adjust=adjust).mean(), name="SIGNAL"
)
return pd.concat([MACD, MACD_signal], axis=1)
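    # Usage sketch (illustrative): the signal-line crossovers described in the docstring can
    # be detected from the two returned columns, e.g.
    #     macd = TA.MACD(ohlc)
    #     bullish = (macd["MACD"] > macd["SIGNAL"]) & (macd["MACD"].shift() <= macd["SIGNAL"].shift())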
@classmethod
def PPO(
cls,
ohlc: DataFrame,
period_fast: int = 12,
period_slow: int = 26,
signal: int = 9,
column: str = "close",
adjust: bool = True,
) -> DataFrame:
"""
Percentage Price Oscillator
PPO, PPO Signal and PPO difference.
As with MACD, the PPO reflects the convergence and divergence of two moving averages.
While MACD measures the absolute difference between two moving averages, PPO makes this a relative value by dividing the difference by the slower moving average
"""
EMA_fast = pd.Series(
ohlc[column].ewm(ignore_na=False, span=period_fast, adjust=adjust).mean(),
name="EMA_fast",
)
EMA_slow = pd.Series(
ohlc[column].ewm(ignore_na=False, span=period_slow, adjust=adjust).mean(),
name="EMA_slow",
)
PPO = pd.Series(((EMA_fast - EMA_slow) / EMA_slow) * 100, name="PPO")
PPO_signal = pd.Series(
PPO.ewm(ignore_na=False, span=signal, adjust=adjust).mean(), name="SIGNAL"
)
PPO_histo = pd.Series(PPO - PPO_signal, name="HISTO")
return pd.concat([PPO, PPO_signal, PPO_histo], axis=1)
@classmethod
def VW_MACD(
cls,
ohlcv: DataFrame,
period_fast: int = 12,
period_slow: int = 26,
signal: int = 9,
column: str = "close",
adjust: bool = True,
) -> DataFrame:
""""Volume-Weighted MACD" is an indicator that shows how a volume-weighted moving average can be used to calculate moving average convergence/divergence (MACD).
This technique was first used by <NAME>, CMT, and has been written about since at least 2002."""
vp = ohlcv["volume"] * ohlcv[column]
_fast = pd.Series(
(vp.ewm(ignore_na=False, span=period_fast, adjust=adjust).mean())
/ (
ohlcv["volume"]
.ewm(ignore_na=False, span=period_fast, adjust=adjust)
.mean()
),
name="_fast",
)
_slow = pd.Series(
(vp.ewm(ignore_na=False, span=period_slow, adjust=adjust).mean())
/ (
ohlcv["volume"]
.ewm(ignore_na=False, span=period_slow, adjust=adjust)
.mean()
),
name="_slow",
)
MACD = pd.Series(_fast - _slow, name="MACD")
MACD_signal = pd.Series(
MACD.ewm(ignore_na=False, span=signal, adjust=adjust).mean(), name="SIGNAL"
)
return pd.concat([MACD, MACD_signal], axis=1)
@classmethod
def EV_MACD(
cls,
ohlcv: DataFrame,
period_fast: int = 20,
period_slow: int = 40,
signal: int = 9,
adjust: bool = True,
) -> DataFrame:
"""
Elastic Volume Weighted MACD is a variation of standard MACD,
calculated using two EVWMA's.
:period_slow: Specifies the number of Periods used for the slow EVWMA calculation
:period_fast: Specifies the number of Periods used for the fast EVWMA calculation
:signal: Specifies the number of Periods used for the signal calculation
"""
evwma_slow = cls.EVWMA(ohlcv, period_slow)
evwma_fast = cls.EVWMA(ohlcv, period_fast)
MACD = pd.Series(evwma_fast - evwma_slow, name="MACD")
MACD_signal = pd.Series(
MACD.ewm(ignore_na=False, span=signal, adjust=adjust).mean(), name="SIGNAL"
)
return pd.concat([MACD, MACD_signal], axis=1)
@classmethod
def MOM(cls, ohlc: DataFrame, period: int = 10, column: str = "close") -> Series:
"""Market momentum is measured by continually taking price differences for a fixed time interval.
To construct a 10-day momentum line, simply subtract the closing price 10 days ago from the last closing price.
This positive or negative value is then plotted around a zero line."""
        return pd.Series(ohlc[column].diff(period), name="MOM")
@classmethod
def ROC(cls, ohlc: DataFrame, period: int = 12, column: str = "close") -> Series:
"""The Rate-of-Change (ROC) indicator, which is also referred to as simply Momentum,
is a pure momentum oscillator that measures the percent change in price from one period to the next.
The ROC calculation compares the current price with the price “n” periods ago."""
return pd.Series(
(ohlc[column].diff(period) / ohlc[column].shift(period)) * 100, name="ROC"
)
@classmethod
def VBM(
cls,
ohlc: DataFrame,
roc_period: int = 12,
atr_period: int = 26,
column: str = "close",
) -> Series:
"""The Volatility-Based-Momentum (VBM) indicator, The calculation for a volatility based momentum (VBM)
indicator is very similar to ROC, but divides by the security’s historical volatility instead.
The average true range indicator (ATR) is used to compute historical volatility.
VBM(n,v) = (Close — Close n periods ago) / ATR(v periods)
"""
        return pd.Series(
            ohlc[column].diff(roc_period) / cls.ATR(ohlc, atr_period),
            name="VBM",
        )
@classmethod
def RSI(
cls,
ohlc: DataFrame,
period: int = 14,
column: str = "close",
adjust: bool = True,
) -> Series:
"""Relative Strength Index (RSI) is a momentum oscillator that measures the speed and change of price movements.
RSI oscillates between zero and 100. Traditionally, and according to Wilder, RSI is considered overbought when above 70 and oversold when below 30.
Signals can also be generated by looking for divergences, failure swings and centerline crossovers.
RSI can also be used to identify the general trend."""
## get the price diff
delta = ohlc[column].diff()
## positive gains (up) and negative gains (down) Series
up, down = delta.copy(), delta.copy()
up[up < 0] = 0
down[down > 0] = 0
# EMAs of ups and downs
_gain = up.ewm(alpha=1.0 / period, adjust=adjust).mean()
_loss = down.abs().ewm(alpha=1.0 / period, adjust=adjust).mean()
RS = _gain / _loss
return pd.Series(100 - (100 / (1 + RS)), name="{0} period RSI".format(period))
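    # Usage sketch (illustrative), following the 70/30 convention quoted in the docstring:
    #     rsi = TA.RSI(ohlc)
    #     overbought, oversold = rsi > 70, rsi < 30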
@classmethod
def IFT_RSI(
cls,
ohlc: DataFrame,
column: str = "close",
rsi_period: int = 5,
wma_period: int = 9,
) -> Series:
"""Modified Inverse Fisher Transform applied on RSI.
Suggested method to use any IFT indicator is to buy when the indicator crosses over –0.5 or crosses over +0.5
if it has not previously crossed over –0.5 and to sell short when the indicators crosses under +0.5 or crosses under –0.5
if it has not previously crossed under +0.5."""
# v1 = .1 * (rsi - 50)
v1 = pd.Series(0.1 * (cls.RSI(ohlc, rsi_period) - 50), name="v1")
# v2 = WMA(wma_period) of v1
d = (wma_period * (wma_period + 1)) / 2 # denominator
weights = np.arange(1, wma_period + 1)
def linear(w):
def _compute(x):
return (w * x).sum() / d
return _compute
_wma = v1.rolling(wma_period, min_periods=wma_period)
v2 = _wma.apply(linear(weights), raw=True)
ift = pd.Series(((v2 ** 2 - 1) / (v2 ** 2 + 1)), name="IFT_RSI")
return ift
@classmethod
def SWI(cls, ohlc: DataFrame, period: int = 16) -> Series:
"""Sine Wave indicator"""
raise NotImplementedError
@classmethod
def DYMI(
cls, ohlc: DataFrame, column: str = "close", adjust: bool = True
) -> Series:
"""
The Dynamic Momentum Index is a variable term RSI. The RSI term varies from 3 to 30. The variable
time period makes the RSI more responsive to short-term moves. The more volatile the price is,
the shorter the time period is. It is interpreted in the same way as the RSI, but provides signals earlier.
Readings below 30 are considered oversold, and levels over 70 are considered overbought. The indicator
oscillates between 0 and 100.
https://www.investopedia.com/terms/d/dynamicmomentumindex.asp
"""
def _get_time(close):
# Value available from 14th period
sd = close.rolling(5).std()
asd = sd.rolling(10).mean()
v = sd / asd
t = 14 / v.round()
t[t.isna()] = 0
t = t.map(lambda x: int(min(max(x, 5), 30)))
return t
def _dmi(index):
time = t.iloc[index]
if (index - time) < 0:
subset = ohlc.iloc[0:index]
else:
subset = ohlc.iloc[(index - time) : index]
return cls.RSI(subset, period=time, adjust=adjust).values[-1]
dates = Series(ohlc.index)
periods = Series(range(14, len(dates)), index=ohlc.index[14:].values)
t = _get_time(ohlc[column])
return periods.map(lambda x: _dmi(x))
@classmethod
def TR(cls, ohlc: DataFrame) -> Series:
"""True Range is the maximum of three price ranges.
Most recent period's high minus the most recent period's low.
Absolute value of the most recent period's high minus the previous close.
Absolute value of the most recent period's low minus the previous close."""
TR1 = pd.Series(ohlc["high"] - ohlc["low"]).abs() # True Range = High less Low
TR2 = pd.Series(
ohlc["high"] - ohlc["close"].shift()
).abs() # True Range = High less Previous Close
TR3 = pd.Series(
ohlc["close"].shift() - ohlc["low"]
).abs() # True Range = Previous Close less Low
_TR = pd.concat([TR1, TR2, TR3], axis=1)
_TR["TR"] = _TR.max(axis=1)
return pd.Series(_TR["TR"], name="TR")
@classmethod
def ATR(cls, ohlc: DataFrame, period: int = 14) -> Series:
"""Average True Range is moving average of True Range."""
TR = cls.TR(ohlc)
return pd.Series(
TR.rolling(center=False, window=period).mean(),
name="{0} period ATR".format(period),
)
@classmethod
def SAR(cls, ohlc: DataFrame, af: int = 0.02, amax: int = 0.2) -> Series:
"""SAR stands for “stop and reverse,” which is the actual indicator used in the system.
SAR trails price as the trend extends over time. The indicator is below prices when prices are rising and above prices when prices are falling.
In this regard, the indicator stops and reverses when the price trend reverses and breaks above or below the indicator."""
high, low = ohlc.high, ohlc.low
# Starting values
sig0, xpt0, af0 = True, high[0], af
_sar = [low[0] - (high - low).std()]
for i in range(1, len(ohlc)):
sig1, xpt1, af1 = sig0, xpt0, af0
lmin = min(low[i - 1], low[i])
lmax = max(high[i - 1], high[i])
if sig1:
sig0 = low[i] > _sar[-1]
xpt0 = max(lmax, xpt1)
else:
sig0 = high[i] >= _sar[-1]
xpt0 = min(lmin, xpt1)
if sig0 == sig1:
sari = _sar[-1] + (xpt1 - _sar[-1]) * af1
af0 = min(amax, af1 + af)
if sig0:
af0 = af0 if xpt0 > xpt1 else af1
sari = min(sari, lmin)
else:
af0 = af0 if xpt0 < xpt1 else af1
sari = max(sari, lmax)
else:
af0 = af
sari = xpt0
_sar.append(sari)
return pd.Series(_sar, index=ohlc.index)
@classmethod
def PSAR(cls, ohlc: DataFrame, iaf: int = 0.02, maxaf: int = 0.2) -> DataFrame:
"""
The parabolic SAR indicator, developed by <NAME>, is used by traders to determine trend direction and potential reversals in price.
The indicator uses a trailing stop and reverse method called "SAR," or stop and reverse, to identify suitable exit and entry points.
Traders also refer to the indicator as the parabolic stop and reverse, parabolic SAR, or PSAR.
https://www.investopedia.com/terms/p/parabolicindicator.asp
https://virtualizedfrog.wordpress.com/2014/12/09/parabolic-sar-implementation-in-python/
"""
length = len(ohlc)
high, low, close = ohlc.high, ohlc.low, ohlc.close
psar = close[0 : len(close)]
psarbull = [None] * length
psarbear = [None] * length
bull = True
af = iaf
hp = high[0]
lp = low[0]
for i in range(2, length):
if bull:
psar[i] = psar[i - 1] + af * (hp - psar[i - 1])
else:
psar[i] = psar[i - 1] + af * (lp - psar[i - 1])
reverse = False
if bull:
if low[i] < psar[i]:
bull = False
reverse = True
psar[i] = hp
lp = low[i]
af = iaf
else:
if high[i] > psar[i]:
bull = True
reverse = True
psar[i] = lp
hp = high[i]
af = iaf
if not reverse:
if bull:
if high[i] > hp:
hp = high[i]
af = min(af + iaf, maxaf)
if low[i - 1] < psar[i]:
psar[i] = low[i - 1]
if low[i - 2] < psar[i]:
psar[i] = low[i - 2]
else:
if low[i] < lp:
lp = low[i]
af = min(af + iaf, maxaf)
if high[i - 1] > psar[i]:
psar[i] = high[i - 1]
if high[i - 2] > psar[i]:
psar[i] = high[i - 2]
if bull:
psarbull[i] = psar[i]
else:
psarbear[i] = psar[i]
psar = pd.Series(psar, name="psar", index=ohlc.index)
        psarbull = pd.Series(psarbull, name="psarbull", index=ohlc.index)
        psarbear = pd.Series(psarbear, name="psarbear", index=ohlc.index)
return pd.concat([psar, psarbull, psarbear], axis=1)
@classmethod
def BBANDS(
cls,
ohlc: DataFrame,
period: int = 20,
MA: Series = None,
column: str = "close",
std_multiplier: float = 2,
) -> DataFrame:
"""
Developed by <NAME>, Bollinger Bands® are volatility bands placed above and below a moving average.
Volatility is based on the standard deviation, which changes as volatility increases and decreases.
The bands automatically widen when volatility increases and narrow when volatility decreases.
This method allows input of some other form of moving average like EMA or KAMA around which BBAND will be formed.
        Pass the desired moving average as the <MA> argument, for example TA.BBANDS(ohlc, MA=TA.KAMA(ohlc, period=20)).
"""
std = ohlc[column].rolling(window=period).std()
if not isinstance(MA, pd.core.series.Series):
middle_band = pd.Series(cls.SMA(ohlc, period), name="BB_MIDDLE")
else:
middle_band = pd.Series(MA, name="BB_MIDDLE")
upper_bb = pd.Series(middle_band + (std_multiplier * std), name="BB_UPPER")
lower_bb = pd.Series(middle_band - (std_multiplier * std), name="BB_LOWER")
return pd.concat([upper_bb, middle_band, lower_bb], axis=1)
@classmethod
def MOBO(
cls,
ohlc: DataFrame,
period: int = 10,
std_multiplier: float = 0.8,
column: str = "close",
) -> DataFrame:
"""
"MOBO bands are based on a zone of 0.80 standard deviation with a 10 period look-back"
If the price breaks out of the MOBO band it can signify a trend move or price spike
Contains 42% of price movements(noise) within bands.
"""
        BB = TA.BBANDS(ohlc, period=period, std_multiplier=std_multiplier, column=column)
return BB
@classmethod
def BBWIDTH(
cls, ohlc: DataFrame, period: int = 20, MA: Series = None, column: str = "close"
) -> Series:
"""Bandwidth tells how wide the Bollinger Bands are on a normalized basis."""
BB = TA.BBANDS(ohlc, period, MA, column)
return pd.Series(
(BB["BB_UPPER"] - BB["BB_LOWER"]) / BB["BB_MIDDLE"],
            name="{0} period BBWIDTH".format(period),
)
@classmethod
def PERCENT_B(
cls, ohlc: DataFrame, period: int = 20, MA: Series = None, column: str = "close"
) -> Series:
"""
%b (pronounced 'percent b') is derived from the formula for Stochastics and shows where price is in relation to the bands.
%b equals 1 at the upper band and 0 at the lower band.
"""
BB = TA.BBANDS(ohlc, period, MA, column)
percent_b = pd.Series(
(ohlc["close"] - BB["BB_LOWER"]) / (BB["BB_UPPER"] - BB["BB_LOWER"]),
name="%b",
)
return percent_b
@classmethod
def KC(
cls,
ohlc: DataFrame,
period: int = 20,
atr_period: int = 10,
MA: Series = None,
kc_mult: float = 2,
) -> DataFrame:
"""Keltner Channels [KC] are volatility-based envelopes set above and below an exponential moving average.
This indicator is similar to Bollinger Bands, which use the standard deviation to set the bands.
Instead of using the standard deviation, Keltner Channels use the Average True Range (ATR) to set channel distance.
The channels are typically set two Average True Range values above and below the 20-day EMA.
The exponential moving average dictates direction and the Average True Range sets channel width.
Keltner Channels are a trend following indicator used to identify reversals with channel breakouts and channel direction.
Channels can also be used to identify overbought and oversold levels when the trend is flat."""
if not isinstance(MA, pd.core.series.Series):
middle = pd.Series(cls.EMA(ohlc, period), name="KC_MIDDLE")
else:
middle = pd.Series(MA, name="KC_MIDDLE")
up = pd.Series(middle + (kc_mult * cls.ATR(ohlc, atr_period)), name="KC_UPPER")
down = pd.Series(
middle - (kc_mult * cls.ATR(ohlc, atr_period)), name="KC_LOWER"
)
return pd.concat([up, down], axis=1)
@classmethod
def DO(
cls, ohlc: DataFrame, upper_period: int = 20, lower_period: int = 5
) -> DataFrame:
"""Donchian Channel, a moving average indicator developed by <NAME>.
It plots the highest high and lowest low over the last period time intervals."""
upper = pd.Series(
ohlc["high"].rolling(center=False, window=upper_period).max(), name="UPPER"
)
lower = pd.Series(
ohlc["low"].rolling(center=False, window=lower_period).min(), name="LOWER"
)
middle = pd.Series((upper + lower) / 2, name="MIDDLE")
return pd.concat([lower, middle, upper], axis=1)
@classmethod
def DMI(cls, ohlc: DataFrame, period: int = 14, adjust: bool = True) -> DataFrame:
"""The directional movement indicator (also known as the directional movement index - DMI) is a valuable tool
for assessing price direction and strength. This indicator was created in 1978 by <NAME>, who also created the popular
relative strength index. DMI tells you when to be long or short.
It is especially useful for trend trading strategies because it differentiates between strong and weak trends,
allowing the trader to enter only the strongest trends.
source: https://www.tradingview.com/wiki/Directional_Movement_(DMI)#CALCULATION
:period: Specifies the number of Periods used for DMI calculation
"""
ohlc["up_move"] = ohlc["high"].diff()
ohlc["down_move"] = -ohlc["low"].diff()
# positive Dmi
def _dmp(row):
if row["up_move"] > row["down_move"] and row["up_move"] > 0:
return row["up_move"]
else:
return 0
# negative Dmi
def _dmn(row):
if row["down_move"] > row["up_move"] and row["down_move"] > 0:
return row["down_move"]
else:
return 0
ohlc["plus"] = ohlc.apply(_dmp, axis=1)
ohlc["minus"] = ohlc.apply(_dmn, axis=1)
diplus = pd.Series(
100
* (ohlc["plus"] / cls.ATR(ohlc, period))
.ewm(alpha=1 / period, adjust=adjust)
.mean(),
name="DI+",
)
diminus = pd.Series(
100
* (ohlc["minus"] / cls.ATR(ohlc, period))
.ewm(alpha=1 / period, adjust=adjust)
.mean(),
name="DI-",
)
return pd.concat([diplus, diminus], axis=1)
@classmethod
def ADX(cls, ohlc: DataFrame, period: int = 14, adjust: bool = True) -> Series:
"""The A.D.X. is 100 * smoothed moving average of absolute value (DMI +/-) divided by (DMI+ + DMI-). ADX does not indicate trend direction or momentum,
only trend strength. Generally, A.D.X. readings below 20 indicate trend weakness,
and readings above 40 indicate trend strength. An extremely strong trend is indicated by readings above 50"""
dmi = cls.DMI(ohlc, period)
return pd.Series(
100
* (abs(dmi["DI+"] - dmi["DI-"]) / (dmi["DI+"] + dmi["DI-"]))
.ewm(alpha=1 / period, adjust=adjust)
.mean(),
name="{0} period ADX.".format(period),
)
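    # Reading guide (illustrative), using the thresholds quoted in the docstring:
    #     adx = TA.ADX(ohlc)
    #     weak_trend, strong_trend = adx < 20, adx > 40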
@classmethod
def PIVOT(cls, ohlc: DataFrame) -> DataFrame:
"""
Pivot Points are significant support and resistance levels that can be used to determine potential trades.
The pivot points come as a technical analysis indicator calculated using a financial instrument’s high, low, and close value.
The pivot point’s parameters are usually taken from the previous day’s trading range.
This means you’ll have to use the previous day’s range for today’s pivot points.
Or, last week’s range if you want to calculate weekly pivot points or, last month’s range for monthly pivot points and so on.
"""
df = ohlc.shift() # pivot is calculated of the previous trading session
pivot = pd.Series(cls.TP(df), name="pivot") # pivot is basically a lagging TP
s1 = (pivot * 2) - df["high"]
s2 = pivot - (df["high"] - df["low"])
s3 = df["low"] - (2 * (df["high"] - pivot))
s4 = df["low"] - (3 * (df["high"] - pivot))
r1 = (pivot * 2) - df["low"]
r2 = pivot + (df["high"] - df["low"])
r3 = df["high"] + (2 * (pivot - df["low"]))
r4 = df["high"] + (3 * (pivot - df["low"]))
return pd.concat(
[
pivot,
pd.Series(s1, name="s1"),
pd.Series(s2, name="s2"),
pd.Series(s3, name="s3"),
pd.Series(s4, name="s4"),
pd.Series(r1, name="r1"),
pd.Series(r2, name="r2"),
pd.Series(r3, name="r3"),
pd.Series(r4, name="r4"),
],
axis=1,
)
@classmethod
def PIVOT_FIB(cls, ohlc: DataFrame) -> DataFrame:
"""
Fibonacci pivot point levels are determined by first calculating the classic pivot point,
then multiply the previous day’s range with its corresponding Fibonacci level.
Most traders use the 38.2%, 61.8% and 100% retracements in their calculations.
"""
df = ohlc.shift()
pp = pd.Series(cls.TP(df), name="pivot") # classic pivot
r4 = pp + ((df["high"] - df["low"]) * 1.382)
r3 = pp + ((df["high"] - df["low"]) * 1)
r2 = pp + ((df["high"] - df["low"]) * 0.618)
r1 = pp + ((df["high"] - df["low"]) * 0.382)
s1 = pp - ((df["high"] - df["low"]) * 0.382)
s2 = pp - ((df["high"] - df["low"]) * 0.618)
s3 = pp - ((df["high"] - df["low"]) * 1)
s4 = pp - ((df["high"] - df["low"]) * 1.382)
return pd.concat(
[
pp,
pd.Series(s1, name="s1"),
pd.Series(s2, name="s2"),
pd.Series(s3, name="s3"),
pd.Series(s4, name="s4"),
pd.Series(r1, name="r1"),
pd.Series(r2, name="r2"),
pd.Series(r3, name="r3"),
pd.Series(r4, name="r4"),
],
axis=1,
)
@classmethod
def STOCH(cls, ohlc: DataFrame, period: int = 14) -> Series:
"""Stochastic oscillator %K
The stochastic oscillator is a momentum indicator comparing the closing price of a security
to the range of its prices over a certain period of time.
The sensitivity of the oscillator to market movements is reducible by adjusting that time
period or by taking a moving average of the result.
"""
highest_high = ohlc["high"].rolling(center=False, window=period).max()
lowest_low = ohlc["low"].rolling(center=False, window=period).min()
STOCH = pd.Series(
(ohlc["close"] - lowest_low) / (highest_high - lowest_low) * 100,
name="{0} period STOCH %K".format(period),
)
return STOCH
@classmethod
def STOCHD(cls, ohlc: DataFrame, period: int = 3, stoch_period: int = 14) -> Series:
"""Stochastic oscillator %D
STOCH%D is a 3 period simple moving average of %K.
"""
return pd.Series(
cls.STOCH(ohlc, stoch_period).rolling(center=False, window=period).mean(),
name="{0} period STOCH %D.".format(period),
)
@classmethod
def STOCHRSI(
cls, ohlc: DataFrame, rsi_period: int = 14, stoch_period: int = 14
) -> Series:
"""StochRSI is an oscillator that measures the level of RSI relative to its high-low range over a set time period.
StochRSI applies the Stochastics formula to RSI values, instead of price values. This makes it an indicator of an indicator.
The result is an oscillator that fluctuates between 0 and 1."""
rsi = cls.RSI(ohlc, rsi_period)
return pd.Series(
((rsi - rsi.min()) / (rsi.max() - rsi.min()))
.rolling(window=stoch_period)
.mean(),
name="{0} period stochastic RSI.".format(rsi_period),
)
@classmethod
def WILLIAMS(cls, ohlc: DataFrame, period: int = 14) -> Series:
"""Williams %R, or just %R, is a technical analysis oscillator showing the current closing price in relation to the high and low
of the past N days (for a given N). It was developed by a publisher and promoter of trading materials, <NAME>.
Its purpose is to tell whether a stock or commodity market is trading near the high or the low, or somewhere in between,
of its recent trading range.
The oscillator is on a negative scale, from −100 (lowest) up to 0 (highest).
"""
highest_high = ohlc["high"].rolling(center=False, window=period).max()
lowest_low = ohlc["low"].rolling(center=False, window=period).min()
WR = pd.Series(
(highest_high - ohlc["close"]) / (highest_high - lowest_low),
name="{0} Williams %R".format(period),
)
return WR * -100
@classmethod
def UO(cls, ohlc: DataFrame, column: str = "close") -> Series:
"""Ultimate Oscillator is a momentum oscillator designed to capture momentum across three different time frames.
The multiple time frame objective seeks to avoid the pitfalls of other oscillators.
Many momentum oscillators surge at the beginning of a strong advance and then form bearish divergence as the advance continues.
This is because they are stuck with one time frame. The Ultimate Oscillator attempts to correct this fault by incorporating longer
time frames into the basic formula."""
k = [] # current low or past close
for row, _row in zip(ohlc.itertuples(), ohlc.shift(1).itertuples()):
k.append(min(row.low, _row.close))
bp = pd.Series(ohlc[column] - k, name="bp") # Buying pressure
Average7 = bp.rolling(window=7).sum() / cls.TR(ohlc).rolling(window=7).sum()
Average14 = bp.rolling(window=14).sum() / cls.TR(ohlc).rolling(window=14).sum()
Average28 = bp.rolling(window=28).sum() / cls.TR(ohlc).rolling(window=28).sum()
return pd.Series(
(100 * ((4 * Average7) + (2 * Average14) + Average28)) / (4 + 2 + 1)
)
@classmethod
    def AO(cls, ohlc: DataFrame, slow_period: int = 34, fast_period: int = 5) -> Series:
        """Awesome Oscillator is an indicator used to measure market momentum. AO calculates the difference of a 34 Period and 5 Period Simple Moving Averages.
The Simple Moving Averages that are used are not calculated using closing price but rather each bar's midpoints.
AO is generally used to affirm trends or to anticipate possible reversals. """
slow = pd.Series(
((ohlc["high"] + ohlc["low"]) / 2).rolling(window=slow_period).mean(),
name="slow_AO",
)
fast = pd.Series(
((ohlc["high"] + ohlc["low"]) / 2).rolling(window=fast_period).mean(),
name="fast_AO",
)
return pd.Series(fast - slow, name="AO")
@classmethod
def MI(cls, ohlc: DataFrame, period: int = 9, adjust: bool = True) -> Series:
"""Developed by <NAME>, the Mass Index uses the high-low range to identify trend reversals based on range expansions.
In this sense, the Mass Index is a volatility indicator that does not have a directional bias.
Instead, the Mass Index identifies range bulges that can foreshadow a reversal of the current trend."""
_range = pd.Series(ohlc["high"] - ohlc["low"], name="range")
EMA9 = _range.ewm(span=period, ignore_na=False, adjust=adjust).mean()
DEMA9 = EMA9.ewm(span=period, ignore_na=False, adjust=adjust).mean()
mass = EMA9 / DEMA9
return pd.Series(mass.rolling(window=25).sum(), name="Mass Index")
@classmethod
def BOP(cls, ohlc: DataFrame) -> Series:
"""Balance Of Power indicator"""
return pd.Series(
(ohlc.close - ohlc.open) / (ohlc.high - ohlc.low), name="Balance Of Power"
)
@classmethod
def VORTEX(cls, ohlc: DataFrame, period: int = 14) -> DataFrame:
"""The Vortex indicator plots two oscillating lines, one to identify positive trend movement and the other
to identify negative price movement.
Indicator construction revolves around the highs and lows of the last two days or periods.
The distance from the current high to the prior low designates positive trend movement while the
distance between the current low and the prior high designates negative trend movement.
Strongly positive or negative trend movements will show a longer length between the two numbers while
weaker positive or negative trend movement will show a shorter length."""
VMP = pd.Series((ohlc["high"] - ohlc["low"].shift()).abs())
VMM = pd.Series((ohlc["low"] - ohlc["high"].shift()).abs())
VMPx = VMP.rolling(window=period).sum()
VMMx = VMM.rolling(window=period).sum()
TR = cls.TR(ohlc).rolling(window=period).sum()
VIp = pd.Series(VMPx / TR, name="VIp").interpolate(method="index")
VIm = pd.Series(VMMx / TR, name="VIm").interpolate(method="index")
return pd.concat([VIm, VIp], axis=1)
@classmethod
def KST(
cls, ohlc: DataFrame, r1: int = 10, r2: int = 15, r3: int = 20, r4: int = 30
) -> DataFrame:
"""Know Sure Thing (KST) is a momentum oscillator based on the smoothed rate-of-change for four different time frames.
KST measures price momentum for four different price cycles. It can be used just like any momentum oscillator.
Chartists can look for divergences, overbought/oversold readings, signal line crossovers and centerline crossovers."""
r1 = cls.ROC(ohlc, r1).rolling(window=10).mean()
r2 = cls.ROC(ohlc, r2).rolling(window=10).mean()
r3 = cls.ROC(ohlc, r3).rolling(window=10).mean()
r4 = cls.ROC(ohlc, r4).rolling(window=15).mean()
k = pd.Series((r1 * 1) + (r2 * 2) + (r3 * 3) + (r4 * 4), name="KST")
signal = pd.Series(k.rolling(window=10).mean(), name="signal")
return pd.concat([k, signal], axis=1)
@classmethod
def TSI(
cls,
ohlc: DataFrame,
long: int = 25,
short: int = 13,
signal: int = 13,
column: str = "close",
adjust: bool = True,
) -> DataFrame:
"""True Strength Index (TSI) is a momentum oscillator based on a double smoothing of price changes."""
## Double smoother price change
momentum = pd.Series(ohlc[column].diff()) ## 1 period momentum
_EMA25 = pd.Series(
momentum.ewm(span=long, min_periods=long - 1, adjust=adjust).mean(),
name="_price change EMA25",
)
_DEMA13 = pd.Series(
_EMA25.ewm(span=short, min_periods=short - 1, adjust=adjust).mean(),
name="_price change double smoothed DEMA13",
)
## Double smoothed absolute price change
absmomentum = pd.Series(ohlc[column].diff().abs())
_aEMA25 = pd.Series(
absmomentum.ewm(span=long, min_periods=long - 1, adjust=adjust).mean(),
name="_abs_price_change EMA25",
)
_aDEMA13 = pd.Series(
_aEMA25.ewm(span=short, min_periods=short - 1, adjust=adjust).mean(),
name="_abs_price_change double smoothed DEMA13",
)
TSI = pd.Series((_DEMA13 / _aDEMA13) * 100, name="TSI")
signal = pd.Series(
TSI.ewm(span=signal, min_periods=signal - 1, adjust=adjust).mean(),
name="signal",
)
return pd.concat([TSI, signal], axis=1)
@classmethod
def TP(cls, ohlc: DataFrame) -> Series:
"""Typical Price refers to the arithmetic average of the high, low, and closing prices for a given period."""
return pd.Series((ohlc["high"] + ohlc["low"] + ohlc["close"]) / 3, name="TP")
@classmethod
def ADL(cls, ohlcv: DataFrame) -> Series:
"""The accumulation/distribution line was created by <NAME> to determine the flow of money into or out of a security.
It should not be confused with the advance/decline line. While their initials might be the same, these are entirely different indicators,
and their uses are different as well. Whereas the advance/decline line can provide insight into market movements,
the accumulation/distribution line is of use to traders looking to measure buy/sell pressure on a security or confirm the strength of a trend."""
MFM = pd.Series(
((ohlcv["close"] - ohlcv["low"])
- (ohlcv["high"] - ohlcv["close"])) / (ohlcv["high"] - ohlcv["low"]),
name="MFM",
) # Money flow multiplier
MFV = pd.Series(MFM * ohlcv["volume"], name="MFV")
return MFV.cumsum()
@classmethod
def CHAIKIN(cls, ohlcv: DataFrame, adjust: bool = True) -> Series:
"""Chaikin Oscillator, named after its creator, <NAME>, the Chaikin oscillator is an oscillator that measures the accumulation/distribution
line of the moving average convergence divergence (MACD). The Chaikin oscillator is calculated by subtracting a 10-day exponential moving average (EMA)
of the accumulation/distribution line from a three-day EMA of the accumulation/distribution line, and highlights the momentum implied by the
accumulation/distribution line."""
return pd.Series(
cls.ADL(ohlcv).ewm(span=3, min_periods=2, adjust=adjust).mean()
- cls.ADL(ohlcv).ewm(span=10, min_periods=9, adjust=adjust).mean()
)
@classmethod
def MFI(cls, ohlc: DataFrame, period: int = 14) -> Series:
"""The money flow index (MFI) is a momentum indicator that measures
the inflow and outflow of money into a security over a specific period of time.
MFI can be understood as RSI adjusted for volume.
The money flow indicator is one of the more reliable indicators of overbought and oversold conditions, perhaps partly because
it uses the higher readings of 80 and 20 as compared to the RSI's overbought/oversold readings of 70 and 30"""
tp = cls.TP(ohlc)
rmf = pd.Series(tp * ohlc["volume"], name="rmf") ## Real Money Flow
_mf = pd.concat([tp, rmf], axis=1)
_mf["delta"] = _mf["TP"].diff()
def pos(row):
if row["delta"] > 0:
return row["rmf"]
else:
return 0
def neg(row):
if row["delta"] < 0:
return row["rmf"]
else:
return 0
_mf["neg"] = _mf.apply(neg, axis=1)
_mf["pos"] = _mf.apply(pos, axis=1)
mfratio = pd.Series(
_mf["pos"].rolling(window=period).sum()
/ _mf["neg"].rolling(window=period).sum()
)
return pd.Series(
100 - (100 / (1 + mfratio)), name="{0} period MFI".format(period)
)
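    # Usage sketch (assumptions: the enclosing class is exposed as `TA`, as in finta, and
    # `ohlc` carries 'high', 'low', 'close' and 'volume'). It turns the 80/20 thresholds
    # mentioned in the docstring above into boolean flags.
    #
    #   mfi = TA.MFI(ohlc, period=14)
    #   overbought = mfi > 80
    #   oversold = mfi < 20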
@classmethod
def OBV(cls, ohlcv: DataFrame, column: str = "close") -> Series:
"""
On Balance Volume (OBV) measures buying and selling pressure as a cumulative indicator that adds volume on up days and subtracts volume on down days.
OBV was developed by <NAME> and introduced in his 1963 book, Granville's New Key to Stock Market Profits.
It was one of the first indicators to measure positive and negative volume flow.
Chartists can look for divergences between OBV and price to predict price movements or use OBV to confirm price trends.
source: https://en.wikipedia.org/wiki/On-balance_volume#The_formula
        :param pd.DataFrame ohlcv: 'open, high, low, close, volume' pandas DataFrame
:return pd.Series: result is pandas.Series
"""
ohlcv["OBV"] = np.nan
neg_change = ohlcv[column] < ohlcv[column].shift(1)
pos_change = ohlcv[column] > ohlcv[column].shift(1)
if pos_change.any():
ohlcv.loc[pos_change, "OBV"] = ohlcv["volume"]
if neg_change.any():
ohlcv.loc[neg_change, "OBV"] = -ohlcv["volume"]
return pd.Series(ohlcv["OBV"].cumsum(), name="OBV")
@classmethod
def WOBV(cls, ohlcv: DataFrame, column: str = "close") -> Series:
"""
Weighted OBV
Can also be seen as an OBV indicator that takes the price differences into account.
In a regular OBV, a high volume bar can make a huge difference,
        even if the price went up by only 0.01. If it goes down by 0.01
instead, that huge volume makes the OBV go down, even though
hardly anything really happened.
"""
wobv = pd.Series(ohlcv["volume"] * ohlcv[column].diff(), name="WOBV")
return wobv.cumsum()
@classmethod
def VZO(
cls,
ohlc: DataFrame,
period: int = 14,
column: str = "close",
adjust: bool = True,
) -> Series:
"""VZO uses price, previous price and moving averages to compute its oscillating value.
It is a leading indicator that calculates buy and sell signals based on oversold / overbought conditions.
Oscillations between the 5% and 40% levels mark a bullish trend zone, while oscillations between -40% and 5% mark a bearish trend zone.
Meanwhile, readings above 40% signal an overbought condition, while readings above 60% signal an extremely overbought condition.
Alternatively, readings below -40% indicate an oversold condition, which becomes extremely oversold below -60%."""
sign = lambda a: (a > 0) - (a < 0)
r = ohlc[column].diff().apply(sign) * ohlc["volume"]
dvma = r.ewm(span=period, adjust=adjust).mean()
vma = ohlc["volume"].ewm(span=period, adjust=adjust).mean()
return pd.Series(100 * (dvma / vma), name="VZO")
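    # Interpretation sketch for the zones described in the docstring (assumes the class
    # is exposed as `TA` and `ohlc` is an OHLCV DataFrame):
    #
    #   vzo = TA.VZO(ohlc, period=14)
    #   bullish_zone = vzo.between(5, 40)
    #   bearish_zone = vzo.between(-40, 5)
    #   extreme_overbought = vzo > 60
    #   extreme_oversold = vzo < -60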
@classmethod
def PZO(
cls,
ohlc: DataFrame,
period: int = 14,
column: str = "close",
adjust: bool = True,
) -> Series:
"""
The formula for PZO depends on only one condition: if today's closing price is higher than yesterday's closing price,
then the closing price will have a positive value (bullish); otherwise it will have a negative value (bearish).
source: http://traders.com/Documentation/FEEDbk_docs/2011/06/Khalil.html
:period: Specifies the number of Periods used for PZO calculation
"""
sign = lambda a: (a > 0) - (a < 0)
r = ohlc[column].diff().apply(sign) * ohlc[column]
cp = pd.Series(r.ewm(span=period, adjust=adjust).mean())
tc = cls.EMA(ohlc, period)
return pd.Series(100 * (cp / tc), name="{} period PZO".format(period))
@classmethod
def EFI(
cls,
ohlcv: DataFrame,
period: int = 13,
column: str = "close",
adjust: bool = True,
) -> Series:
"""Elder's Force Index is an indicator that uses price and volume to assess the power
behind a move or identify possible turning points."""
# https://tradingsim.com/blog/elders-force-index/
fi = pd.Series(ohlcv[column].diff() * ohlcv["volume"])
return pd.Series(
fi.ewm(ignore_na=False, span=period, adjust=adjust).mean(),
name="{0} period Force Index".format(period),
)
@classmethod
def CFI(
cls, ohlcv: DataFrame, column: str = "close", adjust: bool = True
) -> Series:
"""
        Cumulative Force Index.
Adopted from Elder's Force Index.
"""
fi1 = pd.Series(ohlcv["volume"] * ohlcv[column].diff())
cfi = pd.Series(
fi1.ewm(ignore_na=False, min_periods=9, span=10, adjust=adjust).mean(),
name="CFI",
)
return cfi.cumsum()
@classmethod
def EBBP(cls, ohlc: DataFrame) -> DataFrame:
"""Bull power and bear power by Dr. <NAME> show where today’s high and low lie relative to the a 13-day EMA"""
bull_power = pd.Series(ohlc["high"] - cls.EMA(ohlc, 13), name="Bull.")
bear_power = pd.Series(ohlc["low"] - cls.EMA(ohlc, 13), name="Bear.")
return pd.concat([bull_power, bear_power], axis=1)
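    # Example reading of the two columns returned above ('Bull.' and 'Bear.'); `TA` and
    # `ohlc` are assumed names. A simple Elder-style long filter wants bull power positive
    # while bear power is rising:
    #
    #   ebbp = TA.EBBP(ohlc)
    #   bullish = (ebbp["Bull."] > 0) & (ebbp["Bear."].diff() > 0)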
@classmethod
def EMV(cls, ohlcv: Series, period: int = 14) -> Series:
"""Ease of Movement (EMV) is a volume-based oscillator that fluctuates above and below the zero line.
As its name implies, it is designed to measure the 'ease' of price movement.
        Prices are advancing with relative ease when the oscillator is in positive territory.
Conversely, prices are declining with relative ease when the oscillator is in negative territory."""
distance = pd.Series(
((ohlcv["high"] + ohlcv["low"]) / 2)
- (ohlcv["high"].shift() + ohlcv["low"].shift()) / 2
)
box_ratio = pd.Series(
(ohlcv["volume"] / 1000000) / (ohlcv["high"] - ohlcv["low"])
)
_emv = pd.Series(distance / box_ratio)
return pd.Series(
_emv.rolling(window=period).mean(), name="{0} period EMV.".format(period)
)
@classmethod
def CCI(cls, ohlc: DataFrame, period: int = 20, constant: float = 0.015) -> Series:
"""Commodity Channel Index (CCI) is a versatile indicator that can be used to identify a new trend or warn of extreme conditions.
CCI measures the current price level relative to an average price level over a given period of time.
The CCI typically oscillates above and below a zero line. Normal oscillations will occur within the range of +100 and −100.
Readings above +100 imply an overbought condition, while readings below −100 imply an oversold condition.
As with other overbought/oversold indicators, this means that there is a large probability that the price will correct to more representative levels.
source: https://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:commodity_channel_index_cci
:param pd.DataFrame ohlc: 'open, high, low, close' pandas DataFrame
:period: int - number of periods to take into consideration
:factor float: the constant at .015 to ensure that approximately 70 to 80 percent of CCI values would fall between -100 and +100.
:return pd.Series: result is pandas.Series
"""
tp = cls.TP(ohlc)
tp_rolling = tp.rolling(window=period, min_periods=0)
# calculate MAD (Mean Deviation)
# https://www.khanacademy.org/math/statistics-probability/summarizing-quantitative-data/other-measures-of-spread/a/mean-absolute-deviation-mad-review
mad = tp_rolling.apply(lambda s: abs(s - s.mean()).mean(), raw=True)
return pd.Series(
(tp - tp_rolling.mean()) / (constant * mad),
name="{0} period CCI".format(period),
)
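    # Usage sketch tying the ±100 bands from the docstring to boolean flags
    # (`TA` is an assumed alias for this class, `ohlc` a hypothetical OHLC DataFrame):
    #
    #   cci = TA.CCI(ohlc, period=20)
    #   overbought = cci > 100
    #   oversold = cci < -100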
@classmethod
def COPP(cls, ohlc: DataFrame, adjust: bool = True) -> Series:
"""The Coppock Curve is a momentum indicator, it signals buying opportunities when the indicator moved from negative territory to positive territory."""
roc1 = cls.ROC(ohlc, 14)
roc2 = cls.ROC(ohlc, 11)
return pd.Series(
(roc1 + roc2).ewm(span=10, min_periods=9, adjust=adjust).mean(),
name="Coppock Curve",
)
@classmethod
def BASP(cls, ohlc: DataFrame, period: int = 40, adjust: bool = True) -> DataFrame:
"""BASP indicator serves to identify buying and selling pressure."""
sp = ohlc["high"] - ohlc["close"]
bp = ohlc["close"] - ohlc["low"]
spavg = sp.ewm(span=period, adjust=adjust).mean()
bpavg = bp.ewm(span=period, adjust=adjust).mean()
nbp = bp / bpavg
nsp = sp / spavg
varg = ohlc["volume"].ewm(span=period, adjust=adjust).mean()
nv = ohlc["volume"] / varg
nbfraw = pd.Series(nbp * nv, name="Buy.")
nsfraw = pd.Series(nsp * nv, name="Sell.")
return pd.concat([nbfraw, nsfraw], axis=1)
@classmethod
def BASPN(cls, ohlc: DataFrame, period: int = 40, adjust: bool = True) -> DataFrame:
"""
Normalized BASP indicator
"""
sp = ohlc["high"] - ohlc["close"]
bp = ohlc["close"] - ohlc["low"]
spavg = sp.ewm(span=period, adjust=adjust).mean()
bpavg = bp.ewm(span=period, adjust=adjust).mean()
nbp = bp / bpavg
nsp = sp / spavg
varg = ohlc["volume"].ewm(span=period, adjust=adjust).mean()
nv = ohlc["volume"] / varg
nbf = pd.Series((nbp * nv).ewm(span=20, adjust=adjust).mean(), name="Buy.")
nsf = pd.Series((nsp * nv).ewm(span=20, adjust=adjust).mean(), name="Sell.")
return pd.concat([nbf, nsf], axis=1)
@classmethod
def CMO(
cls,
ohlc: DataFrame,
period: int = 9,
factor: int = 100,
column: str = "close",
adjust: bool = True,
) -> DataFrame:
"""
Chande Momentum Oscillator (CMO) - technical momentum indicator invented by the technical analyst <NAME>.
It is created by calculating the difference between the sum of all recent gains and the sum of all recent losses and then
dividing the result by the sum of all price movement over the period.
This oscillator is similar to other momentum indicators such as the Relative Strength Index and the Stochastic Oscillator
because it is range bounded (+100 and -100)."""
# get the price diff
delta = ohlc[column].diff()
# positive gains (up) and negative gains (down) Series
up, down = delta.copy(), delta.copy()
up[up < 0] = 0
down[down > 0] = 0
# EMAs of ups and downs
_gain = up.ewm(com=period, adjust=adjust).mean()
_loss = down.ewm(com=period, adjust=adjust).mean().abs()
        return pd.Series(factor * ((_gain - _loss) / (_gain + _loss)), name="CMO")
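    # Worked numeric check of the range-bounded claim above: with smoothed gains of 1.5
    # and smoothed losses of 0.5, CMO = 100 * (1.5 - 0.5) / (1.5 + 0.5) = 50; with all
    # gains (_loss == 0) the expression reaches +100, and with all losses it reaches -100.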
import netifaces
import pyshark
import os
import csv
import multiprocessing
import pandas as pd
import subprocess
from scipy.io import arff
import threading
import time
import random
import logging
import mysql.connector
from shutil import copyfile, rmtree
from sklearn.model_selection import train_test_split
from keras import utils as U
import tensorflow as tf
from tensorflow import keras
import MLP
import Utils as utils
import RetrainModels as rtm
import DbSetUp as dtb
import datetime
import numpy
import matplotlib.pyplot as plt
import BlockIps as bl
from pymouse import PyMouse
from datetime import date
from collections import Counter
logging._warn_preinit_stderr = 0
logging.basicConfig(filename='log/app.log', filemode='w+', format='%(process)d - %(thread)s - %(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger()
utils.setLogger(logger)
#root directory
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
#false positive Counter
falsePackets = 0
falseFlows = 0
#decisions Counter
flowsT = 0
flowsA = 0
packA = 0
packT = 0
#databases indicators
flowWorkingDb = 1
trafficWorkingDb = 1
#those correspond to packet model
trainingTimeout = 360
fullTrainingTimeout = 7200
#those correspond to flow models
fullFTrainingTimeout = 100000
trainingFTimeout = 3600
start = check = end = time.time()
startF = checkF = endF = time.time()
numeric_types = [int, float, complex]
modelFlow = MLP.MLP([100,100], 147)
modelPacket = MLP.MLP([10,10], 8, optimizer='rms')
trainingLock = threading.Lock()
flowLock = threading.Lock()
config = tf.ConfigProto(
device_count={'GPU': 1},
intra_op_parallelism_threads=1,
allow_soft_placement=True
)
config.gpu_options.allow_growth = True
config.gpu_options.per_process_gpu_memory_fraction = 0.6
session = tf.Session(config=config)
keras.backend.set_session(session)
'''
Get the highest_layer, transport_layer, source_Ip, destination_Ip,
Source_Port, Destination_Port, Packet_length, Packet/Time information
about a packet
'''
def get_packet_information(packet, time, count):
global logger
try:
if packet.highest_layer != 'ARP':
ip= utils.get_ip(packet)
packets_time = 0
if float(time) != 0:
packets_time = count / float(time)
try:
#'Source Port', 'Dest Port', 'Source IP', 'Dest IP', 'Packet Length','Packets/Time', 'Packet Type',
data = [ip.src, ip.dst, int(packet[packet.transport_layer].srcport),
int(packet[packet.transport_layer].dstport), int(packet.length),
packets_time, packet.highest_layer, packet.transport_layer]
return data
except AttributeError:
data = [ip.src, ip.dst, 0, 0, int(packet.length), packets_time,
packet.highest_layer, packet.transport_layer]
return data
except (UnboundLocalError, AttributeError):
ip= utils.get_ip(packet)
if ip is None:
logger.info("The packet "+ str(count) + " wasn't identified as either IPv4 or IPv6\n")
logger.info(packet)
else:
logger.info("An unknown error has occurred with packet "+ str(count) +"\n")
logger.info(packet)
return None
'''
Handle the threatening attack by blocking the source of the traffic
'''
def handleDDoS(ip, flowip, port, origin):
utils.blockIp(ip, flowip, port, origin)
'''
Check whether the packet is dangerous or not by computing the prediction
that it is a ddos attack or not
packet- packet to be analyzed
count- the count of the packet that was reached
timeRecorded - the time at which the packet was Recorded
arriveT - time at which the packet actually arrived at
db - the currently used db
'''
def check_packet(packet, count, timeRecorded, arriveT, db):
global modelPacket, logger, falsePackets, session, packT, packA
packT += 1
try:
datat = get_packet_information(packet, arriveT, count)
if datat == None:
pass
else:
protocol = utils.get_ipvx(packet)
data, nonNumeric = utils.labelEncoder(datat, 'LiveCapture')
data = pd.DataFrame(data, columns=utils.getPacketNames())
flowId = utils.getFlowId(nonNumeric[1], nonNumeric[0], int(data['Dest Port']), int(data['Source Port']), protocol)
#once done remove the first and uncomment the second
#prediction = 0
try:
with session.as_default():
with session.graph.as_default():
modelPacket.model._make_predict_function()
prediction = modelPacket.model.predict(data)
prediction = numpy.argmax(prediction[0])
packA += prediction
print()
print("This is packet "+ str(datat) )
print("This is prediction " + str(prediction))
print("Recorded "+ str(packT) +" packs ")
print("From those "+ str(packA) + " were attacks")
print()
predictedTime = time.time() - timeRecorded
#check the percentage that this packet is part of an attack
flows = pd.DataFrame()
aux = dtb.getFlowIdFCols('finalFlow',['Flow_ID','Label'],flowId,arriveT)
if aux != []:
flows = flows.append(aux, ignore_index=True)
aux = dtb.getFlowIdFCols('newFlow0',['Flow_ID','Label'],flowId,arriveT)
if aux != []:
flows = flows.append(aux, ignore_index=True)
aux = dtb.getFlowIdFCols('newFlow1',['Flow_ID','Label'],flowId,arriveT)
if aux != []:
flows = flows.append(aux, ignore_index=True)
aux = dtb.getFlowIdFCols('newFlow2',['Flow_ID','Label'],flowId,arriveT)
if aux != []:
flows = flows.append(aux, ignore_index=True)
packets = pd.DataFrame()
aux = dtb.getFlowIdPCols('finalPackets',['Flow_ID', 'Predict'],flowId,arriveT)
if aux != []:
packets = packets.append(aux, ignore_index = True)
aux = dtb.getFlowIdPCols(db,['Flow_ID', 'Predict'],flowId,arriveT)
if aux != []:
packets = packets.append(aux, ignore_index = True)
pred = 0
if flows.empty and packets.empty:
pred = prediction
else:
packets = packets.append(pd.DataFrame([[flowId,prediction]]), ignore_index = True)
if not flows.empty:
flows.columns = ['Flow_ID', 'Label']
else:
flows = pd.DataFrame(columns = ['Flow_ID', 'Label'])
if not packets.empty:
packets.columns = ['Flow_ID', 'Predict']
else:
packets = pd.DataFrame(columns = ['Flow_ID', 'Predict'])
pred = utils.getTargets(packets, flows)[0]
if pred != prediction:
logger.info("Found a possible false positive in packets check")
falsePackets += 1
if pred == 0 :
print("Packet Not attack")
print()
print()
insert = threading.Thread(name="check_packet"+str(count), target = dtb.insert_packet, args=(data, nonNumeric, protocol, prediction, arriveT, predictedTime, predictedTime, db))
insert.start()
insert.join()
elif pred == 1:
print("Packet Attack")
print()
print()
handleAttack = threading.Thread(target = handleDDoS, args=(nonNumeric[1], flowId,data['Source Port'], 'Packet'), daemon=True)
handleAttack.start()
handleAttack.join()
handledTime = time.time() - timeRecorded
insert = threading.Thread(name="check_packet"+str(count), target = dtb.insert_packet, args=(data, nonNumeric, protocol, prediction, arriveT, predictedTime, handledTime, db))
insert.start()
insert.join()
else:
logger.warning("There is an unexpected prediction answer "+ str(prediction))
except Exception as e:
logging.error(e)
except KeyboardInterrupt as e :
global logger
logger.info("Program interrupted")
return
'''
There exists an issue with the CICFlowMeter's conversion from captured packets
to flows; as such, only the live recording of flows is allowed (as this is not
a main part of the topic it is not to be dealt with)
'''
def check_flow(time, count):
global logger
logger.info("Flow is checked somewhere else")
'''
This function checks when a training was ended so that the used model can change
'''
def changeUsedPacketModel():
global logger
try:
training = False
global trainingLock, modelPacket
while(True):
if trainingLock.locked():
training = True
elif training == True:
modelPacket.model.load_weights('trafficModels/currentModel.h5')
print("Model was changed")
training = False
else:
pass
except KeyboardInterrupt as e:
print("Program was stopped")
return
except Exception as e:
logger.error("Exception occurred", exc_info=True)
'''
this function checks if it is the time for one of the packet db to be trained
by checking if there exists any other training currently in progress and if
the time for training was reached.
'''
def checkTrainingTimesP(count):
global start, check, end, logger, fullTrainingTimeout, ROOT_DIR, trainingLock, trainingTimeout, trafficWorkingDb
if end - start >= fullTrainingTimeout:
start = check = time.time()
nameTraining = ''
#stop any active refiting
for t in multiprocessing.active_children():
if t.name in ['TrainingPacket1', 'TrainingPacket2', 'TrainingPacket0']:
nameTraining = str(t.name).replace('TrainingPacket','newPackets')
t.stop()
t.join()
#try to remove the epoch folders
if os.path.exists(ROOT_DIR + "/filePacketTrained"):
try:
rmtree(ROOT_DIR + "/filePacketTrained")
except OSError as e:
print ("Error: %s - %s." % (e.filename, e.strerror))
dtb.insertPTable(nameTraining)
logger.info("Fully retrain at count "+ str(count))
print(count)
#move all existent data in the main db
dtb.insertPTable('newPackets'+str(trafficWorkingDb))
fullTraining = rtm.retrainPacketModel(args=('finalPackets', trainingLock),
name="finalPacketsTraining", daemon=True)
logger.info("Started training a completely new model for checking packets")
fullTraining.start()
#use a new db for storing
trafficWorkingDb = (trafficWorkingDb + 1) % 3
logger.info("Changed to new packet database "+ str(trafficWorkingDb))
    #if the training time is reached, check if no training is occurring
    #if another training is occurring, keep on storing information
elif end - check >= trainingTimeout:
check = time.time()
logger.info("Finished working with packet "+ str(trafficWorkingDb))
#check if any database is in training
#change working database to the nontraining one
changedProcess = False
for t in multiprocessing.active_children():
if t.name == 'finalPacketsTraining':
logger.info("Currently a completely new packet model is being trained")
trafficWorkingDb = (trafficWorkingDb + 1) % 3
changedProcess = True
break
elif t.name not in ['TrainingPacket0', 'TrainingPacket1', 'TrainingPacket2']:
pass
elif t.name == ("TrainingPacket" + str((trafficWorkingDb + 1) % 3)):
trafficWorkingDb = (trafficWorkingDb + 2) % 3
changedProcess = True
break
elif t.name == ("TrainingPacket" + str((trafficWorkingDb + 2) % 3)):
trafficWorkingDb = (trafficWorkingDb + 1) % 3
changedProcess = True
break
elif t.name == ("TrainingPacket" + str(trafficWorkingDb)) :
logger.error("Error: Program has been writing in the training packet database")
trafficWorkingDb = (trafficWorkingDb + 1) % 3
changedProcess = True
break
else:
pass
#if no database is training refit the current one
if changedProcess == False:
logger.info("Partial retraining at count "+ str(count))
print("Partial at "+ str(count))
nameProcess = "TrainingPacket" + str(trafficWorkingDb)
if os.path.exists(ROOT_DIR + "/filePacketTrained"):
try:
rmtree(ROOT_DIR + "/filePacketTrained")
except OSError as e:
print ("Error: %s - %s." % (e.filename, e.strerror))
training = rtm.retrainPacketModel(args=('newPackets'+str(trafficWorkingDb), trainingLock),
name=nameProcess, daemon=True)
logger.info("Started training packet "+ str(trafficWorkingDb))
training.start()
trafficWorkingDb = (trafficWorkingDb + 1) % 3
logger.info("Changed to new packet database "+ str(trafficWorkingDb))
return
'''
capture live-traffic from selected interface into the respective
thread pcap file
The function then passes the packet onto a checker and onto a time checker, meant to
determine if the time for refitting or retraining was reached
'''
def capture_interface(iface):
global trafficWorkingDb, logger, start, check, end, falseFlows, falsePackets
#save all traffic for checking for false positives and missed values
if iface == "all":
cap = pyshark.LiveCapture(output_file="traffic.pcap")
else:
cap = pyshark.LiveCapture(interface=iface, output_file="traffic.pcap")
cap.set_debug()
packet_iterator = cap.sniff_continuously
changeUsedModel = threading.Thread(name="changeUsedPacketModel", target=changeUsedPacketModel, args=())
changeUsedModel.start()
try:
start = check = time.time()
count = 0
#for each read packet
for packet in packet_iterator():
count += 1
end = time.time()
#check if packet is a threat
arriveT = packet.frame_info.time_relative
check_packet(packet, count, end, arriveT, 'newPackets' + str(trafficWorkingDb))
#check if it is time for retraining
training = threading.Thread(name = "checkTrainingTimesP", target= checkTrainingTimesP, args=(count,))
training.start()
except Exception as e:
print(e)
except KeyboardInterrupt:
print("The number of false packets were "+ str(falsePackets))
print("The number of false flows were "+ str(falseFlows))
utils.closeVers()
cap.close()
time.sleep(1)
main_thread = threading.currentThread()
for t in threading.enumerate():
if t is main_thread:
                continue
t.join()
for t in multiprocessing.active_children():
t.stop()
t.join()
exit()
#get_flow_spec()
cap.close()
'''
this function checks if a new model was created and changes the current used one to that one
'''
def changeUsedFlowModel():
global logger
try:
training = False
global flowLock, modelFlow
while(True):
if flowLock.locked():
training = True
elif training == True:
modelFlow.model.load_weights('flowModels/currentModel.h5')
print("Model was changed")
training = False
else:
pass
except KeyboardInterrupt as e :
print("Program was stopped")
return
except Exception as e:
logger.error("Exception occurred", exc_info=True)
'''
this function checks if it is the time for one of the flow db to be trained
by checking whether there is another training in execution and by checking that
the training time was reached
count - marks the number of flows reached
'''
def checkTrainingTimesF(count):
global startF, checkF, endF, fullFTrainingTimeout, logger, ROOT_DIR, trainingFTimeout, flowWorkingDb, flowLock
if endF - startF >= fullFTrainingTimeout:
startF = checkF = time.time()
nameTraining =''
#stop any refitting
        for t in multiprocessing.active_children():
if t.name in ['TrainingFlow1','TrainingFlow2','TrainingFlow0']:
nameTraining = str(t.name).replace("TrainingFlow", 'newFlow')
t.stop()
t.join()
#Try to remove epoch folders
if os.path.exists(ROOT_DIR + "/fileFlowTrained"):
try:
                rmtree(ROOT_DIR + "/fileFlowTrained")
except OSError as e:
print ("Error: %s - %s." %(e.filename, e.strerror))
dtb.insertFTable(nameTraining)
logger.info("Fully retrain at count" + str(count))
print(count)
#move all existent data in the main db
dtb.insertFTable('newFlow' + str(flowWorkingDb))
fullTraining = rtm.retrainFlowModel(args=('finalFlow',flowLock), name ='finalFlowsTraining', daemon=True)
logger.info("Started training a completely new model for checking flows")
fullTraining.start()
#change db for storing
flowWorkingDb = (flowWorkingDb + 1) % 3
logger.info("Changed to new flow database" + str(flowWorkingDb))
    #if the training time is reached, check if no other training is occurring
    #if another training is happening, keep on storing information
elif endF - checkF >= trainingFTimeout:
checkF = time.time()
logger.info("Fininshed working with flow "+ str(flowWorkingDb))
#check if any db in trainingFile
#change working db to nontraining one
changedProcess = False
for t in multiprocessing.active_children():
if t.name == 'finalFlowsTraining':
logger.info("Currently a completely new flow model is being trained")
flowWorkingDb = (flowWorkingDb + 1) % 3
changedProcess = True
break
elif t.name not in ['TrainingFlow0', 'TrainingFlow1', 'TrainingFlow2']:
pass
elif t.name == ("TrainingFlow" + str((flowWorkingDb + 1) % 3)):
flowWorkingDb = (flowWorkingDb + 2) % 3
changedProcess = True
break
elif t.name == ("TrainingFlow" + str((flowWorkingDb + 2) % 3)):
flowWorkingDb = (flowWorkingDb + 1) % 3
changedProcess = True
break
elif t.name == ("TrainingFlow" + str(flowWorkingDb)) :
logger.error("Error: Program has been writing in the training packet database")
flowWorkingDb = (flowWorkingDb + 1) % 3
changedProcess = True
break
else:
pass
#if no database is in training refit the current one
if changedProcess == False:
logger.info("Partial retraining at count "+ str(count))
print("Partial at "+ str(count))
nameProcess = "Training Flow" + str(flowWorkingDb)
if os.path.exists(ROOT_DIR + "/fileFlowTrained"):
try:
rmtree(ROOT_DIR + "/fileFlowTrained")
except OSError as e:
print ("Error: %s - %s." % (e.filename, e.strerror))
            training = rtm.retrainFlowModel(args=('newFlow'+str(flowWorkingDb), flowLock),
                                            name=nameProcess, daemon=True)
            logger.info("Started training flow "+ str(flowWorkingDb))
            training.start()
            flowWorkingDb = (flowWorkingDb + 1) % 3
            logger.info("Changed to new flow database "+ str(flowWorkingDb))
'''
flow- flow to be analyzed
timeRecorded - the time the flow was read as
arriveT - the time the flow was recorded (not started to analyze) at
db - the currently used db
count - the index of the flow being analyzed
The function obtains the converted data and tests it against a predictive
model. If the traffic is an attack, the flow is sent to mitigation and then
saved;
otherwise, it is simply saved.
'''
def flowCheck(flow, timeRecorded, arriveT, db, count):
global modelFlow, logger, session, falseFlows, flowsT, flowsA
flowsT += 1
try:
remove = ["Src IP", "Dst IP", "Label\n", "Timestamp", "Flow ID"]
df = utils.to_one_hot_encoding(flow).drop(remove, axis=1)
dat = df.drop(['Flow Byts/s', 'Flow Pkts/s'], axis=1)
df_num = dat.apply(pd.to_numeric)
df_num = df_num.select_dtypes(['number'])
dataset = df_num.to_numpy()
prediction = 0
#session.run(tf.global_variables_initializer())
try:
with session.as_default():
with session.graph.as_default():
modelFlow.model._make_predict_function()
prediction = modelFlow.model.predict(dataset)
prediction = numpy.argmax(prediction[0])
flowsA += prediction
print("Recorded "+ str(flowsT) +" flows ")
print("From those "+ str(flowsA) + " were attacks")
print()
print("This is flow check")
print(flow)
print("The prediction is: "+ str(prediction))
print()
predictedTime = time.time() - timeRecorded
flowId = flow['Flow ID']
#check if overall this flow belongs to an attack
flows = pd.DataFrame()
aux = dtb.getFlowIdFCols(db,['Flow_ID','Label'],flowId[0],arriveT)
if aux != []:
flows = flows.append(aux, ignore_index=True)
aux = dtb.getFlowIdFCols('finalFlow',['Flow_ID','Label'],flowId[0],arriveT)
if aux != []:
flows = flows.append(aux, ignore_index=True)
packets = pd.DataFrame()
aux = dtb.getFlowIdPCols('finalPackets',['Flow_ID', 'Predict'],flowId[0],arriveT)
if aux != []:
packets = packets.append(aux, ignore_index = True)
aux = dtb.getFlowIdPCols('newPackets0',['Flow_ID', 'Predict'],flowId[0],arriveT)
if aux != []:
packets = packets.append(aux, ignore_index = True)
aux = dtb.getFlowIdPCols('newPackets1',['Flow_ID', 'Predict'],flowId[0],arriveT)
if aux != []:
packets = packets.append(aux, ignore_index = True)
aux = dtb.getFlowIdPCols('newPackets2',['Flow_ID', 'Predict'],flowId[0],arriveT)
if aux != []:
packets = packets.append(aux, ignore_index = True)
pred = 0
if flows.empty and packets.empty:
pred = prediction
else:
flows = flows.append(pd.DataFrame([[flow['Flow ID'], prediction]]), ignore_index = True)
if not flows.empty:
flows.columns = ['Flow_ID', 'Label']
else:
flows = pd.DataFrame(columns = ['Flow_ID', 'Label'])
if not packets.empty:
packets.columns = ['Flow_ID', 'Predict']
else:
packets = pd.DataFrame(columns = ['Flow_ID', 'Predict'])
pred = utils.getTargetsF(flows, packets)[0]
if pred != prediction:
logger.info("Found a possible false positive in flows check")
falseFlows += 1
if pred == 0 :
print("Flow Not attack")
print()
print()
insert = threading.Thread(name="check_Flow"+str(count), target = dtb.insertFlow, args=(flow.drop('Label\n', axis=1), prediction, arriveT, predictedTime, predictedTime, db))
insert.start()
insert.join()
elif pred == 1:
print("Flow Attack")
print()
print()
handleAttack = threading.Thread(target = handleDDoS, args=(str(flow['Src IP']),str(flow['Flow ID']),int(flow['Src Port']),'Flow'), daemon=True)
handleAttack.start()
handleAttack.join()
handledTime = time.time() - timeRecorded
insert = threading.Thread(name="check_Flow"+str(count), target = dtb.insertFlow, args=(flow.drop('Label\n', axis=1), prediction, arriveT, predictedTime, handledTime, db))
insert.start()
insert.join()
else:
logger.warning("There is an unexpected prediction answer "+ str(prediction))
except Exception as e:
print(e)
except KeyboardInterrupt as e :
global logger
logger.info("Program interrupted")
return
def follow(thefile):
thefile.seek(0,2)
while True:
line = thefile.readline()
if not line:
#time.sleep(0.1)
continue
yield line
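# Minimal usage sketch for follow() (illustrative only; the path and the process()
# consumer below are hypothetical):
#
#   with open("data/daily/2020-01-01_Flow.csv") as fh:
#       for raw_line in follow(fh):
#           process(raw_line)
#
# Note that follow() busy-waits when no new line is available, since the sleep call
# inside it is commented out.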
'''
count- the number of flow that we have reached to read (starts from 0)
the function reads flows as they get saved inside the daily flow.csv file,
which then get sent for checking and for retraining if necessary
'''
def watch_flow(count):
global flowWorkingDb, startF, endF, checkF, logger, flowsT, flowsA, packA, packT
day = date.today()
flowPacketName = "data/daily/" + str(day) + "_Flow.csv"
changeUsedModel = threading.Thread(name="changeUsedFlowModel", target=changeUsedFlowModel, args=())
changeUsedModel.start()
try:
logfile = open(flowPacketName, "r")
cols = logfile.readline()
utils.setFlowNames(cols.split(","))
loglines = follow(logfile)
startF = checkF = time.time()
lines = logfile.readlines()
#for line in loglines:
while True:
lines = logfile.readlines()
#print(lines)
#line = logfile.readline()
for line in lines:
count += 1
endF = time.time()
flow = pd.DataFrame([line.split(",")], columns = utils.getFlowNames())
print(flow)
arriveT = flow['Timestamp']
#check if flow is a threat
flowCheck(flow, endF, arriveT, 'newFlow' + str(flowWorkingDb), count)
training = threading.Thread(name="checkTrainingTimesF", target=checkTrainingTimesF, args=(count,))
training.start()
except KeyboardInterrupt:
return
except Exception as e:
print(e)
time.sleep(2)
watch_flow(count)
'''
Starts 1 of 3 CICFlowMeter instances (the other two are currently commented out).
This is necessary because an error on the programmatic side of the tool used only allows
a correct reading to be done on a live capture.
In case the application doesn't single-handedly start reading traffic on
"any", the capture needs to be started by hand.
'''
def run_CICFlowMeter():
m = PyMouse()
x, y = m.position()
cmd = ['sudo ./CICFlowMeter']
#open 3 app instances
p = subprocess.Popen(cmd,shell=True, stdout=subprocess.PIPE)
#p = subprocess.Popen(cmd,shell=True, stdout=subprocess.PIPE)
#p = subprocess.Popen(cmd,shell=True, stdout=subprocess.PIPE)
#separate the instances on the screen
time.sleep(5)
m.press(650,300,1) #move 1
m.release(60,20)
time.sleep(0.5)
m.move(740,300)
time.sleep(0.5)
'''m.press(740,300,1) #move 2
m.release(100,500)
m.move(750,300)
time.sleep(0.5)
m.press(750,300,1) #move 3
m.release(800,20)
time.sleep(0.5)'''
m.click(60,370) #set load1
time.sleep(2)
'''m.click(750,370) #set load2
time.sleep(0.5)
m.click(60,850) #set load3
time.sleep(0.5)'''
m.click(300,490)
m.click(300,480) #set any
time.sleep(0.25)
'''m.click(790,490)
time.sleep(0.25)
m.click(790,480) #set any
time.sleep(0.25)
m.click(300,870)
time.sleep(0.25)
m.click(300,960) #set any
time.sleep(0.25)'''
s = time.time()
m.click(60,410) #start 1
time.sleep(0.25)
'''m.click(740,400) #start 2
time.sleep(0.5)
m.click(30,878) #start 3'''
'''inst1 = threading.Thread(target = run1, args=(m,s))
inst2 = threading.Thread(target = run2, args=(m,s))
inst3 = threading.Thread(target = run3, args=(m,s))'''
'''inst1.start()
inst2.start()
inst3.start()'''
p.wait()
'''
choose an interface
and then capture the traffic and the respective flow
any to sniff all interfaces
timeout- capture time
'''
def read_and_analize_traffic():
print(-1, "any ")
interfaces = utils.interface_names()
for i, value in enumerate(interfaces):
print(i, value)
print('\n')
iface = input("Please select interface by name: ")
flowReader = threading.Thread(target=run_CICFlowMeter, args=())
flowReader.start()
packet = threading.Thread(target=capture_interface, args=(iface,))
flow = threading.Thread(target=watch_flow, args=(0,))
packet.start()
flow.start()
packet.join()
flow.join()
'''
run 3 stops the 3rd instance every 180 seconds and saves the recorded flows
m - mouse controller
t - current time
'''
def run3(m, t):
try:
pas = False
while pas == False:
e = time.time()
if e - t >= 180:
m.click(30,920) #stop 3
time.sleep(0.5)
m.click(30,920)
time.sleep(0.5)
m.click(400,780) #save in time 3
time.sleep(0.25)
m.click(30,878) #start 3
t = e
pas = True
run3(m, t)
except KeyboardInterrupt as e:
return
'''
run 2 stops the 2nd instance every 120 seconds and saves the recorded flows
m - mouse controller
t - current time
'''
def run2(m, t):
try:
pas = False
while pas == False:
e = time.time()
if e - t >= 120:
m.click(750,450) #stop 2
time.sleep(0.25)
m.click(750,450)
time.sleep(0.5)
m.click(990,310) #save in time 2
time.sleep(0.25)
m.click(740,400) #start 2
t = e
pas = True
run2(m, t)
except KeyboardInterrupt as e:
return
'''
run 1 stops the 1st instance every 60 seconds and saves the recorded flows
m - mouse controller
t - current time
'''
def run1(m, t):
try:
pas = False
while pas == False:
e = time.time()
if e - t >= 60:
m.click(60,450) #stop 1
time.sleep(0.25)
m.click(60,450)
time.sleep(0.5)
m.click(390,310) #save in time 1
time.sleep(0.25)
m.click(60,400) #start 1
t = e
pas = True
run1(m, t)
except KeyboardInterrupt as e:
return
'''
Lets you choose a model out of the existent ones to become
the currently used one
'''
def choose_model(models, name):
print("Choose model to be used as the current model for "+ name)
for i in range(0,len(models)):
print(str(i+1)+ ". " + str(models[i]))
modelInd = input("\nWhich model (index)?\n")
if int(modelInd)-1 in range(0,len(models)):
return str(models[int(modelInd)-1])
else:
print("Choose an index\n")
        return choose_model(models, name)
'''
model - model to be retrained
data - data used for retraining
This function completely retrains a model from a data file
'''
def retrain_model(model, data):
global session, logger
encodedData = []
targetsF = []
if 'Time to Live' in data:
target= pd.DataFrame(data[['target']].applymap(lambda x: utils.get_Target(x)))
targetsF = U.to_categorical(target, num_classes=2)
print(data.columns)
keep = ['Source IP', 'Dest IP', 'Source Port', 'Dest Port', 'Byte Size', 'Packet Length', 'Time to Live', 'Packets/Time']
encodedData = data[keep]
encodedData['Packet Type'] = data[['Packet Type']].applymap(lambda x: utils.get_Type_Coding(x))
print(encodedData.columns)
try:
retrainingInfo = train_test_split(encodedData, targetsF, test_size=0.2, random_state=42)
model.load_data(retrainingInfo.copy())
print("loaded")
session.run(tf.initialize_all_variables())
stats = model.train(20, 6, 9, 'filePacketTrained', patience=50)
print("trained")
try:
with session.as_default():
with session.graph.as_default():
score = model.evaluate()
model.save_model('trafficModels')
print('Test loss: ' + str(round(score[0], 3)))
print('Test accuracy ' + str(round(score[1], 3)) + " (+/-" + str(numpy.std(round(score[1], 3))) + ")")
plt.plot(stats['train_loss'])
plt.plot(stats['val_loss'])
plt.title('model loss')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
print(stats)
except Exception as ex:
logger.log('Error s ', ex, ex.__traceback__.tb_lineno)
except Exception as e:
logger.error("Exception occurred", exc_info=True)
else:
encodedData = pd.DataFrame(columns=utils.getPacketNames())
encodedData = data.apply(lambda x: utils.labelEncoder(x, 'Training')[0], axis=1)
encodedData.columns = utils.getPacketNames()
targets = data['target']
targetsF = U.to_categorical(targets.copy(), num_classes=2)
try:
retrainingInfo = train_test_split(encodedData, targetsF, test_size=0.2, random_state=42)
model.load_data(retrainingInfo.copy())
print("loaded")
session.run(tf.initialize_all_variables())
stats = model.train(20, 6, 8, 'filePacketTrained', patience=20)
print("trained")
try:
with session.as_default():
with session.graph.as_default():
score = model.evaluate()
model.save_model('trafficModels')
print('Test loss: ' + str(round(score[0], 3)))
print('Test accuracy ' + str(round(score[1], 3)) + " (+/-" + str(numpy.std(round(score[1], 3))) + ")")
plt.plot(stats['train_loss'])
plt.plot(stats['val_loss'])
plt.title('model loss')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
print(stats)
except Exception as ex:
logger.log('Error s ', ex, ex.__traceback__.tb_lineno)
except Exception as e:
logger.error("Exception occurred", exc_info=True)
'''
name- name of the training data type (i.e packets or flows)
Function lets you choose the data to be used for retraining
'''
def get_training_data(name):
global ROOT_DIR
name = name.replace('Models','Data')
print(name)
dataFiles = os.listdir(os.path.join(ROOT_DIR, name))
chosenFile = None
while chosenFile not in dataFiles:
print("Choose a data file index to be used for training:\n")
for i in range(len(dataFiles)):
print(str(i)+". "+str(dataFiles[i]))
chosenFile = input("\n Data:\n")
if int(chosenFile) not in range(len(dataFiles)):
print("Please choose an index")
else:
chosenFile = dataFiles[int(chosenFile)]
print(chosenFile)
if name == 'trafficData' and chosenFile != "final dataset.arff":
data = pd.read_csv(name+"/"+chosenFile, delimiter=',')
fieldNames = ['Source IP', 'Dest IP', 'Source Port', 'Dest Port',
'Packet Length', 'Packets/Time', 'Highest Layer', 'Transport Layer', 'target']
return data[fieldNames]
elif name == 'trafficData' and chosenFile == "final dataset.arff":
data = arff.loadarff('trafficData/final dataset.arff')
        df = pd.DataFrame(data[0])
from pathlib import Path
import pandas as pd
from sqlalchemy import create_engine
DB_HOST = 'localhost'
DB_PORT = 5432
DB = 'group_16'
def main():
user = input('Enter pgadmin username: ')
password = input('Enter pgadmin password: ')
weather_csv = Path('weather.csv')
if weather_csv.is_file():
weather_df = pd.read_csv('weather.csv', dtype={'weather': str})
else:
weather_df = prepare_weather_data()
insert_weather(weather_df, user, password)
def prepare_weather_data():
# Filter relevant stations from the file (ON, hourly data still active 2014+)
print('Getting station names...')
iter_stations = pd.read_csv('Station Inventory EN.csv', header=3, chunksize=1000)
    station_df = pd.concat([chunk[(chunk['Province'] == 'ONTARIO') & (chunk['HLY Last Year'] >= 2014)] for chunk in iter_stations], ignore_index=True)
# -*- coding: utf-8 -*-
"""
Created on 2018-09-13
@author: <NAME>
"""
import numpy as np
import pandas as pd
CURRENT_ROUND = 38
# Load data from all 2018 rounds
# Data from https://github.com/henriquepgomide/caRtola
rounds = []
rounds.append(pd.read_csv('2018/data/rodada-1.csv'))
rounds.append(pd.read_csv('2018/data/rodada-2.csv'))
rounds.append(pd.read_csv('2018/data/rodada-3.csv'))
rounds.append(pd.read_csv('2018/data/rodada-4.csv'))
rounds.append(pd.read_csv('2018/data/rodada-5.csv'))
rounds.append(pd.read_csv('2018/data/rodada-6.csv'))
rounds.append(pd.read_csv('2018/data/rodada-7.csv'))
rounds.append(pd.read_csv('2018/data/rodada-8.csv'))
rounds.append(pd.read_csv('2018/data/rodada-9.csv'))
rounds.append(pd.read_csv('2018/data/rodada-10.csv'))
rounds.append(pd.read_csv('2018/data/rodada-11.csv'))
rounds.append(pd.read_csv('2018/data/rodada-12.csv'))
rounds.append(pd.read_csv('2018/data/rodada-13.csv'))
rounds.append(pd.read_csv('2018/data/rodada-14.csv'))
rounds.append(pd.read_csv('2018/data/rodada-15.csv'))
rounds.append(pd.read_csv('2018/data/rodada-16.csv'))
rounds.append(pd.read_csv('2018/data/rodada-17.csv'))
rounds.append(pd.read_csv('2018/data/rodada-18.csv'))
rounds.append(pd.read_csv('2018/data/rodada-19.csv'))
rounds.append(pd.read_csv('2018/data/rodada-20.csv'))
rounds.append(pd.read_csv('2018/data/rodada-21.csv'))
rounds.append(pd.read_csv('2018/data/rodada-22.csv'))
rounds.append(pd.read_csv('2018/data/rodada-23.csv'))
rounds.append(pd.read_csv('2018/data/rodada-24.csv'))
rounds.append(pd.read_csv('2018/data/rodada-25.csv'))
rounds.append(pd.read_csv('2018/data/rodada-26.csv'))
rounds.append(pd.read_csv('2018/data/rodada-27.csv'))
rounds.append(pd.read_csv('2018/data/rodada-28.csv'))
rounds.append(pd.read_csv('2018/data/rodada-29.csv'))
rounds.append(pd.read_csv('2018/data/rodada-30.csv'))
rounds.append(pd.read_csv('2018/data/rodada-31.csv'))
rounds.append(pd.read_csv('2018/data/rodada-32.csv'))
rounds.append(pd.read_csv('2018/data/rodada-33.csv'))
rounds.append(pd.read_csv('2018/data/rodada-34.csv'))
rounds.append(pd.read_csv('2018/data/rodada-35.csv'))
rounds.append(pd.read_csv('2018/data/rodada-36.csv'))
rounds.append(pd.read_csv('2018/data/rodada-37.csv'))
df = pd.concat(rounds)
#df.shape[0]
cols_scouts_def = ['CA','CV','DD','DP','FC','GC','GS','RB','SG'] # alphabetical order
cols_scouts_atk = ['A','FD','FF','FS','FT','G','I','PE','PP'] # alphabetical order
cols_scouts = cols_scouts_def + cols_scouts_atk
# remove players who didn't play
df = df[(df['atletas.pontos_num'] !=0 ) & (df['atletas.variacao_num'] != 0)]
#df.shape[0]
# import list with all played matches and the matches of the next round
# data from https://github.com/henriquepgomide/caRtola
match_index = pd.read_csv('2018/data/2018_partidas.csv')
# creates a list with all players and assigns the current round to them
players_list = df.drop_duplicates(subset = 'atletas.atleta_id')
players_list['atletas.rodada_id'] = CURRENT_ROUND
df = df.append(players_list)
# Add to the DataFrame df the remaining information needed to build the model
#
# Extra columns to be added:
# - club identifier
# - whether the match was played at home or away
# - 5 columns with the results of past matches
# - offensive and defensive strength indices for each club, taken from FiveThirtyEight (https://projects.fivethirtyeight.com/soccer-predictions/brasileirao/)
#
# the two csv files above were created to make it easy to identify a club
# in the 'rodada-xx.csv' e '2018_partidas.csv' files
club_id_partida = pd.read_csv('2018/data/clube_partida.csv', index_col = 0).to_dict()['id']
club_id_rodada = pd.read_csv('2018/data/clube_rodada.csv', index_col = 0).to_dict()['id']
club_id_rodada_2 = pd.read_csv('2018/data/clube_rodada.csv')
# Import the .csv with the offensive and defensive strength of each club
# taken from https://projects.fivethirtyeight.com/soccer-predictions/brasileirao/
forca_clubes = pd.read_csv('2018/data/forca-clubes-fte.csv')
# match place matrix
# 1 = home
# 0 = away
# lines represent the rounds and columns represent the clubs.
home_or_away = pd.DataFrame(data = np.zeros((39, 20)))
# Add extra columns to the matches DataFrame (match_index)
# The new columns are:
# - home club id
# - away club id
home_team_id_list = []
away_team_id_list = []
for index, row in match_index.iterrows():
#print(row['home_team'], row['away_team'])
home_team_id = club_id_partida[row['home_team']]
away_team_id = club_id_partida[row['away_team']]
home_team_id_list.append(home_team_id)
away_team_id_list.append(away_team_id)
home_or_away[home_team_id][row['round']] = 1
home_or_away[away_team_id][row['round']] = 0
match_index['home_team_id'] = home_team_id_list
match_index['away_team_id'] = away_team_id_list
# new features for the DataFrame df
home_team_col = [] # Column flagging whether the match was played at home or away
id_team_col = [] # Column with the id of the player's club
id_adv_team_col = []
adv_team_name_col = []
off_own_team = [] # Column with the attacking strength of the player's own club
def_own_team = [] # Column with the defensive strength of the player's own club
off_adv_team = [] # Column with the attacking strength of the player's opponent
def_adv_team = [] # Column with the defensive strength of the player's opponent
for index, row in df.iterrows(): # iterate player by player
    j = club_id_rodada[row['atletas.clube.id.full.name']] # get the club name from df and look up its id
    i = row['atletas.rodada_id'] # round number
    # look up the id of the opposing club
    try: # case: the player played at home
adv_id = match_index[ (match_index['round'] == i) & \
(match_index['home_team_id'] == j)]['away_team_id'].iloc[0]
    except IndexError: # case: the player played away
try:
adv_id = match_index[ (match_index['round'] == i) & \
(match_index['away_team_id'] == j)]['home_team_id'].iloc[0]
        except IndexError: # case: no opposing club was found
adv_id = j
off_own_team.append(forca_clubes['Off'][j])
def_own_team.append(forca_clubes['Def'][j])
off_adv_team.append(forca_clubes['Off'][adv_id])
def_adv_team.append(forca_clubes['Def'][adv_id])
home_team_col.append(home_or_away[j][i])
id_team_col.append(j)
id_adv_team_col.append(adv_id)
adv_team_name = club_id_rodada_2[club_id_rodada_2['id'] == adv_id]['clube'].iloc[0]
adv_team_name_col.append(adv_team_name)
df['home_team'] = home_team_col
df['id_team'] = id_team_col
df['id_adv_team'] = id_adv_team_col
df['adv_team_name'] = adv_team_name_col
df['off_own_team'] = off_own_team
df['def_own_team'] = def_own_team
df['off_adv_team'] = off_adv_team
df['def_adv_team'] = def_adv_team
df['score_index'] = df['off_own_team'] * df['def_adv_team']
df['take_goal_index'] = df['off_adv_team'] * df['def_own_team']
# Transform cummulative feature into current round only feature
def fix_cummulative_feat (df, round_):
suffixes = ('_curr', '_prev')
cols_current = [col + suffixes[0] for col in cols_scouts]
cols_prev = [col + suffixes[1] for col in cols_scouts]
df_round = df[df['atletas.rodada_id'] == round_]
if round_ == 1:
df_round.fillna(value=0, inplace=True)
return df_round
df_round_prev = df[df['atletas.rodada_id'] < round_].groupby('atletas.atleta_id', as_index=False)[cols_scouts].max()
df_players = df_round.merge(df_round_prev, how='left', on=['atletas.atleta_id'], suffixes=suffixes)
# if is the first round of a player, the scouts of previous rounds will be NaNs. Thus, set them to zero
df_players.fillna(value=0, inplace=True)
# compute the scouts
df_players[cols_current] = df_players[cols_current].values - df_players[cols_prev].values
# update the columns
df_players.drop(labels=cols_prev, axis=1, inplace=True)
df_players = df_players.rename(columns=dict(zip(cols_current, cols_scouts)))
df_players.SG = df_players.SG.clip_lower(0)
return df_players
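# Worked example of what fix_cummulative_feat computes (values are made up): if a player's
# cumulative 'G' (goals) scout is 3 after round 7 and 4 after round 8, the per-round frame
# for round 8 gets G = 4 - 3 = 1; any negative 'SG' produced by the subtraction is clipped
# to 0 by the clip_lower call above.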
df1 = pd.DataFrame()
for round_ in range(1, CURRENT_ROUND):
df_round = fix_cummulative_feat(df, round_)
print("Dimensões da rodada #{0}: {1}".format(round_, df_round.shape))
df1 = df1.append(df_round, ignore_index=True)
# get info for the last round from df
df1 = df1.append(df[df['atletas.rodada_id'] == CURRENT_ROUND], ignore_index=True)
# new features representing points from previous matches
points_n1 = [] # points in round n-1
points_n2 = [] # points in round n-2
points_n3 = [] # points in round n-3
points_n4 = [] # points in round n-4
points_n5 = [] # points in round n-5
def previous_played_rounds(df, rodada, atleta):
"""Função usada para buscar no DataFrame df pontuação de rounds passadas
Args:
rodada: número da rodada onde será buscada a pontuação passada
atleta: id od jogador a ser buscado
"""
#if rodada <= 1: return [0,0,0,0,0]
    list_ = pd.DataFrame()
import time
import pytest
import numpy as np
import pandas as pd
from hyperactive import Hyperactive
def objective_function_0(opt):
score = -opt["x1"] * opt["x1"]
return score
search_space_0 = {
"x1": list(np.arange(-5, 6, 1)),
}
search_space_1 = {
"x1": list(np.arange(0, 6, 1)),
}
search_space_2 = {
"x1": list(np.arange(-5, 1, 1)),
}
search_space_3 = {
"x1": list(np.arange(-1, 1, 0.1)),
}
search_space_4 = {
"x1": list(np.arange(-1, 0, 0.1)),
}
search_space_5 = {
"x1": list(np.arange(0, 1, 0.1)),
}
search_space_para_0 = [
(search_space_0),
(search_space_1),
(search_space_2),
(search_space_3),
(search_space_4),
(search_space_5),
]
@pytest.mark.parametrize("search_space", search_space_para_0)
def test_trafo_0(search_space):
hyper = Hyperactive()
hyper.add_search(objective_function_0, search_space, n_iter=25)
hyper.run()
for value in hyper.results(objective_function_0)["x1"].values:
if value not in search_space["x1"]:
assert False
# ----------------- # Test if memory warm starts do work as intended
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import cross_val_score
from sklearn.tree import DecisionTreeClassifier
data = load_breast_cancer()
X, y = data.data, data.target
def objective_function_1(opt):
dtc = DecisionTreeClassifier(min_samples_split=opt["min_samples_split"])
scores = cross_val_score(dtc, X, y, cv=10)
time.sleep(0.1)
return scores.mean()
search_space_0 = {
"min_samples_split": list(np.arange(2, 12)),
}
search_space_1 = {
"min_samples_split": list(np.arange(12, 22)),
}
search_space_2 = {
"min_samples_split": list(np.arange(22, 32)),
}
memory_dict = {"min_samples_split": range(2, 12), "score": range(2, 12)}
memory_warm_start_0 = pd.DataFrame(memory_dict)
memory_dict = {"min_samples_split": range(12, 22), "score": range(12, 22)}
memory_warm_start_1 = pd.DataFrame(memory_dict)
memory_dict = {"min_samples_split": range(22, 32), "score": range(22, 32)}
memory_warm_start_2 = pd.DataFrame(memory_dict)
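# Sketch of how these warm-start frames could be wired in (hedged: the parameter name
# follows Hyperactive's documented `memory_warm_start` option; verify it against the
# installed version before relying on it):
#
#   hyper = Hyperactive()
#   hyper.add_search(
#       objective_function_1,
#       search_space_0,
#       n_iter=15,
#       memory_warm_start=memory_warm_start_0,
#   )
#   hyper.run()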
# IMPORTATION STANDARD
import os
# IMPORTATION THIRDPARTY
import pandas as pd
import pytest
# IMPORTATION INTERNAL
from openbb_terminal.stocks.comparison_analysis import ca_controller
# pylint: disable=E1101
# pylint: disable=W0603
DF_EMPTY = pd.DataFrame()
@pytest.mark.vcr(record_mode="none")
@pytest.mark.parametrize(
"queue, expected",
[
(["historical", "help"], []),
(["q", ".."], [".."]),
],
)
def test_menu_with_queue(expected, mocker, queue):
mocker.patch(
target=(
"openbb_terminal.stocks.comparison_analysis.ca_controller."
"ComparisonAnalysisController.switch"
),
return_value=["quit"],
)
result_menu = ca_controller.ComparisonAnalysisController(
similar=["MOCK_SIMILAR_1", "MOCK_SIMILAR_2"],
queue=queue,
).menu()
assert result_menu == expected
@pytest.mark.vcr(record_mode="none")
def test_menu_without_queue_completion(mocker):
# ENABLE AUTO-COMPLETION : HELPER_FUNCS.MENU
mocker.patch(
target="openbb_terminal.feature_flags.USE_PROMPT_TOOLKIT",
new=True,
)
mocker.patch(
target="openbb_terminal.parent_classes.session",
)
mocker.patch(
target="openbb_terminal.parent_classes.session.prompt",
return_value="quit",
)
# DISABLE AUTO-COMPLETION : CONTROLLER.COMPLETER
mocker.patch.object(
target=ca_controller.obbff,
attribute="USE_PROMPT_TOOLKIT",
new=True,
)
mocker.patch(
target="openbb_terminal.stocks.comparison_analysis.ca_controller.session",
)
mocker.patch(
target="openbb_terminal.stocks.comparison_analysis.ca_controller.session.prompt",
return_value="quit",
)
result_menu = ca_controller.ComparisonAnalysisController(
similar=["MOCK_SIMILAR_1", "MOCK_SIMILAR_2"],
queue=None,
).menu()
assert result_menu == []
@pytest.mark.vcr(record_mode="none")
@pytest.mark.parametrize(
"mock_input",
["quit", " quit", "quit mock"],
)
def test_menu_without_queue_sys_exit(mock_input, mocker):
# DISABLE AUTO-COMPLETION
mocker.patch.object(
target=ca_controller.obbff,
attribute="USE_PROMPT_TOOLKIT",
new=False,
)
mocker.patch(
target="openbb_terminal.stocks.comparison_analysis.ca_controller.session",
return_value=None,
)
# MOCK USER INPUT
mocker.patch("builtins.input", return_value=mock_input)
# MOCK SWITCH
class SystemExitSideEffect:
def __init__(self):
self.first_call = True
def __call__(self, *args, **kwargs):
if self.first_call:
self.first_call = False
raise SystemExit()
return ["quit"]
mock_switch = mocker.Mock(side_effect=SystemExitSideEffect())
mocker.patch(
target=(
"openbb_terminal.stocks.comparison_analysis.ca_controller."
"ComparisonAnalysisController.switch"
),
new=mock_switch,
)
result_menu = ca_controller.ComparisonAnalysisController(
similar=["MOCK_SIMILAR_1", "MOCK_SIMILAR_2"],
queue=None,
).menu()
assert result_menu == []
@pytest.mark.vcr(record_mode="none")
@pytest.mark.record_stdout
def test_print_help():
controller = ca_controller.ComparisonAnalysisController()
controller.print_help()
@pytest.mark.vcr(record_mode="none")
@pytest.mark.parametrize(
"an_input, expected_queue",
[
("", []),
("/help", ["home", "help"]),
("help/help", ["help", "help"]),
("q", ["quit"]),
("h", []),
("r", ["quit", "quit", "reset", "stocks", "ca"]),
],
)
def test_switch(an_input, expected_queue):
controller = ca_controller.ComparisonAnalysisController()
queue = controller.switch(an_input=an_input)
assert queue == expected_queue
@pytest.mark.vcr(record_mode="none")
def test_call_cls(mocker):
mocker.patch("os.system")
controller = ca_controller.ComparisonAnalysisController()
controller.call_cls([])
assert controller.queue == []
os.system.assert_called_once_with("cls||clear")
@pytest.mark.vcr(record_mode="none")
@pytest.mark.parametrize(
"func, queue, expected_queue",
[
(
"call_exit",
[],
[
"quit",
"quit",
"quit",
],
),
("call_exit", ["help"], ["quit", "quit", "quit", "help"]),
("call_home", [], ["quit", "quit"]),
("call_help", [], []),
("call_quit", [], ["quit"]),
("call_quit", ["help"], ["quit", "help"]),
(
"call_reset",
[],
["quit", "quit", "reset", "stocks", "ca", "set MOCK_SIMILAR"],
),
(
"call_reset",
["help"],
["quit", "quit", "reset", "stocks", "ca", "set MOCK_SIMILAR", "help"],
),
],
)
def test_call_func_expect_queue(expected_queue, queue, func):
controller = ca_controller.ComparisonAnalysisController(
similar=["MOCK_SIMILAR"],
queue=queue,
)
result = getattr(controller, func)([])
assert result is None
assert controller.queue == expected_queue
@pytest.mark.vcr(record_mode="none")
@pytest.mark.parametrize(
"tested_func, mocked_func, other_args, called_with",
[
(
"call_set",
None,
[
"--similar=MOCK_TICKER_1,MOCK_TICKER_2",
],
None,
),
(
"call_add",
None,
[
"--similar=MOCK_TICKER_1,MOCK_TICKER_2",
],
None,
),
(
"call_rmv",
None,
[
"--similar=MOCK_TICKER_1,MOCK_TICKER_2",
],
None,
),
(
"call_historical",
"yahoo_finance_view.display_historical",
[
"--type=h",
"--no-scale",
"--start=2020-12-01",
"--export=csv",
],
dict(
similar_tickers=["MOCK_SIMILAR_1", "MOCK_SIMILAR_2"],
start="2020-12-01",
candle_type="h",
normalize=False,
export="csv",
),
),
(
"call_hcorr",
"yahoo_finance_view.display_correlation",
[
"--type=h",
"--start=2020-12-01",
],
dict(
similar_tickers=["MOCK_SIMILAR_1", "MOCK_SIMILAR_2"],
start="2020-12-01",
candle_type="h",
export="",
),
),
(
"call_volume",
"yahoo_finance_view.display_volume",
[
"--start=2020-12-01",
"--export=csv",
],
dict(
similar_tickers=["MOCK_SIMILAR_1", "MOCK_SIMILAR_2"],
start="2020-12-01",
export="csv",
),
),
(
"call_income",
"marketwatch_view.display_income_comparison",
[
"--quarter",
"--timeframe=MOCK_TIMEFRAME",
],
dict(
similar=["MOCK_SIMILAR_1", "MOCK_SIMILAR_2"],
timeframe="MOCK_TIMEFRAME",
quarter=True,
),
),
(
"call_balance",
"marketwatch_view.display_balance_comparison",
[
"--quarter",
"--timeframe=MOCK_TIMEFRAME",
"--export=csv",
],
dict(
similar=["MOCK_SIMILAR_1", "MOCK_SIMILAR_2"],
timeframe="MOCK_TIMEFRAME",
quarter=True,
),
),
(
"call_cashflow",
"marketwatch_view.display_cashflow_comparison",
[
"--quarter",
"--timeframe=MOCK_TIMEFRAME",
"--export=csv",
],
dict(
similar=["MOCK_SIMILAR_1", "MOCK_SIMILAR_2"],
timeframe="MOCK_TIMEFRAME",
quarter=True,
),
),
(
"call_sentiment",
"finbrain_view.display_sentiment_compare",
[
"--raw",
"--export=csv",
],
dict(
similar=["MOCK_SIMILAR_1", "MOCK_SIMILAR_2"],
raw=True,
export="csv",
),
),
(
"call_scorr",
"finbrain_view.display_sentiment_correlation",
[
"--raw",
"--export=csv",
],
dict(
similar=["MOCK_SIMILAR_1", "MOCK_SIMILAR_2"],
raw=True,
export="csv",
),
),
(
"call_overview",
"finviz_compare_view.screener",
[
"--export=csv",
],
dict(
similar=["MOCK_SIMILAR_1", "MOCK_SIMILAR_2"],
data_type="overview",
export="csv",
),
),
(
"call_valuation",
"finviz_compare_view.screener",
[
"--export=csv",
],
dict(
similar=["MOCK_SIMILAR_1", "MOCK_SIMILAR_2"],
data_type="valuation",
export="csv",
),
),
(
"call_financial",
"finviz_compare_view.screener",
[
"--export=csv",
],
dict(
similar=["MOCK_SIMILAR_1", "MOCK_SIMILAR_2"],
data_type="financial",
export="csv",
),
),
(
"call_ownership",
"finviz_compare_view.screener",
[
"--export=csv",
],
dict(
similar=["MOCK_SIMILAR_1", "MOCK_SIMILAR_2"],
data_type="ownership",
export="csv",
),
),
(
"call_performance",
"finviz_compare_view.screener",
[
"--export=csv",
],
dict(
similar=["MOCK_SIMILAR_1", "MOCK_SIMILAR_2"],
data_type="performance",
export="csv",
),
),
(
"call_technical",
"finviz_compare_view.screener",
[
"--export=csv",
],
dict(
similar=["MOCK_SIMILAR_1", "MOCK_SIMILAR_2"],
data_type="technical",
export="csv",
),
),
],
)
def test_call_func(tested_func, mocked_func, other_args, called_with, mocker):
if mocked_func:
mock = mocker.Mock()
mocker.patch(
"openbb_terminal.stocks.comparison_analysis." + mocked_func,
new=mock,
)
controller = ca_controller.ComparisonAnalysisController(
similar=["MOCK_SIMILAR_1", "MOCK_SIMILAR_2"],
)
getattr(controller, tested_func)(other_args=other_args)
if isinstance(called_with, dict):
mock.assert_called_once_with(**called_with)
elif isinstance(called_with, list):
mock.assert_called_once_with(*called_with)
else:
mock.assert_called_once()
else:
controller = ca_controller.ComparisonAnalysisController(
similar=["MOCK_SIMILAR_1", "MOCK_SIMILAR_2"],
)
getattr(controller, tested_func)(other_args=other_args)
@pytest.mark.vcr(record_mode="none")
@pytest.mark.parametrize(
"func",
[
"call_ticker",
"call_getpoly",
"call_getfinnhub",
"call_getfinviz",
"call_set",
"call_add",
"call_rmv",
"call_historical",
"call_hcorr",
"call_volume",
"call_income",
"call_balance",
"call_cashflow",
"call_sentiment",
"call_scorr",
"call_overview",
"call_valuation",
"call_financial",
"call_ownership",
"call_performance",
"call_technical",
"call_tsne",
],
)
def test_call_func_no_parser(func, mocker):
mocker.patch(
"openbb_terminal.stocks.comparison_analysis.ca_controller.parse_known_args_and_warn",
return_value=None,
)
controller = ca_controller.ComparisonAnalysisController(
similar=["MOCK_SIMILAR_1", "MOCK_SIMILAR_2"]
)
func_result = getattr(controller, func)(other_args=list())
assert func_result is None
assert controller.queue == []
getattr(ca_controller, "parse_known_args_and_warn").assert_called_once()
@pytest.mark.vcr(record_mode="none")
def test_call_ticker(mocker):
similar = ["MOCK_SIMILAR_1", "MOCK_SIMILAR_2"]
mock = mocker.Mock(return_value=pd.DataFrame())
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import pytest
from ..testing_utils import make_ecommerce_entityset
from featuretools import Timedelta
from featuretools.computational_backends import PandasBackend
from featuretools.primitives import (
Absolute,
Add,
Count,
CumCount,
CumMax,
CumMean,
CumMin,
CumSum,
Day,
Diff,
DirectFeature,
Divide,
Equals,
Feature,
GreaterThan,
GreaterThanEqualTo,
Haversine,
Hour,
IdentityFeature,
IsIn,
IsNull,
Latitude,
LessThan,
LessThanEqualTo,
Longitude,
Mod,
Mode,
Multiply,
Negate,
Not,
NotEquals,
NumCharacters,
NumWords,
Percentile,
Subtract,
Sum,
get_transform_primitives,
make_trans_primitive
)
from featuretools.synthesis.deep_feature_synthesis import match
from featuretools.variable_types import Boolean, Datetime, Numeric, Variable
# some tests change the entityset values, so we have to create it fresh
# for each test (rather than setting scope='module')
@pytest.fixture
def es():
return make_ecommerce_entityset()
@pytest.fixture(scope='module')
def int_es():
return make_ecommerce_entityset(with_integer_time_index=True)
def test_make_trans_feat(es):
f = Hour(es['log']['datetime'])
pandas_backend = PandasBackend(es, [f])
df = pandas_backend.calculate_all_features(instance_ids=[0],
time_last=None)
v = df[f.get_name()][0]
assert v == 10
def test_diff(es):
value = IdentityFeature(es['log']['value'])
customer_id_feat = \
DirectFeature(es['sessions']['customer_id'],
child_entity=es['log'])
diff1 = Diff(value, es['log']['session_id'])
diff2 = Diff(value, customer_id_feat)
pandas_backend = PandasBackend(es, [diff1, diff2])
df = pandas_backend.calculate_all_features(instance_ids=range(15),
time_last=None)
val1 = df[diff1.get_name()].values.tolist()
val2 = df[diff2.get_name()].values.tolist()
correct_vals1 = [
np.nan, 5, 5, 5, 5, np.nan, 1, 1, 1, np.nan, np.nan, 5, np.nan, 7, 7
]
correct_vals2 = [np.nan, 5, 5, 5, 5, -20, 1, 1, 1, -3, np.nan, 5, -5, 7, 7]
for i, v in enumerate(val1):
v1 = val1[i]
if np.isnan(v1):
assert (np.isnan(correct_vals1[i]))
else:
assert v1 == correct_vals1[i]
v2 = val2[i]
if np.isnan(v2):
assert (np.isnan(correct_vals2[i]))
else:
assert v2 == correct_vals2[i]
def test_diff_single_value(es):
diff = Diff(es['stores']['num_square_feet'], es['stores'][u'région_id'])
pandas_backend = PandasBackend(es, [diff])
df = pandas_backend.calculate_all_features(instance_ids=[5],
time_last=None)
assert df.shape[0] == 1
assert df[diff.get_name()].dropna().shape[0] == 0
def test_compare_of_identity(es):
to_test = [(Equals, [False, False, True, False]),
(NotEquals, [True, True, False, True]),
(LessThan, [True, True, False, False]),
(LessThanEqualTo, [True, True, True, False]),
(GreaterThan, [False, False, False, True]),
(GreaterThanEqualTo, [False, False, True, True])]
features = []
for test in to_test:
features.append(test[0](es['log']['value'], 10))
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=[0, 1, 2, 3],
time_last=None)
for i, test in enumerate(to_test):
v = df[features[i].get_name()].values.tolist()
assert v == test[1]
def test_compare_of_direct(es):
log_rating = DirectFeature(es['products']['rating'],
child_entity=es['log'])
to_test = [(Equals, [False, False, False, False]),
(NotEquals, [True, True, True, True]),
(LessThan, [False, False, False, True]),
(LessThanEqualTo, [False, False, False, True]),
(GreaterThan, [True, True, True, False]),
(GreaterThanEqualTo, [True, True, True, False])]
features = []
for test in to_test:
features.append(test[0](log_rating, 4.5))
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=[0, 1, 2, 3],
time_last=None)
for i, test in enumerate(to_test):
v = df[features[i].get_name()].values.tolist()
assert v == test[1]
def test_compare_of_transform(es):
day = Day(es['log']['datetime'])
to_test = [(Equals, [False, True]),
(NotEquals, [True, False]),
(LessThan, [True, False]),
(LessThanEqualTo, [True, True]),
(GreaterThan, [False, False]),
(GreaterThanEqualTo, [False, True])]
features = []
for test in to_test:
features.append(test[0](day, 10))
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=[0, 14],
time_last=None)
for i, test in enumerate(to_test):
v = df[features[i].get_name()].values.tolist()
assert v == test[1]
def test_compare_of_agg(es):
count_logs = Count(es['log']['id'],
parent_entity=es['sessions'])
to_test = [(Equals, [False, False, False, True]),
(NotEquals, [True, True, True, False]),
(LessThan, [False, False, True, False]),
(LessThanEqualTo, [False, False, True, True]),
(GreaterThan, [True, True, False, False]),
(GreaterThanEqualTo, [True, True, False, True])]
features = []
for test in to_test:
features.append(test[0](count_logs, 2))
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=[0, 1, 2, 3],
time_last=None)
for i, test in enumerate(to_test):
v = df[features[i].get_name()].values.tolist()
assert v == test[1]
def test_compare_all_nans(es):
nan_feat = Mode(es['log']['product_id'], es['sessions'])
compare = nan_feat == 'brown bag'
# before all data
time_last = pd.Timestamp('1/1/1993')
pandas_backend = PandasBackend(es, [nan_feat, compare])
df = pandas_backend.calculate_all_features(instance_ids=[0, 1, 2],
time_last=time_last)
assert df[nan_feat.get_name()].dropna().shape[0] == 0
assert not df[compare.get_name()].any()
def test_arithmetic_of_val(es):
to_test = [(Add, [2.0, 7.0, 12.0, 17.0], [2.0, 7.0, 12.0, 17.0]),
(Subtract, [-2.0, 3.0, 8.0, 13.0], [2.0, -3.0, -8.0, -13.0]),
(Multiply, [0, 10, 20, 30], [0, 10, 20, 30]),
(Divide, [0, 2.5, 5, 7.5], [np.inf, 0.4, 0.2, 2 / 15.0],
[np.nan, np.inf, np.inf, np.inf])]
features = []
logs = es['log']
for test in to_test:
features.append(test[0](logs['value'], 2))
features.append(test[0](2, logs['value']))
features.append(Divide(logs['value'], 0))
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=[0, 1, 2, 3],
time_last=None)
for i, test in enumerate(to_test):
v = df[features[2 * i].get_name()].values.tolist()
assert v == test[1]
v = df[features[2 * i + 1].get_name()].values.tolist()
assert v == test[2]
test = to_test[-1][-1]
v = df[features[-1].get_name()].values.tolist()
assert (np.isnan(v[0]))
assert v[1:] == test[1:]
def test_arithmetic_two_vals_fails(es):
with pytest.raises(ValueError):
Add(2, 2)
def test_arithmetic_of_identity(es):
logs = es['log']
to_test = [(Add, [0., 7., 14., 21.]),
(Subtract, [0, 3, 6, 9]),
(Multiply, [0, 10, 40, 90]),
(Divide, [np.nan, 2.5, 2.5, 2.5])]
features = []
for test in to_test:
features.append(test[0](logs['value'], logs['value_2']))
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=[0, 1, 2, 3],
time_last=None)
for i, test in enumerate(to_test[:-1]):
v = df[features[i].get_name()].values.tolist()
assert v == test[1]
i, test = 3, to_test[-1]
v = df[features[i].get_name()].values.tolist()
assert (np.isnan(v[0]))
assert v[1:] == test[1][1:]
def test_arithmetic_of_direct(es):
rating = es['products']['rating']
log_rating = DirectFeature(rating,
child_entity=es['log'])
customer_age = es['customers']['age']
session_age = DirectFeature(customer_age,
child_entity=es['sessions'])
log_age = DirectFeature(session_age,
child_entity=es['log'])
to_test = [(Add, [38, 37, 37.5, 37.5]),
(Subtract, [28, 29, 28.5, 28.5]),
(Multiply, [165, 132, 148.5, 148.5]),
(Divide, [6.6, 8.25, 22. / 3, 22. / 3])]
features = []
for test in to_test:
features.append(test[0](log_age, log_rating))
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=[0, 3, 5, 7],
time_last=None)
for i, test in enumerate(to_test):
v = df[features[i].get_name()].values.tolist()
assert v == test[1]
# P TODO: rewrite this test
def test_arithmetic_of_transform(es):
diff1 = Diff(IdentityFeature(es['log']['value']),
IdentityFeature(es['log']['product_id']))
diff2 = Diff(IdentityFeature(es['log']['value_2']),
IdentityFeature(es['log']['product_id']))
to_test = [(Add, [np.nan, 14., -7., 3.]),
(Subtract, [np.nan, 6., -3., 1.]),
(Multiply, [np.nan, 40., 10., 2.]),
(Divide, [np.nan, 2.5, 2.5, 2.])]
features = []
for test in to_test:
features.append(test[0](diff1, diff2))
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=[0, 2, 11, 13],
time_last=None)
for i, test in enumerate(to_test):
v = df[features[i].get_name()].values.tolist()
assert np.isnan(v.pop(0))
assert np.isnan(test[1].pop(0))
assert v == test[1]
def test_not_feature(es):
likes_ice_cream = es['customers']['loves_ice_cream']
not_feat = Not(likes_ice_cream)
features = [not_feat]
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=[0, 1],
time_last=None)
v = df[not_feat.get_name()].values
assert not v[0]
assert v[1]
def test_arithmetic_of_agg(es):
customer_id_feat = es['customers']['id']
store_id_feat = es['stores']['id']
count_customer = Count(customer_id_feat,
parent_entity=es[u'régions'])
count_stores = Count(store_id_feat,
parent_entity=es[u'régions'])
to_test = [(Add, [6, 2]),
(Subtract, [0, -2]),
(Multiply, [9, 0]),
(Divide, [1, 0])]
features = []
for test in to_test:
features.append(test[0](count_customer, count_stores))
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(
instance_ids=['United States', 'Mexico'], time_last=None)
for i, test in enumerate(to_test):
v = df[features[i].get_name()].values.tolist()
assert v == test[1]
# TODO latlong is a string in entityset. Asserts in test_latlong fail
# def latlong_unstringify(latlong):
# lat = float(latlong.split(", ")[0].replace("(", ""))
# lon = float(latlong.split(", ")[1].replace(")", ""))
# return (lat, lon)
def test_latlong(es):
log_latlong_feat = es['log']['latlong']
latitude = Latitude(log_latlong_feat)
longitude = Longitude(log_latlong_feat)
features = [latitude, longitude]
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=range(15),
time_last=None)
latvalues = df[latitude.get_name()].values
lonvalues = df[longitude.get_name()].values
assert len(latvalues) == 15
assert len(lonvalues) == 15
real_lats = [0, 5, 10, 15, 20, 0, 1, 2, 3, 0, 0, 5, 0, 7, 14]
real_lons = [0, 2, 4, 6, 8, 0, 1, 2, 3, 0, 0, 2, 0, 3, 6]
for i, v, in enumerate(real_lats):
assert v == latvalues[i]
for i, v, in enumerate(real_lons):
assert v == lonvalues[i]
def test_haversine(es):
log_latlong_feat = es['log']['latlong']
log_latlong_feat2 = es['log']['latlong2']
haversine = Haversine(log_latlong_feat, log_latlong_feat2)
features = [haversine]
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=range(15),
time_last=None)
values = df[haversine.get_name()].values
real = [0., 524.15585776, 1043.00845747, 1551.12130243,
2042.79840241, 0., 137.86000883, 275.59396684,
413.07563177, 0., 0., 524.15585776,
0., 739.93819145, 1464.27975511]
assert len(values) == 15
for i, v in enumerate(real):
assert v - values[i] < .0001
def test_cum_sum(es):
log_value_feat = es['log']['value']
cum_sum = CumSum(log_value_feat, es['log']['session_id'])
features = [cum_sum]
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=range(15),
time_last=None)
cvalues = df[cum_sum.get_name()].values
assert len(cvalues) == 15
cum_sum_values = [0, 5, 15, 30, 50, 0, 1, 3, 6, 0, 0, 5, 0, 7, 21]
for i, v in enumerate(cum_sum_values):
assert v == cvalues[i]
def test_cum_min(es):
log_value_feat = es['log']['value']
cum_min = CumMin(log_value_feat, es['log']['session_id'])
features = [cum_min]
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=range(15),
time_last=None)
cvalues = df[cum_min.get_name()].values
assert len(cvalues) == 15
cum_min_values = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
for i, v in enumerate(cum_min_values):
assert v == cvalues[i]
def test_cum_max(es):
log_value_feat = es['log']['value']
cum_max = CumMax(log_value_feat, es['log']['session_id'])
features = [cum_max]
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=range(15),
time_last=None)
cvalues = df[cum_max.get_name()].values
assert len(cvalues) == 15
cum_max_values = [0, 5, 10, 15, 20, 0, 1, 2, 3, 0, 0, 5, 0, 7, 14]
for i, v in enumerate(cum_max_values):
assert v == cvalues[i]
def test_cum_sum_use_previous(es):
log_value_feat = es['log']['value']
cum_sum = CumSum(log_value_feat, es['log']['session_id'],
use_previous=Timedelta(3, 'observations',
entity=es['log']))
features = [cum_sum]
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=range(15),
time_last=None)
cvalues = df[cum_sum.get_name()].values
assert len(cvalues) == 15
cum_sum_values = [0, 5, 15, 30, 45, 0, 1, 3, 6, 0, 0, 5, 0, 7, 21]
for i, v in enumerate(cum_sum_values):
assert v == cvalues[i]
def test_cum_sum_use_previous_integer_time(int_es):
es = int_es
log_value_feat = es['log']['value']
with pytest.raises(AssertionError):
CumSum(log_value_feat, es['log']['session_id'],
use_previous=Timedelta(3, 'm'))
cum_sum = CumSum(log_value_feat, es['log']['session_id'],
use_previous=Timedelta(3, 'observations',
entity=es['log']))
features = [cum_sum]
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=range(15),
time_last=None)
cvalues = df[cum_sum.get_name()].values
assert len(cvalues) == 15
cum_sum_values = [0, 5, 15, 30, 45, 0, 1, 3, 6, 0, 0, 5, 0, 7, 21]
for i, v in enumerate(cum_sum_values):
assert v == cvalues[i]
def test_cum_sum_where(es):
log_value_feat = es['log']['value']
compare_feat = GreaterThan(log_value_feat, 3)
dfeat = Feature(es['sessions']['customer_id'], es['log'])
cum_sum = CumSum(log_value_feat, dfeat,
where=compare_feat)
features = [cum_sum]
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=range(15),
time_last=None)
cvalues = df[cum_sum.get_name()].values
assert len(cvalues) == 15
cum_sum_values = [0, 5, 15, 30, 50, 50, 50, 50, 50, 50,
0, 5, 5, 12, 26]
for i, v in enumerate(cum_sum_values):
if not np.isnan(v):
assert v == cvalues[i]
else:
assert (np.isnan(cvalues[i]))
def test_cum_sum_use_previous_and_where(es):
log_value_feat = es['log']['value']
compare_feat = GreaterThan(log_value_feat, 3)
# todo should this be cummean?
dfeat = Feature(es['sessions']['customer_id'], es['log'])
cum_sum = CumSum(log_value_feat, dfeat,
where=compare_feat,
use_previous=Timedelta(3, 'observations',
entity=es['log']))
features = [cum_sum]
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=range(15),
time_last=None)
cum_sum_values = [0, 5, 15, 30, 45, 45, 45, 45, 45, 45,
0, 5, 5, 12, 26]
cvalues = df[cum_sum.get_name()].values
assert len(cvalues) == 15
for i, v in enumerate(cum_sum_values):
assert v == cvalues[i]
def test_cum_sum_group_on_nan(es):
log_value_feat = es['log']['value']
es['log'].df['product_id'] = (['coke zero'] * 3 + ['car'] * 2 +
['toothpaste'] * 3 + ['brown bag'] * 2 +
['shoes'] +
[np.nan] * 4 +
['coke_zero'] * 2)
cum_sum = CumSum(log_value_feat, es['log']['product_id'])
features = [cum_sum]
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=range(15),
time_last=None)
cvalues = df[cum_sum.get_name()].values
assert len(cvalues) == 15
cum_sum_values = [0, 5, 15,
15, 35,
0, 1, 3,
3, 3,
0,
np.nan, np.nan, np.nan, np.nan]
for i, v in enumerate(cum_sum_values):
if np.isnan(v):
assert (np.isnan(cvalues[i]))
else:
assert v == cvalues[i]
def test_cum_sum_use_previous_group_on_nan(es):
# TODO: Figure out how to test where `df`
# in pd_rolling get_function() has multiindex
log_value_feat = es['log']['value']
es['log'].df['product_id'] = (['coke zero'] * 3 + ['car'] * 2 +
['toothpaste'] * 3 + ['brown bag'] * 2 +
['shoes'] +
[np.nan] * 4 +
['coke_zero'] * 2)
cum_sum = CumSum(log_value_feat,
es['log']['product_id'],
es["log"]["datetime"],
use_previous=Timedelta(40, 'seconds'))
features = [cum_sum]
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=range(15),
time_last=None)
cvalues = df[cum_sum.get_name()].values
assert len(cvalues) == 15
cum_sum_values = [0, 5, 15,
15, 35,
0, 1, 3,
3, 0,
0,
np.nan, np.nan, np.nan, np.nan]
for i, v in enumerate(cum_sum_values):
if np.isnan(v):
assert (np.isnan(cvalues[i]))
else:
assert v == cvalues[i]
def test_cum_sum_use_previous_and_where_absolute(es):
log_value_feat = es['log']['value']
compare_feat = GreaterThan(log_value_feat, 3)
dfeat = Feature(es['sessions']['customer_id'], es['log'])
cum_sum = CumSum(log_value_feat, dfeat, es["log"]["datetime"],
where=compare_feat,
use_previous=Timedelta(40, 'seconds'))
features = [cum_sum]
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=range(15),
time_last=None)
cum_sum_values = [0, 5, 15, 30, 50, 0, 0, 0, 0, 0,
0, 5, 0, 7, 21]
cvalues = df[cum_sum.get_name()].values
assert len(cvalues) == 15
for i, v in enumerate(cum_sum_values):
assert v == cvalues[i]
def test_cum_mean(es):
log_value_feat = es['log']['value']
cum_mean = CumMean(log_value_feat, es['log']['session_id'])
features = [cum_mean]
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=range(15),
time_last=None)
cvalues = df[cum_mean.get_name()].values
assert len(cvalues) == 15
cum_mean_values = [0, 2.5, 5, 7.5, 10, 0, .5, 1, 1.5, 0, 0, 2.5, 0, 3.5, 7]
for i, v in enumerate(cum_mean_values):
assert v == cvalues[i]
def test_cum_mean_use_previous(es):
log_value_feat = es['log']['value']
cum_mean = CumMean(log_value_feat, es['log']['session_id'],
use_previous=Timedelta(3, 'observations',
entity=es['log']))
features = [cum_mean]
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=range(15),
time_last=None)
cvalues = df[cum_mean.get_name()].values
assert len(cvalues) == 15
cum_mean_values = [0, 2.5, 5, 10, 15, 0, .5, 1, 2, 0, 0, 2.5, 0, 3.5, 7]
for i, v in enumerate(cum_mean_values):
assert v == cvalues[i]
def test_cum_mean_where(es):
log_value_feat = es['log']['value']
compare_feat = GreaterThan(log_value_feat, 3)
dfeat = Feature(es['sessions']['customer_id'], es['log'])
cum_mean = CumMean(log_value_feat, dfeat,
where=compare_feat)
features = [cum_mean]
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=range(15),
time_last=None)
cvalues = df[cum_mean.get_name()].values
assert len(cvalues) == 15
cum_mean_values = [0, 5, 7.5, 10, 12.5, 12.5, 12.5, 12.5, 12.5, 12.5,
0, 5, 5, 6, 26. / 3]
for i, v in enumerate(cum_mean_values):
if not np.isnan(v):
assert v == cvalues[i]
else:
assert (np.isnan(cvalues[i]))
def test_cum_mean_use_previous_and_where(es):
log_value_feat = es['log']['value']
compare_feat = GreaterThan(log_value_feat, 3)
# todo should this be cummean?
dfeat = Feature(es['sessions']['customer_id'], es['log'])
cum_mean = CumMean(log_value_feat, dfeat,
where=compare_feat,
use_previous=Timedelta(2, 'observations',
entity=es['log']))
features = [cum_mean]
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=range(15),
time_last=None)
cum_mean_values = [0, 5, 7.5, 12.5, 17.5, 17.5, 17.5, 17.5, 17.5, 17.5,
0, 5, 5, 6, 10.5]
cvalues = df[cum_mean.get_name()].values
assert len(cvalues) == 15
for i, v in enumerate(cum_mean_values):
assert v == cvalues[i]
def test_cum_count(es):
log_id_feat = es['log']['id']
cum_count = CumCount(log_id_feat, es['log']['session_id'])
features = [cum_count]
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=range(15),
time_last=None)
cvalues = df[cum_count.get_name()].values
assert len(cvalues) == 15
cum_count_values = [1, 2, 3, 4, 5, 1, 2, 3, 4, 1, 1, 2, 1, 2, 3]
for i, v in enumerate(cum_count_values):
assert v == cvalues[i]
def test_text_primitives(es):
words = NumWords(es['log']['comments'])
chars = NumCharacters(es['log']['comments'])
features = [words, chars]
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=range(15),
time_last=None)
word_counts = [514, 3, 3, 644, 1268, 1269, 177, 172, 79,
240, 1239, 3, 3, 3, 3]
char_counts = [3392, 10, 10, 4116, 7961, 7580, 992, 957,
437, 1325, 6322, 10, 10, 10, 10]
word_values = df[words.get_name()].values
char_values = df[chars.get_name()].values
assert len(word_values) == 15
for i, v in enumerate(word_values):
assert v == word_counts[i]
for i, v in enumerate(char_values):
assert v == char_counts[i]
def test_overrides(es):
value = Feature(es['log']['value'])
value2 = Feature(es['log']['value_2'])
feats = [Add, Subtract, Multiply, Divide]
compare_ops = [GreaterThan, LessThan, Equals, NotEquals,
GreaterThanEqualTo, LessThanEqualTo]
assert Negate(value).hash() == (-value).hash()
compares = [(value, value),
(value, value2),
(value2, 2)]
overrides = [
value + value,
value - value,
value * value,
value / value,
value > value,
value < value,
value == value,
value != value,
value >= value,
value <= value,
value + value2,
value - value2,
value * value2,
value / value2,
value > value2,
value < value2,
value == value2,
value != value2,
value >= value2,
value <= value2,
value2 + 2,
value2 - 2,
value2 * 2,
value2 / 2,
value2 > 2,
value2 < 2,
value2 == 2,
value2 != 2,
value2 >= 2,
value2 <= 2,
]
i = 0
for left, right in compares:
for feat in feats:
f = feat(left, right)
o = overrides[i]
assert o.hash() == f.hash()
i += 1
for compare_op in compare_ops:
f = compare_op(left, right)
o = overrides[i]
assert o.hash() == f.hash()
i += 1
our_reverse_overrides = [
2 + value2,
2 - value2,
2 * value2,
2 / value2]
i = 0
for feat in feats:
if feat != Mod:
f = feat(2, value2)
o = our_reverse_overrides[i]
assert o.hash() == f.hash()
i += 1
python_reverse_overrides = [
2 < value2,
2 > value2,
2 == value2,
2 != value2,
2 <= value2,
2 >= value2]
i = 0
for compare_op in compare_ops:
f = compare_op(value2, 2)
o = python_reverse_overrides[i]
assert o.hash() == f.hash()
i += 1
def test_override_boolean(es):
count = Count(es['log']['value'], es['sessions'])
count_lo = GreaterThan(count, 1)
count_hi = LessThan(count, 10)
to_test = [[True, True, True],
[True, True, False],
[False, False, True]]
features = []
features.append(count_lo.OR(count_hi))
features.append(count_lo.AND(count_hi))
features.append(~(count_lo.AND(count_hi)))
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=[0, 1, 2],
time_last=None)
for i, test in enumerate(to_test):
v = df[features[i].get_name()].values.tolist()
assert v == test
def test_override_cmp_from_variable(es):
count_lo = IdentityFeature(es['log']['value']) > 1
to_test = [False, True, True]
features = [count_lo]
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=[0, 1, 2],
time_last=None)
v = df[count_lo.get_name()].values.tolist()
for i, test in enumerate(to_test):
assert v[i] == test
def test_override_cmp(es):
count = Count(es['log']['value'], es['sessions'])
_sum = Sum(es['log']['value'], es['sessions'])
gt_lo = count > 1
gt_other = count > _sum
ge_lo = count >= 1
ge_other = count >= _sum
lt_hi = count < 10
lt_other = count < _sum
le_hi = count <= 10
le_other = count <= _sum
ne_lo = count != 1
ne_other = count != _sum
to_test = [[True, True, False],
[False, False, True],
[True, True, True],
[False, False, True],
[True, True, True],
[True, True, False],
[True, True, True],
[True, True, False],
[True, True, False],
[True, True, True]]
features = [gt_lo, gt_other, ge_lo, ge_other, lt_hi,
lt_other, le_hi, le_other, ne_lo, ne_other]
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=[0, 1, 2],
time_last=None)
for i, test in enumerate(to_test):
v = df[features[i].get_name()].values.tolist()
assert v == test
def test_isin_feat(es):
isin = IsIn(es['log']['product_id'],
list_of_outputs=["toothpaste", "coke zero"])
features = [isin]
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(range(8), None)
true = [True, True, True, False, False, True, True, True]
v = df[isin.get_name()].values.tolist()
assert true == v
def test_isin_feat_other_syntax(es):
isin = Feature(es['log']['product_id']).isin(["toothpaste", "coke zero"])
features = [isin]
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(range(8), None)
true = [True, True, True, False, False, True, True, True]
v = df[isin.get_name()].values.tolist()
assert true == v
def test_isin_feat_other_syntax_int(es):
isin = Feature(es['log']['value']).isin([5, 10])
features = [isin]
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(range(8), None)
true = [False, True, True, False, False, False, False, False]
v = df[isin.get_name()].values.tolist()
assert true == v
def test_isin_feat_custom(es):
def pd_is_in(array, list_of_outputs=None):
if list_of_outputs is None:
list_of_outputs = []
return pd.Series(array).isin(list_of_outputs)
def isin_generate_name(self):
return u"%s.isin(%s)" % (self.base_features[0].get_name(),
str(self.kwargs['list_of_outputs']))
IsIn = make_trans_primitive(
pd_is_in,
[Variable],
Boolean,
name="is_in",
description="For each value of the base feature, checks whether it is "
"in a list that is provided.",
cls_attributes={"generate_name": isin_generate_name})
isin = IsIn(es['log']['product_id'],
list_of_outputs=["toothpaste", "coke zero"])
features = [isin]
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(range(8), None)
true = [True, True, True, False, False, True, True, True]
v = df[isin.get_name()].values.tolist()
assert true == v
isin = Feature(es['log']['product_id']).isin(["toothpaste", "coke zero"])
features = [isin]
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(range(8), None)
true = [True, True, True, False, False, True, True, True]
v = df[isin.get_name()].values.tolist()
assert true == v
isin = Feature(es['log']['value']).isin([5, 10])
features = [isin]
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(range(8), None)
true = [False, True, True, False, False, False, False, False]
v = df[isin.get_name()].values.tolist()
assert true == v
def test_isnull_feat(es):
value = IdentityFeature(es['log']['value'])
diff = Diff(value, es['log']['session_id'])
isnull = IsNull(diff)
features = [isnull]
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(range(15), None)
# correct_vals_diff = [
# np.nan, 5, 5, 5, 5, np.nan, 1, 1, 1, np.nan, np.nan, 5, np.nan, 7, 7]
correct_vals = [True, False, False, False, False, True, False, False,
False, True, True, False, True, False, False]
values = df[isnull.get_name()].values.tolist()
assert correct_vals == values
def test_init_and_name(es):
from featuretools import calculate_feature_matrix
log = es['log']
features = [Feature(v) for v in log.variables] +\
[GreaterThan(Feature(es["products"]["rating"], es["log"]), 2.5)]
# Add Timedelta feature
features.append(pd.Timestamp.now() - Feature(log['datetime']))
for transform_prim in get_transform_primitives().values():
# use the input_types matching function from DFS
input_types = transform_prim.input_types
if type(input_types[0]) == list:
matching_inputs = match(input_types[0], features)
else:
matching_inputs = match(input_types, features)
if len(matching_inputs) == 0:
raise Exception(
"Transform Primitive %s not tested" % transform_prim.name)
for s in matching_inputs:
instance = transform_prim(*s)
# try to get name and calculate
instance.get_name()
calculate_feature_matrix([instance], entityset=es).head(5)
def test_percentile(es):
v = Feature(es['log']['value'])
p = Percentile(v)
pandas_backend = PandasBackend(es, [p])
df = pandas_backend.calculate_all_features(range(10, 17), None)
true = es['log'].df[v.get_name()].rank(pct=True)
true = true.loc[range(10, 17)]
for t, a in zip(true.values, df[p.get_name()].values):
assert (pd.isnull(t) and pd.isnull(a)) or t == a
def test_dependent_percentile(es):
v = Feature(es['log']['value'])
p = Percentile(v)
p2 = Percentile(p - 1)
pandas_backend = PandasBackend(es, [p, p2])
df = pandas_backend.calculate_all_features(range(10, 17), None)
true = es['log'].df[v.get_name()].rank(pct=True)
true = true.loc[range(10, 17)]
for t, a in zip(true.values, df[p.get_name()].values):
assert (pd.isnull(t) and pd.isnull(a)) or t == a
def test_agg_percentile(es):
v = Feature(es['log']['value'])
p = Percentile(v)
agg = Sum(p, es['sessions'])
pandas_backend = PandasBackend(es, [agg])
df = pandas_backend.calculate_all_features([0, 1], None)
log_vals = es['log'].df[[v.get_name(), 'session_id']]
log_vals['percentile'] = log_vals[v.get_name()].rank(pct=True)
true_p = log_vals.groupby('session_id')['percentile'].sum()[[0, 1]]
for t, a in zip(true_p.values, df[agg.get_name()].values):
assert (pd.isnull(t) and pd.isnull(a)) or t == a
def test_percentile_agg_percentile(es):
v = Feature(es['log']['value'])
p = Percentile(v)
agg = Sum(p, es['sessions'])
pagg = Percentile(agg)
pandas_backend = PandasBackend(es, [pagg])
df = pandas_backend.calculate_all_features([0, 1], None)
log_vals = es['log'].df[[v.get_name(), 'session_id']]
log_vals['percentile'] = log_vals[v.get_name()].rank(pct=True)
true_p = log_vals.groupby('session_id')['percentile'].sum().fillna(0)
true_p = true_p.rank(pct=True)[[0, 1]]
for t, a in zip(true_p.values, df[pagg.get_name()].values):
assert (pd.isnull(t) and pd.isnull(a)) or t == a
def test_percentile_agg(es):
v = Feature(es['log']['value'])
agg = Sum(v, es['sessions'])
pagg = Percentile(agg)
pandas_backend = PandasBackend(es, [pagg])
df = pandas_backend.calculate_all_features([0, 1], None)
log_vals = es['log'].df[[v.get_name(), 'session_id']]
true_p = log_vals.groupby('session_id')[v.get_name()].sum().fillna(0)
true_p = true_p.rank(pct=True)[[0, 1]]
for t, a in zip(true_p.values, df[pagg.get_name()].values):
assert (pd.isnull(t) and pd.isnull(a)) or t == a
import pandas as pd
import numpy as np
# Import data to dataframe
players = pd.read_csv('players.csv')
goalies = pd.read_csv('goalies.csv')
# Remove unnecessary columns:
players = players.drop(columns=['adjusted_assists',
'adjusted_goals',
'adjusted_goals_against_average',
'adjusted_goals_created',
'adjusted_points',
'corsi_against',
'corsi_for',
'corsi_for_percentage',
'defensive_point_shares',
'defensive_zone_start_percentage',
'even_strength_assists',
'even_strength_goals',
'even_strength_goals_allowed',
'even_strength_save_percentage',
'even_strength_shots_faced',
'fenwick_against',
'fenwick_for',
'fenwick_for_percentage',
'giveaways',
'goal_against_percentage_relative',
'goalie_point_shares',
'goals_against_on_ice',
'goals_created',
'goals_for_on_ice',
'goals_saved_above_average',
'height',
'league',
'minutes',
'offensive_point_shares',
'offensive_zone_start_percentage',
'pdo',
'point_shares',
'power_play_goals_against_on_ice',
'power_play_goals_allowed',
'power_play_goals_for_on_ice',
'power_play_save_percentage',
'power_play_shots_faced',
'quality_start_percentage',
'relative_corsi_for_percentage',
'relative_fenwick_for_percentage',
'save_percentage_on_ice',
'shooting_percentage_on_ice',
'shootout_attempts',
'shootout_goals',
'shootout_misses',
'shootout_percentage',
'short_handed_goals_allowed',
'short_handed_save_percentage',
'short_handed_shots_faced',
'shots_against',
'takeaways',
'ties_plus_overtime_loss',
'time_on_ice_even_strength',
'total_goals_against_on_ice',
'total_goals_for_on_ice',
'weight',
'faceoff_losses',
'faceoff_percentage',
'wins',
'total_shots',
'shutouts',
'quality_starts',
'really_bad_starts',
'save_percentage',
'saves',
'losses',
'goals_against_average',
'goals_against',
])
goalies = goalies.drop(columns=['adjusted_assists',
'adjusted_goals',
'adjusted_goals_against_average',
'adjusted_goals_created',
'adjusted_points',
'corsi_against',
'corsi_for',
'corsi_for_percentage',
'defensive_point_shares',
'defensive_zone_start_percentage',
'even_strength_assists',
'even_strength_goals',
'even_strength_goals_allowed',
'even_strength_save_percentage',
'even_strength_shots_faced',
'fenwick_against',
'fenwick_for',
'fenwick_for_percentage',
'giveaways',
'goal_against_percentage_relative',
'goalie_point_shares',
'goals_against_on_ice',
'goals_created',
'goals_for_on_ice',
'goals_saved_above_average',
'height',
'league',
'minutes',
'offensive_point_shares',
'offensive_zone_start_percentage',
'pdo',
'point_shares',
'power_play_goals_against_on_ice',
'power_play_goals_allowed',
'power_play_goals_for_on_ice',
'power_play_save_percentage',
'power_play_shots_faced',
'quality_start_percentage',
'relative_corsi_for_percentage',
'relative_fenwick_for_percentage',
'save_percentage_on_ice',
'shooting_percentage_on_ice',
'shootout_attempts',
'shootout_goals',
'shootout_misses',
'shootout_percentage',
'short_handed_goals_allowed',
'short_handed_save_percentage',
'short_handed_shots_faced',
'shots_against',
'takeaways',
'ties_plus_overtime_loss',
'time_on_ice_even_strength',
'total_goals_against_on_ice',
'total_goals_for_on_ice',
'weight',
'faceoff_losses',
'faceoff_percentage',
'assists',
'average_time_on_ice',
'blocks_at_even_strength',
'faceoff_wins',
'game_winning_goals',
'games_played',
'goals',
'hits_at_even_strength',
'penalties_in_minutes',
'plus_minus',
'points',
'power_play_assists',
'power_play_goals',
'shooting_percentage',
'short_handed_assists',
'short_handed_goals',
'shots_on_goal'
])
players = players.fillna(0)
goalies = goalies.fillna(0)
# Replace season values
s = players['season'].isin(['Career', 'season'])
players.loc[~s, 'season'] = players.groupby(s.ne(s.shift()).cumsum()).cumcount() + 1
l = goalies['season'].isin(['Career', 'season'])
goalies.loc[~l, 'season'] = goalies.groupby(l.ne(l.shift()).cumsum()).cumcount() + 1
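# Illustrative aside (not part of the original pipeline; the values below are
# made up): the idiom above assigns a new group id every time the Career/season
# mask changes, so cumcount() restarts within each player's block of rows.
_demo = pd.Series(['row', 'row', 'Career', 'row', 'row', 'row'])
_demo_mask = _demo.isin(['Career', 'season'])
_demo_counts = _demo.groupby(_demo_mask.ne(_demo_mask.shift()).cumsum()).cumcount() + 1
# _demo_counts[~_demo_mask] -> 1, 2, 1, 2, 3 (numbering restarts after the 'Career' row)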
# Drop Career and season rows
players.drop(players[players['season'] == 'Career'].index, inplace=True)
players.drop(players[players['season'] == 'season'].index, inplace=True)
goalies.drop(goalies[goalies['season'] == 'Career'].index, inplace=True)
goalies.drop(goalies[goalies['season'] == 'season'].index, inplace=True)
# Change column values to ints
changes_players = ['assists',
'blocks_at_even_strength', 'faceoff_wins', 'game_winning_goals',
'goals', 'hits_at_even_strength',
'penalties_in_minutes', 'plus_minus', 'points',
'power_play_assists', 'power_play_goals',
'short_handed_assists', 'short_handed_goals',
'shots_on_goal', 'games_played', 'season']
changes_goalies = ['losses',
'quality_starts', 'really_bad_starts',
'saves', 'shutouts',
'time_on_ice', 'total_shots', 'wins']
for integer in changes_players:
players[integer] = pd.to_numeric(players[integer])
for integer in changes_goalies:
goalies[integer] = pd.to_numeric(goalies[integer])
####################### TEST ENVIRONMENT ################################
test = pd.DataFrame()
test['goals/game'] = players['goals']/(players['games_played'])
test['points/game'] = players['points']/(players['games_played'])
test['shots/game'] = players['shots_on_goal']/(players['games_played'])
test['gwg'] = players['game_winning_goals']/(players['games_played'])
test['assists'] = players['assists']/(players['games_played'])
test['ppa'] = players['power_play_assists']/(players['games_played'])
test['ppg'] = players['power_play_goals']/(players['games_played'])
test['shp'] = (players['short_handed_assists']+players['short_handed_goals'])/(players['games_played'])
test['season'] = players['season']
# test['shooting%'] = players['shooting_percentage']
test['plus_minus'] = players['plus_minus']
test['name'] = players['name']
test['hits'] = players['hits_at_even_strength']/(players['games_played'])
test['blocks'] = players['blocks_at_even_strength']/(players['games_played'])
test['PIM'] = players['penalties_in_minutes']/(players['games_played'])
# test['TOI'] = players['average_time_on_ice']
# test['FO%'] = players['faceoff_percentage']
test['FOW'] = players['faceoff_wins']/(players['games_played'])
test['games_played'] = players['games_played']
# For loop to predict all players
selected_players = [player for player in test['name']]
selected_players = list(set(selected_players))
player_prediction = pd.DataFrame()
import os
import cv2
import json
import dlib
import shutil
import joblib
import exifread
import warnings
import numpy as np
import pandas as pd
import face_recognition
from pathlib import Path
from joblib import Parallel, delayed
def get_model(cfg):
from tensorflow.keras import applications
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import SGD, Adam
base_model = getattr(applications, cfg.model.model_name)(
include_top=False,
input_shape=(cfg.model.img_size, cfg.model.img_size, 3),
pooling="avg"
)
features = base_model.output
pred_gender = Dense(units=2, activation="softmax", name="pred_gender")(features)
pred_age = Dense(units=101, activation="softmax", name="pred_age")(features)
model = Model(inputs=base_model.input, outputs=[pred_gender, pred_age])
return model
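# Hedged helper (not part of the original script): a sketch of how the two
# softmax heads returned by get_model() are typically decoded. Treating column
# 0 of the gender head as "female" and taking the expected value over the 101
# age bins (0-100) are assumptions, not something defined upstream.
def decode_predictions(pred_gender, pred_age):
    genders = np.where(pred_gender[:, 0] > 0.5, "F", "M")  # assumed: column 0 = female
    ages = pred_age.dot(np.arange(0, 101))  # expected age over the 101 bins
    return genders, ages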
detector = dlib.get_frontal_face_detector()
def overwrite(dir):
if os.path.exists(dir):
shutil.rmtree(dir)
os.mkdir(dir)
def extract(source_dir, age_gender=False, exif=False):
from omegaconf import OmegaConf
from tensorflow.keras.utils import get_file
global output_dir, network_dir, face_dir, detector, model
output_dir=os.path.join(Path(source_dir), "Face Network/")
network_dir=os.path.join(output_dir, "Data/")
face_dir=os.path.join(output_dir, "Faces/")
overwrite(output_dir)
overwrite(network_dir)
overwrite(face_dir)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
pretrained_model = "https://github.com/yu4u/age-gender-estimation/releases/download/v0.6/EfficientNetB3_224_weights.11-3.44.hdf5"
modhash = '6d7f7b7ced093a8b3ef6399163da6ece'
weight_file = get_file("EfficientNetB3_224_weights.11-3.44.hdf5", pretrained_model, cache_subdir="pretrained_models",
file_hash=modhash, cache_dir=str(Path(__file__).resolve().parent))
# load model and weights
model_name, img_size = Path(weight_file).stem.split("_")[:2]
img_size = int(img_size)
cfg = OmegaConf.from_dotlist([f"model.model_name={model_name}", f"model.img_size={img_size}"])
model = get_model(cfg)
model.load_weights(weight_file)
img_list=makelist('.jpg', source_dir=source_dir)
all_images = pd.DataFrame()
# coding=utf-8
"""
This module contains functions for debugging the decision tree matcher.
"""
import logging
import subprocess
import pandas as pd
import six
from py_entitymatching.utils.validation_helper import validate_object_type
from sklearn.tree import export_graphviz
from py_entitymatching.feature.extractfeatures import apply_feat_fns
from py_entitymatching.matcher.dtmatcher import DTMatcher
logger = logging.getLogger(__name__)
def visualize_tree(decision_tree, table_columns, exclude_attrs=None):
"""
This function is used to visualize the decision tree learned from the
training data using the 'fit' method.
Note that this function does not pop up a visualization of the decision tree.
It creates a png file in the local directory, and the user has to open that
file explicitly to view the tree. Moreover, this function uses the 'dot'
command and graphviz to create the visualization. It is assumed that graphviz
is installed and the 'dot' command is available on the machine on which this
function is executed.
Args:
decision_tree (DTMatcher or DecisionTreeClassifier): The decision tree
matcher for which the visualization needs to be generated.
table_columns (list): Attributes from the input table that was used
to train the decision tree.
exclude_attrs (list): Attributes that should be removed from the
table columns to get the actual feature vectors (defaults to None).
"""
# Validate input parameters
# # We expect the input decision tree to be of type DTMatcher. If so get
# the classifier out of it.
if isinstance(decision_tree, DTMatcher):
tree = decision_tree.clf
else:
tree = decision_tree
# If the exclude attribute is nothing, then all the given columns are
# feature vectors.
if exclude_attrs is None:
feature_names = table_columns
else:
# Else pick out the feature vector columns based on the exclude
# attributes.
columns = [c not in exclude_attrs for c in table_columns]
feature_names = table_columns[columns]
# Create a file (as of now hardcoded) and write the tree into that file.
with open("dt_.dot", 'w') as f:
export_graphviz(tree, out_file=f,
feature_names=feature_names)
# Create a png file from the dot file and store it in the same directory
command = ["dot", "-Tpng", "dt_.dot", "-o", "dt_.png"]
# noinspection PyBroadException
try:
subprocess.check_call(command)
except:
logger.error("Could not run dot, ie graphviz, to "
"produce visualization")
return
# Finally, print help information on how to display the visualization
# from the IPython console.
print("Execute the following command in IPython command prompt:")
print("")
print("from IPython.display import Image")
print("Image(filename='dt_.png') ")
def _get_code(tree, feature_names, target_names,
spacer_base=" "):
"""
Produce pseudo-code for the decision tree.
This is based on http://stackoverflow.com/a/30104792.
"""
# Get the left, right trees and the threshold from the tree
left = tree.tree_.children_left
right = tree.tree_.children_right
threshold = tree.tree_.threshold
# Get the features from the tree
features = [feature_names[i] for i in tree.tree_.feature]
value = tree.tree_.value
code_list = []
# Now recursively build the tree by going through each node.
def recurse(left, right, threshold, features, node, depth):
"""
Recursive helper that encodes the debug logic at each node.
"""
spacer = spacer_base * depth
# For each of the threshold conditions, add appropriate code that
# should be executed.
if threshold[node] != -2:
code_str = spacer + "if ( " + features[node] + " <= " + \
str(threshold[node]) + " ):"
code_list.append(code_str)
code_str = spacer + spacer_base + "print( \'" + spacer_base + "" + \
features[
node] + " <= " + str(
threshold[node]) + \
" is True " + "( value : \' + str(" + str(
features[node]) + ") + \')\')"
code_list.append(code_str)
if left[node] != -1:
recurse(left, right, threshold, features,
left[node], depth + 1)
code_str = spacer + "else:"
code_list.append(code_str)
code_str = spacer + spacer_base + "print( \'" + spacer_base + "" + \
features[
node] + " <= " + str(
threshold[node]) + \
" is False " + "( value : \' + str(" + str(
features[node]) + ") + \')\')"
code_list.append(code_str)
if right[node] != -1:
recurse(left, right, threshold, features,
right[node], depth + 1)
else:
target = value[node]
winning_target_name = None
winning_target_count = None
for i, v in zip(pd.np.nonzero(target)[1], target[pd.np.nonzero(target)]):
'''
This script applies the entity embedding neural network approach, pioneered by Neokami during Kaggle's Rossmann Store Sales challenge, to point-of-sale (POS) data of a hospitality firm (a minimal sketch of one embedding branch follows the imports below)
'''
# Import libraries
import numpy as np
import pandas as pd
import pickle
import os
import csv
import sys
from joblib import Parallel, delayed
import multiprocessing
from .Preprocess import Preprocess
from .EntityEmbedding import EntityEmbedding
from .Visualisation import Visualise
from .Forecast import Forecast
from .Helper import Helper
from datetime import datetime
from sklearn.preprocessing import LabelEncoder
sys.setrecursionlimit(10000)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
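# Hedged illustration (not part of the original package): a minimal sketch of a
# single entity-embedding input branch in Keras, in the spirit of the approach
# named in the module docstring. One such branch is built per categorical
# feature and the flattened outputs are concatenated before the dense layers.
# The category count and embedding width below are made-up placeholders.
def _entity_embedding_branch_sketch(n_categories=1000, embedding_dim=10):
    from tensorflow.keras.layers import Input, Embedding, Flatten
    inp = Input(shape=(1,))  # integer-encoded category id for one feature
    emb = Embedding(input_dim=n_categories, output_dim=embedding_dim)(inp)
    return inp, Flatten()(emb)  # concatenate branches, then add Dense layers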
class Kami(Preprocess, Visualise, Forecast):
'''Main module'''
def __init__(self, input_f_path, output_dir_path, cache_dir_path, sales_as_label = True, weekly_agg = False, deployment_mode = False, n_1 = 2048, n_2 = 1024, n_3 = 512, n_4 = 256, n_5 = 128, dropout = False, output_activation = 'relu', err_func = 'mean_squared_error', optimizer = 'adam', epochs = 50, patience = 5, batch_size = 1024, n_sample = 500000, n_ensemble = 3, val_split_ratio = 0.95, save_embeddings = True, saved_embeddings_fname = 'embeddings.pickle', *args, **kwargs):
'''Initiate local variables'''
Preprocess.__init__(self, input_f_path, cache_dir_path)
Visualise.__init__(self, output_dir_path, cache_dir_path)
Forecast.__init__(self, output_dir_path, cache_dir_path)
self.r_train, self.r_val = 0, 0
self.input_f_path, self.cache_dir_path, self.output_dir_path, self.weekly_agg, self.deployment_mode = self.input_f_path, cache_dir_path, output_dir_path, weekly_agg, deployment_mode
self.n_1, self.n_2, self.n_3, self.n_4, self.n_5, self.dropout, self.output_activation, self.err_func, self.optimizer, self.epochs, self.patience, self.batch_size, self.n_sample, self.n_ensemble, self.val_split_ratio, self.save_embeddings, self.saved_embeddings_fname = n_1, n_2, n_3, n_4, n_5, dropout, output_activation, err_func, optimizer, epochs, patience, batch_size, n_sample, n_ensemble, val_split_ratio, save_embeddings, saved_embeddings_fname
self.target_label = 'sales' if sales_as_label else 'quantity'
def __repr__(self):
return 'Please assign an object to store the instance'
def extract_csv(self, cache_dir_path, weekly_agg):
'''Convert cached csv files into dictionary-like objects'''
train_path = (cache_dir_path + 'train.csv') if not weekly_agg else (cache_dir_path + 'train_weekly.csv')
test_path = (cache_dir_path + 'test.csv') if not weekly_agg else (cache_dir_path + 'test_weekly.csv')
df_path = (cache_dir_path + 'df.csv') if not weekly_agg else (cache_dir_path + 'df_weekly.csv')
with open(train_path) as csv_train, open(test_path) as csv_test, open(df_path) as csv_df:
train, test, df = csv.reader(csv_train, delimiter = ','), csv.reader(csv_test, delimiter = ','), csv.reader(csv_df, delimiter = ',')
with open(cache_dir_path + 'train.pickle', 'wb') as f_train, open(cache_dir_path + 'test.pickle', 'wb') as f_test, open(cache_dir_path + 'df.pickle', 'wb') as f_df:
train, test, df = Helper.csv2dict(train), Helper.csv2dict(test), Helper.csv2dict(df)
train, df = train[::-1], df[::-1]
pickle.dump(train, f_train, -1), pickle.dump(test, f_test, -1), pickle.dump(df, f_df, -1)
def prep_features(self, cache_dir_path, target_label, deployment_mode):
'''Engineer features to ready them for the neural network'''
with open(cache_dir_path + 'train.pickle', 'rb') as f_train, open(cache_dir_path + 'test.pickle', 'rb') as f_test, open(cache_dir_path + 'df.pickle', 'rb') as f_df:
train, test, df = pickle.load(f_train), pickle.load(f_test), pickle.load(f_df)
n_train_val = len(train)
train_x, train_y, test_x, test_y, X, y = [], [], [], [], [], []
train_x, train_y = Aux.select_and_split(data = train, target = train_y, features = train_x, target_label = target_label)
test_x, test_y = Aux.select_and_split(data = test, target = test_y, features = test_x, target_label = target_label)
X, y = Aux.select_and_split(data = df, target = y, features = X, target_label = target_label)
if deployment_mode:
print('{0:*^80}'.format('Number of Train & Validation Observations Available:'))
print('{0:*^80}'.format(str(len(y))))
print('{0:*^80}'.format('Range of Train Target:'))
print('{0:*^80}'.format(str(min(y)) + ' to ' + str(max(y))))
else:
print('{0:*^80}'.format('Number of Train & Validation and Test Observations Available:'))
print('{0:*^80}'.format(str(len(train_y)) + ' and ' + str(len(test_y))))
print('{0:*^80}'.format('Range of Train Target:'))
print('{0:*^80}'.format(str(min(train_y)) + ' to ' + str(max(train_y))))
train_x, test_x, X = Aux.encode_labels(train_features = train_x, test_features = test_x, all_features = X, cache_dir_path = cache_dir_path)
with open(cache_dir_path + 'train_prepped.pickle', 'wb') as f_train, open(cache_dir_path + 'test_prepped.pickle', 'wb') as f_test, open(cache_dir_path + 'all_prepped.pickle', 'wb') as f_all:
pickle.dump((train_x, train_y), f_train, -1), pickle.dump(test_x, f_test, -1), pickle.dump((X, y), f_all, -1)
# -*- coding: utf-8 -*-
"""
These tests cover the public routines exposed in types/common.py
related to inference that are not otherwise tested in types/test_common.py
"""
from warnings import catch_warnings, simplefilter
import collections
import re
from datetime import datetime, date, timedelta, time
from decimal import Decimal
from numbers import Number
from fractions import Fraction
import numpy as np
import pytz
import pytest
import pandas as pd
from pandas._libs import lib, iNaT, missing as libmissing
from pandas import (Series, Index, DataFrame, Timedelta,
DatetimeIndex, TimedeltaIndex, Timestamp,
Panel, Period, Categorical, isna, Interval,
DateOffset)
from pandas import compat
from pandas.compat import u, PY2, StringIO, lrange
from pandas.core.dtypes import inference
from pandas.core.dtypes.common import (
is_timedelta64_dtype,
is_timedelta64_ns_dtype,
is_datetime64_dtype,
is_datetime64_ns_dtype,
is_datetime64_any_dtype,
is_datetime64tz_dtype,
is_number,
is_integer,
is_float,
is_bool,
is_scalar,
is_scipy_sparse,
ensure_int32,
ensure_categorical)
from pandas.util import testing as tm
import pandas.util._test_decorators as td
@pytest.fixture(params=[True, False], ids=str)
def coerce(request):
return request.param
# collect all objects to be tested for list-like-ness; use tuples of objects,
# whether they are list-like or not (special casing for sets), and their ID
ll_params = [
([1], True, 'list'), # noqa: E241
([], True, 'list-empty'), # noqa: E241
((1, ), True, 'tuple'), # noqa: E241
(tuple(), True, 'tuple-empty'), # noqa: E241
({'a': 1}, True, 'dict'), # noqa: E241
(dict(), True, 'dict-empty'), # noqa: E241
({'a', 1}, 'set', 'set'), # noqa: E241
(set(), 'set', 'set-empty'), # noqa: E241
(frozenset({'a', 1}), 'set', 'frozenset'), # noqa: E241
(frozenset(), 'set', 'frozenset-empty'), # noqa: E241
(iter([1, 2]), True, 'iterator'), # noqa: E241
(iter([]), True, 'iterator-empty'), # noqa: E241
((x for x in [1, 2]), True, 'generator'), # noqa: E241
((x for x in []), True, 'generator-empty'), # noqa: E241
(Series([1]), True, 'Series'), # noqa: E241
(Series([]), True, 'Series-empty'), # noqa: E241
(Series(['a']).str, True, 'StringMethods'), # noqa: E241
(Series([], dtype='O').str, True, 'StringMethods-empty'), # noqa: E241
(Index([1]), True, 'Index'), # noqa: E241
(Index([]), True, 'Index-empty'), # noqa: E241
(DataFrame([[1]]), True, 'DataFrame'), # noqa: E241
(DataFrame(), True, 'DataFrame-empty'), # noqa: E241
(np.ndarray((2,) * 1), True, 'ndarray-1d'), # noqa: E241
(np.array([]), True, 'ndarray-1d-empty'), # noqa: E241
(np.ndarray((2,) * 2), True, 'ndarray-2d'), # noqa: E241
(np.array([[]]), True, 'ndarray-2d-empty'), # noqa: E241
(np.ndarray((2,) * 3), True, 'ndarray-3d'), # noqa: E241
(np.array([[[]]]), True, 'ndarray-3d-empty'), # noqa: E241
(np.ndarray((2,) * 4), True, 'ndarray-4d'), # noqa: E241
(np.array([[[[]]]]), True, 'ndarray-4d-empty'), # noqa: E241
(np.array(2), False, 'ndarray-0d'), # noqa: E241
(1, False, 'int'), # noqa: E241
(b'123', False, 'bytes'), # noqa: E241
(b'', False, 'bytes-empty'), # noqa: E241
('123', False, 'string'), # noqa: E241
('', False, 'string-empty'), # noqa: E241
(str, False, 'string-type'), # noqa: E241
(object(), False, 'object'), # noqa: E241
(np.nan, False, 'NaN'), # noqa: E241
(None, False, 'None') # noqa: E241
]
objs, expected, ids = zip(*ll_params)
@pytest.fixture(params=zip(objs, expected), ids=ids)
def maybe_list_like(request):
return request.param
def test_is_list_like(maybe_list_like):
obj, expected = maybe_list_like
expected = True if expected == 'set' else expected
assert inference.is_list_like(obj) == expected
def test_is_list_like_disallow_sets(maybe_list_like):
obj, expected = maybe_list_like
expected = False if expected == 'set' else expected
assert inference.is_list_like(obj, allow_sets=False) == expected
def test_is_sequence():
is_seq = inference.is_sequence
assert (is_seq((1, 2)))
assert (is_seq([1, 2]))
assert (not is_seq("abcd"))
assert (not is_seq(u("abcd")))
assert (not is_seq(np.int64))
class A(object):
def __getitem__(self):
return 1
assert (not is_seq(A()))
def test_is_array_like():
assert inference.is_array_like(Series([]))
assert inference.is_array_like(Series([1, 2]))
assert inference.is_array_like(np.array(["a", "b"]))
assert inference.is_array_like(Index(["2016-01-01"]))
class DtypeList(list):
dtype = "special"
assert inference.is_array_like(DtypeList())
assert not inference.is_array_like([1, 2, 3])
assert not inference.is_array_like(tuple())
assert not inference.is_array_like("foo")
assert not inference.is_array_like(123)
@pytest.mark.parametrize('inner', [
[], [1], (1, ), (1, 2), {'a': 1}, {1, 'a'}, Series([1]),
Series([]), Series(['a']).str, (x for x in range(5))
])
@pytest.mark.parametrize('outer', [
list, Series, np.array, tuple
])
def test_is_nested_list_like_passes(inner, outer):
result = outer([inner for _ in range(5)])
assert inference.is_list_like(result)
@pytest.mark.parametrize('obj', [
'abc', [], [1], (1,), ['a'], 'a', {'a'},
[1, 2, 3], Series([1]), DataFrame({"A": [1]}),
([1, 2] for _ in range(5)),
])
def test_is_nested_list_like_fails(obj):
assert not inference.is_nested_list_like(obj)
@pytest.mark.parametrize(
"ll", [{}, {'A': 1}, Series([1])])
def test_is_dict_like_passes(ll):
assert inference.is_dict_like(ll)
@pytest.mark.parametrize(
"ll", ['1', 1, [1, 2], (1, 2), range(2), Index([1])])
def test_is_dict_like_fails(ll):
assert not inference.is_dict_like(ll)
@pytest.mark.parametrize("has_keys", [True, False])
@pytest.mark.parametrize("has_getitem", [True, False])
@pytest.mark.parametrize("has_contains", [True, False])
def test_is_dict_like_duck_type(has_keys, has_getitem, has_contains):
class DictLike(object):
def __init__(self, d):
self.d = d
if has_keys:
def keys(self):
return self.d.keys()
if has_getitem:
def __getitem__(self, key):
return self.d.__getitem__(key)
if has_contains:
def __contains__(self, key):
return self.d.__contains__(key)
d = DictLike({1: 2})
result = inference.is_dict_like(d)
expected = has_keys and has_getitem and has_contains
assert result is expected
def test_is_file_like(mock):
class MockFile(object):
pass
is_file = inference.is_file_like
data = StringIO("data")
assert is_file(data)
# No read / write attributes
# No iterator attributes
m = MockFile()
assert not is_file(m)
MockFile.write = lambda self: 0
# Write attribute but not an iterator
m = MockFile()
assert not is_file(m)
# gh-16530: Valid iterator just means we have the
# __iter__ attribute for our purposes.
MockFile.__iter__ = lambda self: self
# Valid write-only file
m = MockFile()
assert is_file(m)
del MockFile.write
MockFile.read = lambda self: 0
# Valid read-only file
m = MockFile()
assert is_file(m)
# Iterator but no read / write attributes
data = [1, 2, 3]
assert not is_file(data)
assert not is_file(mock.Mock())
@pytest.mark.parametrize(
"ll", [collections.namedtuple('Test', list('abc'))(1, 2, 3)])
def test_is_named_tuple_passes(ll):
assert inference.is_named_tuple(ll)
@pytest.mark.parametrize(
"ll", [(1, 2, 3), 'a', Series({'pi': 3.14})])
def test_is_named_tuple_fails(ll):
assert not inference.is_named_tuple(ll)
def test_is_hashable():
# all new-style classes are hashable by default
class HashableClass(object):
pass
class UnhashableClass1(object):
__hash__ = None
class UnhashableClass2(object):
def __hash__(self):
raise TypeError("Not hashable")
hashable = (1,
3.14,
np.float64(3.14),
'a',
tuple(),
(1, ),
HashableClass(), )
not_hashable = ([], UnhashableClass1(), )
abc_hashable_not_really_hashable = (([], ), UnhashableClass2(), )
for i in hashable:
assert inference.is_hashable(i)
for i in not_hashable:
assert not inference.is_hashable(i)
for i in abc_hashable_not_really_hashable:
assert not inference.is_hashable(i)
# numpy.array is no longer collections.Hashable as of
# https://github.com/numpy/numpy/pull/5326, just test
# is_hashable()
assert not inference.is_hashable(np.array([]))
# old-style classes in Python 2 don't appear hashable to
# collections.Hashable but also seem to support hash() by default
if PY2:
class OldStyleClass():
pass
c = OldStyleClass()
assert not isinstance(c, compat.Hashable)
assert inference.is_hashable(c)
hash(c) # this will not raise
@pytest.mark.parametrize(
"ll", [re.compile('ad')])
def test_is_re_passes(ll):
assert inference.is_re(ll)
@pytest.mark.parametrize(
"ll", ['x', 2, 3, object()])
def test_is_re_fails(ll):
assert not inference.is_re(ll)
@pytest.mark.parametrize(
"ll", [r'a', u('x'),
r'asdf',
re.compile('adsf'),
u(r'\u2233\s*'),
re.compile(r'')])
def test_is_recompilable_passes(ll):
assert inference.is_re_compilable(ll)
@pytest.mark.parametrize(
"ll", [1, [], object()])
def test_is_recompilable_fails(ll):
assert not inference.is_re_compilable(ll)
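# Tests for the low-level helpers in pandas._libs.lib: dtype inference for bytes,
# scalar inf checks, and maybe_convert_numeric / maybe_convert_objects.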
class TestInference(object):
def test_infer_dtype_bytes(self):
compare = 'string' if PY2 else 'bytes'
# string array of bytes
arr = np.array(list('abc'), dtype='S1')
assert lib.infer_dtype(arr) == compare
# object array of bytes
arr = arr.astype(object)
assert lib.infer_dtype(arr) == compare
# object array of bytes with missing values
assert lib.infer_dtype([b'a', np.nan, b'c'], skipna=True) == compare
def test_isinf_scalar(self):
# GH 11352
assert libmissing.isposinf_scalar(float('inf'))
assert libmissing.isposinf_scalar(np.inf)
assert not libmissing.isposinf_scalar(-np.inf)
assert not libmissing.isposinf_scalar(1)
assert not libmissing.isposinf_scalar('a')
assert libmissing.isneginf_scalar(float('-inf'))
assert libmissing.isneginf_scalar(-np.inf)
assert not libmissing.isneginf_scalar(np.inf)
assert not libmissing.isneginf_scalar(1)
assert not libmissing.isneginf_scalar('a')
def test_maybe_convert_numeric_infinities(self):
# see gh-13274
infinities = ['inf', 'inF', 'iNf', 'Inf',
'iNF', 'InF', 'INf', 'INF']
na_values = {'', 'NULL', 'nan'}
pos = np.array(['inf'], dtype=np.float64)
neg = np.array(['-inf'], dtype=np.float64)
msg = "Unable to parse string"
for infinity in infinities:
for maybe_int in (True, False):
out = lib.maybe_convert_numeric(
np.array([infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
out = lib.maybe_convert_numeric(
np.array(['-' + infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, neg)
out = lib.maybe_convert_numeric(
np.array([u(infinity)], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
out = lib.maybe_convert_numeric(
np.array(['+' + infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
# too many characters
with pytest.raises(ValueError, match=msg):
lib.maybe_convert_numeric(
np.array(['foo_' + infinity], dtype=object),
na_values, maybe_int)
def test_maybe_convert_numeric_post_floatify_nan(self, coerce):
# see gh-13314
data = np.array(['1.200', '-999.000', '4.500'], dtype=object)
expected = np.array([1.2, np.nan, 4.5], dtype=np.float64)
nan_values = {-999, -999.0}
out = lib.maybe_convert_numeric(data, nan_values, coerce)
tm.assert_numpy_array_equal(out, expected)
def test_convert_infs(self):
arr = np.array(['inf', 'inf', 'inf'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False)
assert result.dtype == np.float64
arr = np.array(['-inf', '-inf', '-inf'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False)
assert result.dtype == np.float64
def test_scientific_no_exponent(self):
# See PR 12215
arr = np.array(['42E', '2E', '99e', '6e'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False, True)
assert np.all(np.isnan(result))
def test_convert_non_hashable(self):
# GH13324
        # make sure that we are handling non-hashables
arr = np.array([[10.0, 2], 1.0, 'apple'])
result = lib.maybe_convert_numeric(arr, set(), False, True)
tm.assert_numpy_array_equal(result, np.array([np.nan, 1.0, np.nan]))
def test_convert_numeric_uint64(self):
arr = np.array([2**63], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)
arr = np.array([str(2**63)], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)
arr = np.array([np.uint64(2**63)], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)
@pytest.mark.parametrize("arr", [
np.array([2**63, np.nan], dtype=object),
np.array([str(2**63), np.nan], dtype=object),
np.array([np.nan, 2**63], dtype=object),
np.array([np.nan, str(2**63)], dtype=object)])
def test_convert_numeric_uint64_nan(self, coerce, arr):
expected = arr.astype(float) if coerce else arr.copy()
result = lib.maybe_convert_numeric(arr, set(),
coerce_numeric=coerce)
tm.assert_almost_equal(result, expected)
def test_convert_numeric_uint64_nan_values(self, coerce):
arr = np.array([2**63, 2**63 + 1], dtype=object)
na_values = {2**63}
expected = (np.array([np.nan, 2**63 + 1], dtype=float)
if coerce else arr.copy())
result = lib.maybe_convert_numeric(arr, na_values,
coerce_numeric=coerce)
tm.assert_almost_equal(result, expected)
@pytest.mark.parametrize("case", [
np.array([2**63, -1], dtype=object),
np.array([str(2**63), -1], dtype=object),
np.array([str(2**63), str(-1)], dtype=object),
np.array([-1, 2**63], dtype=object),
np.array([-1, str(2**63)], dtype=object),
np.array([str(-1), str(2**63)], dtype=object)])
def test_convert_numeric_int64_uint64(self, case, coerce):
expected = case.astype(float) if coerce else case.copy()
result = lib.maybe_convert_numeric(case, set(), coerce_numeric=coerce)
tm.assert_almost_equal(result, expected)
@pytest.mark.parametrize("value", [-2**63 - 1, 2**64])
def test_convert_int_overflow(self, value):
# see gh-18584
arr = np.array([value], dtype=object)
result = lib.maybe_convert_objects(arr)
tm.assert_numpy_array_equal(arr, result)
def test_maybe_convert_objects_uint64(self):
# see gh-4471
arr = np.array([2**63], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
# NumPy bug: can't compare uint64 to int64, as that
# results in both casting to float64, so we should
# make sure that this function is robust against it
arr = np.array([np.uint64(2**63)], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
arr = np.array([2, -1], dtype=object)
exp = np.array([2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
arr = np.array([2**63, -1], dtype=object)
exp = np.array([2**63, -1], dtype=object)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
def test_mixed_dtypes_remain_object_array(self):
# GH14956
array = np.array([datetime(2015, 1, 1, tzinfo=pytz.utc), 1],
dtype=object)
result = lib.maybe_convert_objects(array, convert_datetime=1)
tm.assert_numpy_array_equal(result, array)
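# Tests for lib.infer_dtype and the datetime-like array introspection helpers.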
class TestTypeInference(object):
# Dummy class used for testing with Python objects
class Dummy():
pass
def test_inferred_dtype_fixture(self, any_skipna_inferred_dtype):
# see pandas/conftest.py
inferred_dtype, values = any_skipna_inferred_dtype
# make sure the inferred dtype of the fixture is as requested
assert inferred_dtype == lib.infer_dtype(values, skipna=True)
def test_length_zero(self):
result = lib.infer_dtype(np.array([], dtype='i4'))
assert result == 'integer'
result = lib.infer_dtype([])
assert result == 'empty'
# GH 18004
arr = np.array([np.array([], dtype=object),
np.array([], dtype=object)])
result = lib.infer_dtype(arr)
assert result == 'empty'
def test_integers(self):
arr = np.array([1, 2, 3, np.int64(4), np.int32(5)], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'integer'
arr = np.array([1, 2, 3, np.int64(4), np.int32(5), 'foo'], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'mixed-integer'
arr = np.array([1, 2, 3, 4, 5], dtype='i4')
result = lib.infer_dtype(arr)
assert result == 'integer'
def test_bools(self):
arr = np.array([True, False, True, True, True], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'boolean'
arr = np.array([np.bool_(True), np.bool_(False)], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'boolean'
arr = np.array([True, False, True, 'foo'], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'mixed'
arr = np.array([True, False, True], dtype=bool)
result = lib.infer_dtype(arr)
assert result == 'boolean'
arr = np.array([True, np.nan, False], dtype='O')
result = lib.infer_dtype(arr, skipna=True)
assert result == 'boolean'
def test_floats(self):
arr = np.array([1., 2., 3., np.float64(4), np.float32(5)], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'floating'
arr = np.array([1, 2, 3, np.float64(4), np.float32(5), 'foo'],
dtype='O')
result = lib.infer_dtype(arr)
assert result == 'mixed-integer'
arr = np.array([1, 2, 3, 4, 5], dtype='f4')
result = lib.infer_dtype(arr)
assert result == 'floating'
arr = np.array([1, 2, 3, 4, 5], dtype='f8')
result = lib.infer_dtype(arr)
assert result == 'floating'
def test_decimals(self):
# GH15690
arr = np.array([Decimal(1), Decimal(2), Decimal(3)])
result = lib.infer_dtype(arr)
assert result == 'decimal'
arr = np.array([1.0, 2.0, Decimal(3)])
result = lib.infer_dtype(arr)
assert result == 'mixed'
arr = np.array([Decimal(1), Decimal('NaN'), Decimal(3)])
result = lib.infer_dtype(arr)
assert result == 'decimal'
arr = np.array([Decimal(1), np.nan, Decimal(3)], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'decimal'
def test_string(self):
pass
def test_unicode(self):
arr = [u'a', np.nan, u'c']
result = lib.infer_dtype(arr)
assert result == 'mixed'
arr = [u'a', np.nan, u'c']
result = lib.infer_dtype(arr, skipna=True)
expected = 'unicode' if PY2 else 'string'
assert result == expected
@pytest.mark.parametrize('dtype, missing, skipna, expected', [
(float, np.nan, False, 'floating'),
(float, np.nan, True, 'floating'),
(object, np.nan, False, 'floating'),
(object, np.nan, True, 'empty'),
(object, None, False, 'mixed'),
(object, None, True, 'empty')
])
@pytest.mark.parametrize('box', [pd.Series, np.array])
def test_object_empty(self, box, missing, dtype, skipna, expected):
# GH 23421
arr = box([missing, missing], dtype=dtype)
result = lib.infer_dtype(arr, skipna=skipna)
assert result == expected
def test_datetime(self):
dates = [datetime(2012, 1, x) for x in range(1, 20)]
index = Index(dates)
assert index.inferred_type == 'datetime64'
def test_infer_dtype_datetime(self):
arr = np.array([Timestamp('2011-01-01'),
Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([np.datetime64('2011-01-01'),
np.datetime64('2011-01-01')], dtype=object)
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([datetime(2011, 1, 1), datetime(2012, 2, 1)])
assert lib.infer_dtype(arr) == 'datetime'
# starts with nan
for n in [pd.NaT, np.nan]:
arr = np.array([n, pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([n, np.datetime64('2011-01-02')])
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([n, datetime(2011, 1, 1)])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([n, pd.Timestamp('2011-01-02'), n])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([n, np.datetime64('2011-01-02'), n])
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([n, datetime(2011, 1, 1), n])
assert lib.infer_dtype(arr) == 'datetime'
# different type of nat
arr = np.array([np.timedelta64('nat'),
np.datetime64('2011-01-02')], dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.datetime64('2011-01-02'),
np.timedelta64('nat')], dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
# mixed datetime
arr = np.array([datetime(2011, 1, 1),
pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'datetime'
# should be datetime?
arr = np.array([np.datetime64('2011-01-01'),
pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([pd.Timestamp('2011-01-02'),
np.datetime64('2011-01-01')])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.nan, pd.Timestamp('2011-01-02'), 1])
assert lib.infer_dtype(arr) == 'mixed-integer'
arr = np.array([np.nan, pd.Timestamp('2011-01-02'), 1.1])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.nan, '2011-01-01', pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'mixed'
def test_infer_dtype_timedelta(self):
arr = np.array([pd.Timedelta('1 days'),
pd.Timedelta('2 days')])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([np.timedelta64(1, 'D'),
np.timedelta64(2, 'D')], dtype=object)
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([timedelta(1), timedelta(2)])
assert lib.infer_dtype(arr) == 'timedelta'
# starts with nan
for n in [pd.NaT, np.nan]:
arr = np.array([n, Timedelta('1 days')])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, np.timedelta64(1, 'D')])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, timedelta(1)])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, pd.Timedelta('1 days'), n])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, np.timedelta64(1, 'D'), n])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, timedelta(1), n])
assert lib.infer_dtype(arr) == 'timedelta'
# different type of nat
arr = np.array([np.datetime64('nat'), np.timedelta64(1, 'D')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.timedelta64(1, 'D'), np.datetime64('nat')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
def test_infer_dtype_period(self):
# GH 13664
arr = np.array([pd.Period('2011-01', freq='D'),
pd.Period('2011-02', freq='D')])
assert lib.infer_dtype(arr) == 'period'
arr = np.array([pd.Period('2011-01', freq='D'),
pd.Period('2011-02', freq='M')])
assert lib.infer_dtype(arr) == 'period'
# starts with nan
for n in [pd.NaT, np.nan]:
arr = np.array([n, pd.Period('2011-01', freq='D')])
assert lib.infer_dtype(arr) == 'period'
arr = np.array([n, pd.Period('2011-01', freq='D'), n])
assert lib.infer_dtype(arr) == 'period'
# different type of nat
arr = np.array([np.datetime64('nat'), pd.Period('2011-01', freq='M')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([pd.Period('2011-01', freq='M'), np.datetime64('nat')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
@pytest.mark.parametrize(
"data",
[
[datetime(2017, 6, 12, 19, 30), datetime(2017, 3, 11, 1, 15)],
[Timestamp("20170612"), Timestamp("20170311")],
[Timestamp("20170612", tz='US/Eastern'),
Timestamp("20170311", tz='US/Eastern')],
[date(2017, 6, 12),
Timestamp("20170311", tz='US/Eastern')],
[np.datetime64("2017-06-12"), np.datetime64("2017-03-11")],
[np.datetime64("2017-06-12"), datetime(2017, 3, 11, 1, 15)]
]
)
def test_infer_datetimelike_array_datetime(self, data):
assert lib.infer_datetimelike_array(data) == "datetime"
@pytest.mark.parametrize(
"data",
[
[timedelta(2017, 6, 12), timedelta(2017, 3, 11)],
[timedelta(2017, 6, 12), date(2017, 3, 11)],
[np.timedelta64(2017, "D"), np.timedelta64(6, "s")],
[np.timedelta64(2017, "D"), timedelta(2017, 3, 11)]
]
)
def test_infer_datetimelike_array_timedelta(self, data):
assert lib.infer_datetimelike_array(data) == "timedelta"
def test_infer_datetimelike_array_date(self):
arr = [date(2017, 6, 12), date(2017, 3, 11)]
assert lib.infer_datetimelike_array(arr) == "date"
@pytest.mark.parametrize(
"data",
[
["2017-06-12", "2017-03-11"],
[20170612, 20170311],
[20170612.5, 20170311.8],
[Dummy(), Dummy()],
[Timestamp("20170612"), Timestamp("20170311", tz='US/Eastern')],
[Timestamp("20170612"), 20170311],
[timedelta(2017, 6, 12), Timestamp("20170311", tz='US/Eastern')]
]
)
def test_infer_datetimelike_array_mixed(self, data):
assert lib.infer_datetimelike_array(data) == "mixed"
@pytest.mark.parametrize(
"first, expected",
[
[[None], "mixed"],
[[np.nan], "mixed"],
[[pd.NaT], "nat"],
[[datetime(2017, 6, 12, 19, 30), pd.NaT], "datetime"],
[[np.datetime64("2017-06-12"), pd.NaT], "datetime"],
[[date(2017, 6, 12), pd.NaT], "date"],
[[timedelta(2017, 6, 12), pd.NaT], "timedelta"],
[[np.timedelta64(2017, "D"), pd.NaT], "timedelta"]
]
)
@pytest.mark.parametrize("second", [None, np.nan])
def test_infer_datetimelike_array_nan_nat_like(self, first, second,
expected):
first.append(second)
assert lib.infer_datetimelike_array(first) == expected
def test_infer_dtype_all_nan_nat_like(self):
arr = np.array([np.nan, np.nan])
assert lib.infer_dtype(arr) == 'floating'
        # a mix of nan and None results in 'mixed'
arr = np.array([np.nan, np.nan, None])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([None, np.nan, np.nan])
assert lib.infer_dtype(arr) == 'mixed'
# pd.NaT
arr = np.array([pd.NaT])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([pd.NaT, np.nan])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([np.nan, pd.NaT])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([np.nan, pd.NaT, np.nan])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([None, pd.NaT, None])
assert lib.infer_dtype(arr) == 'datetime'
# np.datetime64(nat)
arr = np.array([np.datetime64('nat')])
assert lib.infer_dtype(arr) == 'datetime64'
for n in [np.nan, pd.NaT, None]:
arr = np.array([n, np.datetime64('nat'), n])
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([pd.NaT, n, np.datetime64('nat'), n])
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([np.timedelta64('nat')], dtype=object)
assert lib.infer_dtype(arr) == 'timedelta'
for n in [np.nan, pd.NaT, None]:
arr = np.array([n, np.timedelta64('nat'), n])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([pd.NaT, n, np.timedelta64('nat'), n])
assert lib.infer_dtype(arr) == 'timedelta'
# datetime / timedelta mixed
arr = np.array([pd.NaT, np.datetime64('nat'),
np.timedelta64('nat'), np.nan])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.timedelta64('nat'), np.datetime64('nat')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
def test_is_datetimelike_array_all_nan_nat_like(self):
arr = np.array([np.nan, pd.NaT, np.datetime64('nat')])
assert lib.is_datetime_array(arr)
assert lib.is_datetime64_array(arr)
assert not lib.is_timedelta_or_timedelta64_array(arr)
arr = np.array([np.nan, pd.NaT, np.timedelta64('nat')])
assert not lib.is_datetime_array(arr)
assert not lib.is_datetime64_array(arr)
assert lib.is_timedelta_or_timedelta64_array(arr)
arr = np.array([np.nan, pd.NaT, np.datetime64('nat'),
np.timedelta64('nat')])
assert not lib.is_datetime_array(arr)
assert not lib.is_datetime64_array(arr)
assert not lib.is_timedelta_or_timedelta64_array(arr)
arr = np.array([np.nan, pd.NaT])
assert lib.is_datetime_array(arr)
assert lib.is_datetime64_array(arr)
assert lib.is_timedelta_or_timedelta64_array(arr)
arr = np.array([np.nan, np.nan], dtype=object)
assert not lib.is_datetime_array(arr)
assert not lib.is_datetime64_array(arr)
assert not lib.is_timedelta_or_timedelta64_array(arr)
assert lib.is_datetime_with_singletz_array(
np.array([pd.Timestamp('20130101', tz='US/Eastern'),
pd.Timestamp('20130102', tz='US/Eastern')],
dtype=object))
assert not lib.is_datetime_with_singletz_array(
np.array([pd.Timestamp('20130101', tz='US/Eastern'),
pd.Timestamp('20130102', tz='CET')],
dtype=object))
@pytest.mark.parametrize(
"func",
[
'is_datetime_array',
'is_datetime64_array',
'is_bool_array',
'is_timedelta_or_timedelta64_array',
'is_date_array',
'is_time_array',
'is_interval_array',
'is_period_array'])
def test_other_dtypes_for_array(self, func):
func = getattr(lib, func)
arr = np.array(['foo', 'bar'])
assert not func(arr)
arr = np.array([1, 2])
assert not func(arr)
def test_date(self):
dates = [date(2012, 1, day) for day in range(1, 20)]
index = Index(dates)
assert index.inferred_type == 'date'
dates = [date(2012, 1, day) for day in range(1, 20)] + [np.nan]
result = lib.infer_dtype(dates)
assert result == 'mixed'
result = lib.infer_dtype(dates, skipna=True)
assert result == 'date'
def test_is_numeric_array(self):
assert lib.is_float_array(np.array([1, 2.0]))
assert lib.is_float_array(np.array([1, 2.0, np.nan]))
assert not lib.is_float_array(np.array([1, 2]))
assert lib.is_integer_array(np.array([1, 2]))
assert not lib.is_integer_array(np.array([1, 2.0]))
def test_is_string_array(self):
assert lib.is_string_array(np.array(['foo', 'bar']))
assert not lib.is_string_array(
np.array(['foo', 'bar', np.nan], dtype=object), skipna=False)
assert lib.is_string_array(
np.array(['foo', 'bar', np.nan], dtype=object), skipna=True)
assert not lib.is_string_array(np.array([1, 2]))
def test_to_object_array_tuples(self):
r = (5, 6)
values = [r]
result = | lib.to_object_array_tuples(values) | pandas._libs.lib.to_object_array_tuples |
# ----------------------------------------------------------------------------
# Copyright (c) 2017-2018, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import os
from os import mkdir
from os.path import join
from warnings import filterwarnings
import tempfile
import shutil
import json
import tarfile
import qiime2
import pandas as pd
import numpy as np
from sklearn.exceptions import ConvergenceWarning
from q2_sample_classifier.visuals import (
_two_way_anova, _pairwise_stats, _linear_regress,
_calculate_baseline_accuracy, _custom_palettes,
_plot_heatmap_from_confusion_matrix, _add_sample_size_to_xtick_labels)
from q2_sample_classifier.classify import (
regress_samples_ncv, classify_samples_ncv, fit_classifier, fit_regressor,
maturity_index, detect_outliers, split_table, predict_classification,
predict_regression, scatterplot, confusion_matrix, summarize)
from q2_sample_classifier.utilities import (
split_optimize_classify, _set_parameters_and_estimator, _load_data,
_calculate_feature_importances, _extract_important_features,
_train_adaboost_base_estimator, _disable_feature_selection,
_mean_feature_importance, _null_feature_importance, _extract_features,
_match_series_or_die, _extract_rfe_scores, _predict_and_plot)
from q2_sample_classifier import (
BooleanSeriesFormat, BooleanSeriesDirectoryFormat, BooleanSeries,
PredictionsFormat, PredictionsDirectoryFormat, ClassifierPredictions,
RegressorPredictions, ImportanceFormat, ImportanceDirectoryFormat,
Importance, SampleEstimatorDirFmt, PickleFormat, SampleEstimator,
Classifier, Regressor)
from q2_sample_classifier._format import JSONFormat
from q2_types.sample_data import SampleData
from q2_types.feature_data import FeatureData
import pkg_resources
from qiime2.plugin.testing import TestPluginBase
from qiime2.plugin import ValidationError
from qiime2.plugins import sample_classifier
import sklearn
from sklearn.metrics import mean_squared_error, accuracy_score
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.svm import LinearSVC, LinearSVR
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_selection import RFECV
from sklearn.pipeline import Pipeline
from sklearn.externals import joblib
import pandas.util.testing as pdt
import biom
filterwarnings("ignore", category=UserWarning)
filterwarnings("ignore", category=Warning)
filterwarnings("ignore", category=ConvergenceWarning)
class SampleClassifierTestPluginBase(TestPluginBase):
package = 'q2_sample_classifier.tests'
def setUp(self):
super().setUp()
self.temp_dir = tempfile.TemporaryDirectory(
prefix='q2-sample-classifier-test-temp-')
def tearDown(self):
self.temp_dir.cleanup()
def get_data_path(self, filename):
return pkg_resources.resource_filename(self.package,
'data/%s' % filename)
class UtilitiesTests(SampleClassifierTestPluginBase):
def setUp(self):
super().setUp()
exp_rf = pd.DataFrame(
{'importance': [0.1, 0.2, 0.3]}, index=['a', 'b', 'c'])
exp_rf.index.name = 'feature'
self.exp_rf = exp_rf
exp_svm = pd.DataFrame(
{'importance0': [0.1, 0.2, 0.3], 'importance1': [0.4, 0.5, 0.6]},
index=['a', 'b', 'c'])
exp_svm.index.name = 'feature'
self.exp_svm = exp_svm
exp_lsvm = pd.DataFrame(
{'importance0': [-0.048794, -0.048794, -0.048794]},
index=['a', 'b', 'c'])
exp_lsvm.index.name = 'feature'
self.exp_lsvm = exp_lsvm
self.features = biom.Table(np.array([[1]*5]*3), ['a', 'b', 'c'],
list(map(str, range(5))))
self.targets = pd.Series(['a', 'a', 'b', 'b', 'a'], name='bullseye')
def test_extract_important_features_1d_array(self):
importances = _extract_important_features(
self.features.ids('observation'),
np.ndarray((3,), buffer=np.array([0.1, 0.2, 0.3])))
self.assertEqual(sorted(self.exp_rf), sorted(importances))
def test_extract_important_features_2d_array(self):
importances = _extract_important_features(
self.features.ids('observation'),
np.ndarray(
(2, 3), buffer=np.array([0.1, 0.2, 0.3, 0.4, 0.5, 0.6])))
self.assertEqual(sorted(self.exp_svm), sorted(importances))
# test feature importance calculation with main classifier types
def test_calculate_feature_importances_ensemble(self):
estimator = Pipeline(
[('dv', DictVectorizer()), ('est', RandomForestClassifier())])
estimator.fit(_extract_features(self.features), self.targets)
fi = _calculate_feature_importances(estimator)
self.assertEqual(sorted(self.exp_rf), sorted(fi))
def test_calculate_feature_importances_svm(self):
estimator = Pipeline(
[('dv', DictVectorizer()), ('est', LinearSVC())])
estimator.fit(_extract_features(self.features), self.targets)
fi = _calculate_feature_importances(estimator)
self.assertEqual(sorted(self.exp_lsvm), sorted(fi))
# confirm that feature selection incompatibility warnings work
def test_disable_feature_selection_unsupported(self):
with self.assertWarnsRegex(UserWarning, "does not support recursive"):
_disable_feature_selection('KNeighborsClassifier', False)
def test_mean_feature_importance_1d_arrays(self):
exp = pd.DataFrame([10, 9, 8, 7], columns=["importance0"],
index=[3, 2, 1, 0])
imps = [pd.DataFrame([1, 2, 3, 4], columns=["importance0"]),
pd.DataFrame([5, 6, 7, 8], columns=["importance0"]),
pd.DataFrame([9, 10, 11, 12], columns=["importance0"]),
pd.DataFrame([13, 14, 15, 16], columns=["importance0"])]
pdt.assert_frame_equal(_mean_feature_importance(imps), exp)
def test_mean_feature_importance_different_column_names(self):
exp = pd.DataFrame([[6, 5, 4, 3], [14, 13, 12, 11]],
index=["importance0", "importance1"],
columns=[3, 2, 1, 0]).T
imps = [pd.DataFrame([1, 2, 3, 4], columns=["importance0"]),
pd.DataFrame([5, 6, 7, 8], columns=["importance0"]),
pd.DataFrame([9, 10, 11, 12], columns=["importance1"]),
| pd.DataFrame([13, 14, 15, 16], columns=["importance1"]) | pandas.DataFrame |
import math
import operator
from datetime import date, datetime
from operator import methodcaller
import pandas as pd
import pandas.util.testing as tm
import pytest
from pytest import param
import ibis
import ibis.expr.datatypes as dt
import ibis.expr.types as ir
from ibis import literal as L
clickhouse_driver = pytest.importorskip('clickhouse_driver')
pytestmark = pytest.mark.clickhouse
@pytest.mark.parametrize(
('to_type', 'expected'),
[
('int8', 'CAST(`double_col` AS Int8)'),
('int16', 'CAST(`double_col` AS Int16)'),
('float', 'CAST(`double_col` AS Float32)'),
# alltypes.double_col is non-nullable
(dt.Double(nullable=False), '`double_col`'),
],
)
def test_cast_double_col(alltypes, translate, to_type, expected):
expr = alltypes.double_col.cast(to_type)
assert translate(expr) == expected
@pytest.mark.parametrize(
('to_type', 'expected'),
[
('int8', 'CAST(`string_col` AS Int8)'),
('int16', 'CAST(`string_col` AS Int16)'),
(dt.String(nullable=False), '`string_col`'),
('timestamp', 'CAST(`string_col` AS DateTime)'),
('date', 'CAST(`string_col` AS Date)'),
],
)
def test_cast_string_col(alltypes, translate, to_type, expected):
expr = alltypes.string_col.cast(to_type)
assert translate(expr) == expected
@pytest.mark.xfail(
raises=AssertionError, reason='Clickhouse doesn\'t have decimal type'
)
def test_decimal_cast():
assert False
@pytest.mark.parametrize(
'column',
[
'index',
'Unnamed: 0',
'id',
'bool_col',
'tinyint_col',
'smallint_col',
'int_col',
'bigint_col',
'float_col',
'double_col',
'date_string_col',
'string_col',
'timestamp_col',
'year',
'month',
],
)
def test_noop_cast(alltypes, translate, column):
col = alltypes[column]
result = col.cast(col.type())
assert result.equals(col)
assert translate(result) == '`{}`'.format(column)
def test_timestamp_cast_noop(alltypes, translate):
target = dt.Timestamp(nullable=False)
result1 = alltypes.timestamp_col.cast(target)
result2 = alltypes.int_col.cast(target)
assert isinstance(result1, ir.TimestampColumn)
assert isinstance(result2, ir.TimestampColumn)
assert translate(result1) == '`timestamp_col`'
assert translate(result2) == 'CAST(`int_col` AS DateTime)'
def test_timestamp_now(con, translate):
expr = ibis.now()
# now = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
assert translate(expr) == 'now()'
# assert con.execute(expr) == now
@pytest.mark.parametrize(
('unit', 'expected'),
[
('y', '2009-01-01'),
param('m', '2009-05-01', marks=pytest.mark.xfail),
('d', '2009-05-17'),
('w', '2009-05-11'),
('h', '2009-05-17 12:00:00'),
('minute', '2009-05-17 12:34:00'),
],
)
def test_timestamp_truncate(con, translate, unit, expected):
stamp = ibis.timestamp('2009-05-17 12:34:56')
expr = stamp.truncate(unit)
assert con.execute(expr) == pd.Timestamp(expected)
@pytest.mark.parametrize(
('func', 'expected'),
[
(methodcaller('year'), 2015),
(methodcaller('month'), 9),
(methodcaller('day'), 1),
(methodcaller('hour'), 14),
(methodcaller('minute'), 48),
(methodcaller('second'), 5),
],
)
def test_simple_datetime_operations(con, func, expected):
value = ibis.timestamp('2015-09-01 14:48:05.359')
with pytest.raises(ValueError):
con.execute(func(value))
value = ibis.timestamp('2015-09-01 14:48:05')
    assert con.execute(func(value)) == expected
@pytest.mark.parametrize(('value', 'expected'), [(0, None), (5.5, 5.5)])
def test_nullifzero(con, value, expected):
result = con.execute(L(value).nullifzero())
if expected is None:
assert pd.isnull(result)
else:
assert result == expected
@pytest.mark.parametrize(
('expr', 'expected'),
[
(L(None).isnull(), True),
(L(1).isnull(), False),
(L(None).notnull(), False),
(L(1).notnull(), True),
],
)
def test_isnull_notnull(con, expr, expected):
assert con.execute(expr) == expected
@pytest.mark.parametrize(
('expr', 'expected'),
[
(ibis.coalesce(5, None, 4), 5),
(ibis.coalesce(ibis.NA, 4, ibis.NA), 4),
(ibis.coalesce(ibis.NA, ibis.NA, 3.14), 3.14),
],
)
def test_coalesce(con, expr, expected):
assert con.execute(expr) == expected
@pytest.mark.parametrize(
('expr', 'expected'),
[
(ibis.NA.fillna(5), 5),
(L(5).fillna(10), 5),
(L(5).nullif(5), None),
(L(10).nullif(5), 10),
],
)
def test_fillna_nullif(con, expr, expected):
result = con.execute(expr)
if expected is None:
assert pd.isnull(result)
else:
assert result == expected
@pytest.mark.parametrize(
('value', 'expected'),
[
(L('foo_bar'), 'String'),
(L(5), 'UInt8'),
(L(1.2345), 'Float64'),
(L(datetime(2015, 9, 1, hour=14, minute=48, second=5)), 'DateTime'),
(L(date(2015, 9, 1)), 'Date'),
param(
ibis.NA,
'Null',
marks=pytest.mark.xfail(
raises=AssertionError,
reason=(
'Client/server version mismatch not handled in the '
'clickhouse driver'
),
),
),
],
)
def test_typeof(con, value, expected):
assert con.execute(value.typeof()) == expected
@pytest.mark.parametrize(('value', 'expected'), [('foo_bar', 7), ('', 0)])
def test_string_length(con, value, expected):
assert con.execute(L(value).length()) == expected
@pytest.mark.parametrize(
('op', 'expected'),
[
(methodcaller('substr', 0, 3), 'foo'),
(methodcaller('substr', 4, 3), 'bar'),
(methodcaller('substr', 1), 'oo_bar'),
],
)
def test_string_substring(con, op, expected):
value = L('foo_bar')
assert con.execute(op(value)) == expected
def test_string_column_substring(con, alltypes, translate):
expr = alltypes.string_col.substr(2)
assert translate(expr) == 'substring(`string_col`, 2 + 1)'
assert len(con.execute(expr))
expr = alltypes.string_col.substr(0, 3)
assert translate(expr) == 'substring(`string_col`, 0 + 1, 3)'
assert len(con.execute(expr))
def test_string_reverse(con):
assert con.execute(L('foo').reverse()) == 'oof'
def test_string_upper(con):
assert con.execute(L('foo').upper()) == 'FOO'
def test_string_lower(con):
assert con.execute(L('FOO').lower()) == 'foo'
def test_string_length_literal(con):
assert con.execute(L('FOO').length()) == 3
@pytest.mark.parametrize(
('value', 'op', 'expected'),
[
(L('foobar'), methodcaller('contains', 'bar'), True),
(L('foobar'), methodcaller('contains', 'foo'), True),
(L('foobar'), methodcaller('contains', 'baz'), False),
(L('100%'), methodcaller('contains', '%'), True),
(L('a_b_c'), methodcaller('contains', '_'), True),
],
)
def test_string_contains(con, op, value, expected):
assert con.execute(op(value)) == expected
# TODO: clickhouse-driver escaping bug
def test_re_replace(con, translate):
expr1 = L('Hello, World!').re_replace('.', '\\\\0\\\\0')
expr2 = L('Hello, World!').re_replace('^', 'here: ')
assert con.execute(expr1) == 'HHeelllloo,, WWoorrlldd!!'
assert con.execute(expr2) == 'here: Hello, World!'
@pytest.mark.parametrize(
('value', 'expected'),
[(L('a'), 0), (L('b'), 1), (L('d'), -1)], # TODO: what's the expected?
)
def test_find_in_set(con, value, expected, translate):
vals = list('abc')
expr = value.find_in_set(vals)
assert con.execute(expr) == expected
def test_string_column_find_in_set(con, alltypes, translate):
s = alltypes.string_col
vals = list('abc')
expr = s.find_in_set(vals)
assert translate(expr) == "indexOf(['a','b','c'], `string_col`) - 1"
assert len(con.execute(expr))
@pytest.mark.parametrize(
('url', 'extract', 'expected'),
[
(L('https://www.cloudera.com'), 'HOST', 'www.cloudera.com'),
(L('https://www.cloudera.com'), 'PROTOCOL', 'https'),
(
L('https://www.youtube.com/watch?v=kEuEcWfewf8&t=10'),
'PATH',
'/watch',
),
(
L('https://www.youtube.com/watch?v=kEuEcWfewf8&t=10'),
'QUERY',
'v=kEuEcWfewf8&t=10',
),
],
)
def test_parse_url(con, translate, url, extract, expected):
expr = url.parse_url(extract)
assert con.execute(expr) == expected
def test_parse_url_query_parameter(con, translate):
url = L('https://www.youtube.com/watch?v=kEuEcWfewf8&t=10')
expr = url.parse_url('QUERY', 't')
assert con.execute(expr) == '10'
expr = url.parse_url('QUERY', 'v')
assert con.execute(expr) == 'kEuEcWfewf8'
@pytest.mark.parametrize(
('expr', 'expected'),
[
(L('foobar').find('bar'), 3),
(L('foobar').find('baz'), -1),
(L('foobar').like('%bar'), True),
(L('foobar').like('foo%'), True),
(L('foobar').like('%baz%'), False),
(L('foobar').like(['%bar']), True),
(L('foobar').like(['foo%']), True),
(L('foobar').like(['%baz%']), False),
(L('foobar').like(['%bar', 'foo%']), True),
(L('foobarfoo').replace('foo', 'H'), 'HbarH'),
],
)
def test_string_find_like(con, expr, expected):
assert con.execute(expr) == expected
def test_string_column_like(con, alltypes, translate):
expr = alltypes.string_col.like('foo%')
assert translate(expr) == "`string_col` LIKE 'foo%'"
assert len(con.execute(expr))
expr = alltypes.string_col.like(['foo%', '%bar'])
expected = "`string_col` LIKE 'foo%' OR `string_col` LIKE '%bar'"
assert translate(expr) == expected
assert len(con.execute(expr))
def test_string_column_find(con, alltypes, translate):
s = alltypes.string_col
expr = s.find('a')
assert translate(expr) == "position(`string_col`, 'a') - 1"
assert len(con.execute(expr))
expr = s.find(s)
assert translate(expr) == "position(`string_col`, `string_col`) - 1"
assert len(con.execute(expr))
@pytest.mark.parametrize(
('call', 'expected'),
[
(methodcaller('log'), 'log(`double_col`)'),
(methodcaller('log2'), 'log2(`double_col`)'),
(methodcaller('log10'), 'log10(`double_col`)'),
(methodcaller('round'), 'round(`double_col`)'),
(methodcaller('round', 0), 'round(`double_col`, 0)'),
(methodcaller('round', 2), 'round(`double_col`, 2)'),
(methodcaller('exp'), 'exp(`double_col`)'),
(methodcaller('abs'), 'abs(`double_col`)'),
(methodcaller('ceil'), 'ceil(`double_col`)'),
(methodcaller('floor'), 'floor(`double_col`)'),
(methodcaller('sqrt'), 'sqrt(`double_col`)'),
(
methodcaller('sign'),
'intDivOrZero(`double_col`, abs(`double_col`))',
),
],
)
def test_translate_math_functions(con, alltypes, translate, call, expected):
expr = call(alltypes.double_col)
assert translate(expr) == expected
assert len(con.execute(expr))
@pytest.mark.parametrize(
('expr', 'expected'),
[
(L(-5).abs(), 5),
(L(5).abs(), 5),
(L(5.5).round(), 6.0),
(L(5.556).round(2), 5.56),
(L(5.556).ceil(), 6.0),
(L(5.556).floor(), 5.0),
(L(5.556).exp(), math.exp(5.556)),
(L(5.556).sign(), 1),
(L(-5.556).sign(), -1),
(L(0).sign(), 0),
(L(5.556).sqrt(), math.sqrt(5.556)),
(L(5.556).log(2), math.log(5.556, 2)),
(L(5.556).ln(), math.log(5.556)),
(L(5.556).log2(), math.log(5.556, 2)),
(L(5.556).log10(), math.log10(5.556)),
],
)
def test_math_functions(con, expr, expected, translate):
assert con.execute(expr) == expected
def test_greatest(con, alltypes, translate):
expr = ibis.greatest(alltypes.int_col, 10)
assert translate(expr) == "greatest(`int_col`, 10)"
assert len(con.execute(expr))
expr = ibis.greatest(alltypes.int_col, alltypes.bigint_col)
assert translate(expr) == "greatest(`int_col`, `bigint_col`)"
assert len(con.execute(expr))
def test_least(con, alltypes, translate):
expr = ibis.least(alltypes.int_col, 10)
assert translate(expr) == "least(`int_col`, 10)"
assert len(con.execute(expr))
expr = ibis.least(alltypes.int_col, alltypes.bigint_col)
assert translate(expr) == "least(`int_col`, `bigint_col`)"
assert len(con.execute(expr))
# TODO: clickhouse-driver escaping bug
@pytest.mark.parametrize(
('expr', 'expected'),
[
(L('abcd').re_search('[a-z]'), True),
(L('abcd').re_search(r'[\\d]+'), False),
(L('1222').re_search(r'[\\d]+'), True),
],
)
def test_regexp(con, expr, expected):
assert con.execute(expr) == expected
@pytest.mark.parametrize(
('expr', 'expected'),
[
(L('abcd').re_extract('([a-z]+)', 0), 'abcd'),
# (L('abcd').re_extract('(ab)(cd)', 1), 'cd'),
# valid group number but no match => empty string
(L('abcd').re_extract(r'(\\d)', 0), ''),
# match but not a valid group number => NULL
# (L('abcd').re_extract('abcd', 3), None),
],
)
def test_regexp_extract(con, expr, expected, translate):
assert con.execute(expr) == expected
def test_column_regexp_extract(con, alltypes, translate):
expected = r"extractAll(`string_col`, '[\d]+')[3 + 1]"
expr = alltypes.string_col.re_extract(r'[\d]+', 3)
assert translate(expr) == expected
assert len(con.execute(expr))
def test_column_regexp_replace(con, alltypes, translate):
expected = r"replaceRegexpAll(`string_col`, '[\d]+', 'aaa')"
expr = alltypes.string_col.re_replace(r'[\d]+', 'aaa')
assert translate(expr) == expected
assert len(con.execute(expr))
def test_numeric_builtins_work(con, alltypes, df, translate):
expr = alltypes.double_col
result = expr.execute()
expected = df.double_col.fillna(0)
| tm.assert_series_equal(result, expected) | pandas.util.testing.assert_series_equal |
from random import random
import numpy as np
import pandas as pd
from prereise.gather.demanddata.eia.clean_data import slope_interpolate
def test_slope_interpolate():
# Create a list of data that approximates a sin curve with some randomness
# then manually change two numbers to be outliers
demand_list = [5 + random() * np.sin(np.pi * i / 8) for i in range(1000)]
demand_list[4] = 40
demand_list[100] = 120
ba_dict = {i: {"ba": demand} for i, demand in enumerate(demand_list)}
ba = | pd.DataFrame(ba_dict) | pandas.DataFrame |
#!/usr/bin/env python3
import pytest
import os
import pathlib
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import logging
import math
import torch
from neuralprophet import NeuralProphet, set_random_seed
from neuralprophet import df_utils
log = logging.getLogger("NP.test")
log.setLevel("WARNING")
log.parent.setLevel("WARNING")
DIR = pathlib.Path(__file__).parent.parent.absolute()
DATA_DIR = os.path.join(DIR, "tests", "test-data")
PEYTON_FILE = os.path.join(DATA_DIR, "wp_log_peyton_manning.csv")
AIR_FILE = os.path.join(DATA_DIR, "air_passengers.csv")
YOS_FILE = os.path.join(DATA_DIR, "yosemite_temps.csv")
NROWS = 256
EPOCHS = 2
BATCH_SIZE = 64
LR = 1.0
PLOT = False
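# Shared smoke-test settings: small NROWS/EPOCHS/BATCH_SIZE keep runs fast;
# set PLOT = True to display the matplotlib figures interactively.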
def test_names():
log.info("testing: names")
m = NeuralProphet()
m._validate_column_name("hello_friend")
def test_train_eval_test():
log.info("testing: Train Eval Test")
m = NeuralProphet(
n_lags=10,
n_forecasts=3,
ar_sparsity=0.1,
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
df = pd.read_csv(PEYTON_FILE, nrows=95)
df = df_utils.check_dataframe(df, check_y=False)
df = m._handle_missing_data(df, freq="D", predicting=False)
df_train, df_test = m.split_df(df, freq="D", valid_p=0.1)
metrics = m.fit(df_train, freq="D", validation_df=df_test)
val_metrics = m.test(df_test)
log.debug("Metrics: train/eval: \n {}".format(metrics.to_string(float_format=lambda x: "{:6.3f}".format(x))))
log.debug("Metrics: test: \n {}".format(val_metrics.to_string(float_format=lambda x: "{:6.3f}".format(x))))
def test_df_utils_func():
log.info("testing: df_utils Test")
df = pd.read_csv(PEYTON_FILE, nrows=95)
df = df_utils.check_dataframe(df, check_y=False)
# test find_time_threshold
df_dict, _ = df_utils.prep_copy_df_dict(df)
time_threshold = df_utils.find_time_threshold(df_dict, n_lags=2, valid_p=0.2, inputs_overbleed=True)
df_train, df_val = df_utils.split_considering_timestamp(
df_dict, n_lags=2, n_forecasts=2, inputs_overbleed=True, threshold_time_stamp=time_threshold
)
# init data params with a list
global_data_params = df_utils.init_data_params(df_dict, normalize="soft")
global_data_params = df_utils.init_data_params(df_dict, normalize="soft1")
global_data_params = df_utils.init_data_params(df_dict, normalize="standardize")
log.debug("Time Threshold: \n {}".format(time_threshold))
log.debug("Df_train: \n {}".format(type(df_train)))
log.debug("Df_val: \n {}".format(type(df_val)))
def test_trend():
log.info("testing: Trend")
df = pd.read_csv(PEYTON_FILE, nrows=NROWS)
m = NeuralProphet(
growth="linear",
n_changepoints=10,
changepoints_range=0.9,
trend_reg=1,
trend_reg_threshold=False,
yearly_seasonality=False,
weekly_seasonality=False,
daily_seasonality=False,
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
# print(m.config_trend)
metrics_df = m.fit(df, freq="D")
future = m.make_future_dataframe(df, periods=60, n_historic_predictions=60)
forecast = m.predict(df=future)
if PLOT:
m.plot(forecast)
# m.plot_components(forecast)
m.plot_parameters()
plt.show()
def test_custom_changepoints():
log.info("testing: Custom Changepoints")
df = pd.read_csv(PEYTON_FILE, nrows=NROWS)
dates = df["ds"][range(1, len(df) - 1, int(len(df) / 5.0))]
dates_list = [str(d) for d in dates]
dates_array = pd.to_datetime(dates_list).values
log.debug("dates: {}".format(dates))
log.debug("dates_list: {}".format(dates_list))
log.debug("dates_array: {} {}".format(dates_array.dtype, dates_array))
for cp in [dates_list, dates_array]:
m = NeuralProphet(
changepoints=cp,
yearly_seasonality=False,
weekly_seasonality=False,
daily_seasonality=False,
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
# print(m.config_trend)
metrics_df = m.fit(df, freq="D")
future = m.make_future_dataframe(df, periods=60, n_historic_predictions=60)
forecast = m.predict(df=future)
if PLOT:
# m.plot(forecast)
# m.plot_components(forecast)
m.plot_parameters()
plt.show()
def test_no_trend():
log.info("testing: No-Trend")
df = pd.read_csv(PEYTON_FILE, nrows=512)
m = NeuralProphet(
growth="off",
yearly_seasonality=False,
weekly_seasonality=False,
daily_seasonality=False,
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
# m.highlight_nth_step_ahead_of_each_forecast(m.n_forecasts)
metrics_df = m.fit(df, freq="D")
future = m.make_future_dataframe(df, periods=60, n_historic_predictions=60)
forecast = m.predict(df=future)
if PLOT:
m.plot(forecast)
m.plot_components(forecast)
m.plot_parameters()
plt.show()
def test_seasons():
log.info("testing: Seasonality: additive")
df = pd.read_csv(PEYTON_FILE, nrows=NROWS)
m = NeuralProphet(
yearly_seasonality=8,
weekly_seasonality=4,
seasonality_mode="additive",
seasonality_reg=1,
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
metrics_df = m.fit(df, freq="D")
future = m.make_future_dataframe(df, n_historic_predictions=365, periods=365)
forecast = m.predict(df=future)
log.debug("SUM of yearly season params: {}".format(sum(abs(m.model.season_params["yearly"].data.numpy()))))
log.debug("SUM of weekly season params: {}".format(sum(abs(m.model.season_params["weekly"].data.numpy()))))
log.debug("season params: {}".format(m.model.season_params.items()))
if PLOT:
m.plot(forecast)
# m.plot_components(forecast)
m.plot_parameters()
plt.show()
log.info("testing: Seasonality: multiplicative")
df = pd.read_csv(PEYTON_FILE, nrows=NROWS)
# m = NeuralProphet(n_lags=60, n_changepoints=10, n_forecasts=30, verbose=True)
m = NeuralProphet(
yearly_seasonality=8,
weekly_seasonality=4,
seasonality_mode="multiplicative",
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
metrics_df = m.fit(df, freq="D")
future = m.make_future_dataframe(df, n_historic_predictions=365, periods=365)
forecast = m.predict(df=future)
def test_custom_seasons():
log.info("testing: Custom Seasonality")
df = pd.read_csv(PEYTON_FILE, nrows=NROWS)
other_seasons = False
m = NeuralProphet(
yearly_seasonality=other_seasons,
weekly_seasonality=other_seasons,
daily_seasonality=other_seasons,
seasonality_mode="additive",
# seasonality_mode="multiplicative",
seasonality_reg=1,
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
m = m.add_seasonality(name="quarterly", period=90, fourier_order=5)
log.debug("seasonalities: {}".format(m.season_config.periods))
metrics_df = m.fit(df, freq="D")
future = m.make_future_dataframe(df, n_historic_predictions=365, periods=365)
forecast = m.predict(df=future)
log.debug("season params: {}".format(m.model.season_params.items()))
if PLOT:
m.plot(forecast)
# m.plot_components(forecast)
m.plot_parameters()
plt.show()
def test_ar():
log.info("testing: AR")
df = pd.read_csv(PEYTON_FILE, nrows=NROWS)
m = NeuralProphet(
n_forecasts=7,
n_lags=7,
yearly_seasonality=False,
epochs=EPOCHS,
# batch_size=BATCH_SIZE,
learning_rate=LR,
)
m.highlight_nth_step_ahead_of_each_forecast(m.n_forecasts)
metrics_df = m.fit(df, freq="D")
future = m.make_future_dataframe(df, n_historic_predictions=90)
forecast = m.predict(df=future)
if PLOT:
m.plot_last_forecast(forecast, include_previous_forecasts=3)
m.plot(forecast)
m.plot_components(forecast)
m.plot_parameters()
plt.show()
def test_ar_sparse():
log.info("testing: AR (sparse")
df = pd.read_csv(PEYTON_FILE, nrows=NROWS)
m = NeuralProphet(
n_forecasts=3,
n_lags=14,
ar_sparsity=0.5,
yearly_seasonality=False,
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
m.highlight_nth_step_ahead_of_each_forecast(m.n_forecasts)
metrics_df = m.fit(df, freq="D")
future = m.make_future_dataframe(df, n_historic_predictions=90)
forecast = m.predict(df=future)
if PLOT:
m.plot_last_forecast(forecast, include_previous_forecasts=3)
m.plot(forecast)
m.plot_components(forecast)
m.plot_parameters()
plt.show()
def test_ar_deep():
log.info("testing: AR-Net (deep)")
df = pd.read_csv(PEYTON_FILE, nrows=NROWS)
m = NeuralProphet(
n_forecasts=7,
n_lags=14,
num_hidden_layers=2,
d_hidden=32,
yearly_seasonality=False,
weekly_seasonality=False,
daily_seasonality=False,
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
m.highlight_nth_step_ahead_of_each_forecast(m.n_forecasts)
metrics_df = m.fit(df, freq="D")
future = m.make_future_dataframe(df, n_historic_predictions=90)
forecast = m.predict(df=future)
if PLOT:
m.plot_last_forecast(forecast, include_previous_forecasts=3)
m.plot(forecast)
m.plot_components(forecast)
m.plot_parameters()
plt.show()
def test_lag_reg():
log.info("testing: Lagged Regressors")
df = pd.read_csv(PEYTON_FILE, nrows=NROWS)
m = NeuralProphet(
n_forecasts=2,
n_lags=3,
weekly_seasonality=False,
daily_seasonality=False,
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
df["A"] = df["y"].rolling(7, min_periods=1).mean()
df["B"] = df["y"].rolling(30, min_periods=1).mean()
m = m.add_lagged_regressor(names="A")
m = m.add_lagged_regressor(names="B", only_last_value=True)
metrics_df = m.fit(df, freq="D")
future = m.make_future_dataframe(df, n_historic_predictions=10)
forecast = m.predict(future)
if PLOT:
print(forecast.to_string())
m.plot_last_forecast(forecast, include_previous_forecasts=5)
m.plot(forecast)
m.plot_components(forecast)
m.plot_parameters()
plt.show()
def test_lag_reg_deep():
log.info("testing: List of Lagged Regressors (deep)")
df = pd.read_csv(PEYTON_FILE, nrows=NROWS)
m = NeuralProphet(
n_forecasts=1,
n_lags=14,
num_hidden_layers=2,
d_hidden=32,
weekly_seasonality=False,
daily_seasonality=False,
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
df["A"] = df["y"].rolling(7, min_periods=1).mean()
df["B"] = df["y"].rolling(15, min_periods=1).mean()
df["C"] = df["y"].rolling(30, min_periods=1).mean()
cols = [col for col in df.columns if col not in ["ds", "y"]]
m = m.add_lagged_regressor(names=cols)
m.highlight_nth_step_ahead_of_each_forecast(m.n_forecasts)
metrics_df = m.fit(df, freq="D")
forecast = m.predict(df)
if PLOT:
# print(forecast.to_string())
# m.plot_last_forecast(forecast, include_previous_forecasts=10)
# m.plot(forecast)
# m.plot_components(forecast)
m.plot_parameters()
plt.show()
def test_events():
log.info("testing: Events")
df = pd.read_csv(PEYTON_FILE)[-NROWS:]
playoffs = pd.DataFrame(
{
"event": "playoff",
"ds": pd.to_datetime(
[
"2008-01-13",
"2009-01-03",
"2010-01-16",
"2010-01-24",
"2010-02-07",
"2011-01-08",
"2013-01-12",
"2014-01-12",
"2014-01-19",
"2014-02-02",
"2015-01-11",
"2016-01-17",
"2016-01-24",
"2016-02-07",
]
),
}
)
superbowls = pd.DataFrame(
{
"event": "superbowl",
"ds": pd.to_datetime(["2010-02-07", "2014-02-02", "2016-02-07"]),
}
)
events_df = pd.concat((playoffs, superbowls))
m = NeuralProphet(
n_lags=2,
n_forecasts=30,
daily_seasonality=False,
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
# set event windows
m = m.add_events(
["superbowl", "playoff"], lower_window=-1, upper_window=1, mode="multiplicative", regularization=0.5
)
# add the country specific holidays
m = m.add_country_holidays("US", mode="additive", regularization=0.5)
m.add_country_holidays("Indonesia")
m.add_country_holidays("Thailand")
m.add_country_holidays("Philippines")
m.add_country_holidays("Pakistan")
m.add_country_holidays("Belarus")
history_df = m.create_df_with_events(df, events_df)
metrics_df = m.fit(history_df, freq="D")
future = m.make_future_dataframe(df=history_df, events_df=events_df, periods=30, n_historic_predictions=90)
forecast = m.predict(df=future)
log.debug("Event Parameters:: {}".format(m.model.event_params))
if PLOT:
m.plot_components(forecast)
m.plot(forecast)
m.plot_parameters()
plt.show()
def test_future_reg():
log.info("testing: Future Regressors")
df = pd.read_csv(PEYTON_FILE, nrows=NROWS + 50)
m = NeuralProphet(
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
df["A"] = df["y"].rolling(7, min_periods=1).mean()
df["B"] = df["y"].rolling(30, min_periods=1).mean()
regressors_df_future = | pd.DataFrame(data={"A": df["A"][-50:], "B": df["B"][-50:]}) | pandas.DataFrame |
import io
import os
import re
import sys
import time
import pandas
import datetime
import requests
import mplfinance
from matplotlib import dates
# Basic Data
file_name = __file__[:-3]
absolute_path = os.path.dirname(os.path.abspath(__file__))
# <editor-fold desc='common'>
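# Read the FILE_DIRECTORY setting from the Config.cfg file located next to this script.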
def load_json_config():
global file_directory
config_file = os.path.join(os.sep, absolute_path, 'Config.cfg')
with open(config_file, 'r') as file_handler:
config_data = file_handler.read()
regex = 'FILE_DIRECTORY=.*'
match = re.findall(regex, config_data)
file_directory = match[0].split('=')[1].strip()
# </editor-fold>
# <editor-fold desc='daily update'>
def save_dict_to_file(dic, txt):
f = open(txt, 'w', encoding='utf-8')
f.write(dic)
f.close()
def load_dict_from_file(txt):
f = open(txt, 'r', encoding='utf-8')
data = f.read()
f.close()
return eval(data)
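# Fetch the TWSE "MI_INDEX" daily report for all listed stocks on the given date
# and return it as a DataFrame indexed by security code ('證券代號'), with the
# thousands separators stripped from the volume and turnover columns.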
def crawl_price(date=datetime.datetime.now()):
date_str = str(date).split(' ')[0].replace('-', '')
r = requests.post('http://www.twse.com.tw/exchangeReport/MI_INDEX?response=csv&date=' + date_str + '&type=ALL')
ret = pandas.read_csv(io.StringIO('\n'.join([i.translate({ord(c): None for c in ' '}) for i in r.text.split('\n') if
len(i.split(',')) == 17 and i[0] != '='])), header=0,
index_col='證券代號')
ret['成交金額'] = ret['成交金額'].str.replace(',', '')
ret['成交股數'] = ret['成交股數'].str.replace(',', '')
return ret
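# Build the initial workbook: starting from `date`, retry day by day until the
# first trading day succeeds, then write the volume/open/close/high/low sheets
# to stock_file_path.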
def original_crawl_price(date='2011-01-01 00:00:00'):
print('Begin: original_crawl_price!')
data = {}
success = False
dateFormatter = '%Y-%m-%d %H:%M:%S'
date = datetime.datetime.strptime(date, dateFormatter)
while not success:
print('parsing', date)
try:
data[date.date()] = crawl_price(date)
print('success!')
success = True
except pandas.errors.EmptyDataError:
            # no data is returned on weekends/holidays
            print('fail! check whether the date is a holiday')
        # move on to the next day
date += datetime.timedelta(days=1)
time.sleep(10)
writer = pandas.ExcelWriter(stock_file_path, engine='xlsxwriter')
stock_volume = pandas.DataFrame({k: d['成交股數'] for k, d in data.items()}).transpose()
stock_volume.index = pandas.to_datetime(stock_volume.index)
stock_volume.to_excel(writer, sheet_name='stock_volume', index=True)
stock_open = pandas.DataFrame({k: d['開盤價'] for k, d in data.items()}).transpose()
stock_open.index = pandas.to_datetime(stock_open.index)
stock_open.to_excel(writer, sheet_name='stock_open', index=True)
stock_close = pandas.DataFrame({k: d['收盤價'] for k, d in data.items()}).transpose()
stock_close.index = pandas.to_datetime(stock_close.index)
stock_close.to_excel(writer, sheet_name='stock_close', index=True)
stock_high = pandas.DataFrame({k: d['最高價'] for k, d in data.items()}).transpose()
stock_high.index = pandas.to_datetime(stock_high.index)
stock_high.to_excel(writer, sheet_name='stock_high', index=True)
stock_low = pandas.DataFrame({k: d['最低價'] for k, d in data.items()}).transpose()
stock_low.index = pandas.to_datetime(stock_low.index)
stock_low.to_excel(writer, sheet_name='stock_low', index=True)
writer.save()
print('End: original_crawl_price!')
def update_stock_info():
print('Begin: update_stock_info!')
data = {}
count = 1
fail_count = 0
allow_continuous_fail_count = 20
try:
pandas.read_excel(stock_file_path, sheet_name='stock_volume', index_col=0)
print(r'{} Exist.'.format(stock_file_path))
except FileNotFoundError:
print(r'{} Not Exist.'.format(stock_file_path))
original_crawl_price()
stock_volume_old = pandas.read_excel(stock_file_path, sheet_name='stock_volume', index_col=0)
stock_volume_old.index = pandas.to_datetime(stock_volume_old.index)
stock_open_old = pandas.read_excel(stock_file_path, sheet_name='stock_open', index_col=0)
stock_open_old.index = pandas.to_datetime(stock_open_old.index)
stock_close_old = pandas.read_excel(stock_file_path, sheet_name='stock_close', index_col=0)
stock_close_old.index = pandas.to_datetime(stock_close_old.index)
stock_high_old = pandas.read_excel(stock_file_path, sheet_name='stock_high', index_col=0)
stock_high_old.index = pandas.to_datetime(stock_high_old.index)
stock_low_old = pandas.read_excel(stock_file_path, sheet_name='stock_low', index_col=0)
stock_low_old.index = pandas.to_datetime(stock_low_old.index)
last_date = stock_volume_old.index[-1]
dateFormatter = '%Y-%m-%d %H:%M:%S'
date = datetime.datetime.strptime(str(last_date), dateFormatter)
date += datetime.timedelta(days=1)
if date > datetime.datetime.now():
print('Finish update_stock_info!')
sys.exit(0)
while date < datetime.datetime.now() and count <= 100:
print('parsing', date)
try:
data[date.date()] = crawl_price(date)
print('success {} times!'.format(count))
fail_count = 0
count += 1
except pandas.errors.EmptyDataError:
# no table is returned on holidays (market closed)
print('fail! check whether the date is a holiday')
fail_count += 1
if fail_count == allow_continuous_fail_count:
raise
date += datetime.timedelta(days=1)
time.sleep(10)
writer = pandas.ExcelWriter(stock_file_path, engine='xlsxwriter')
stock_volume_new = pandas.DataFrame({k: d['成交股數'] for k, d in data.items()}).transpose()
stock_volume_new.index = pandas.to_datetime(stock_volume_new.index)
stock_volume = pandas.concat([stock_volume_old, stock_volume_new], join='outer')
stock_volume.to_excel(writer, sheet_name='stock_volume', index=True)
stock_open_new = pandas.DataFrame({k: d['開盤價'] for k, d in data.items()}).transpose()
stock_open_new.index = pandas.to_datetime(stock_open_new.index)
# python 2/3 compatibility
from __future__ import division, print_function
import sys
import os.path
import numpy
import pandas
import copy
import difflib
import scipy
import collections
import json
# package imports
import rba
from .rba import RbaModel, ConstraintMatrix, Solver
from .rba_SimulationData import RBA_SimulationData
from .rba_SimulationParameters import RBA_SimulationParameters
from .rba_ModelStructure import RBA_ModelStructure
from .rba_Problem import RBA_Problem
from .rba_Matrix import RBA_Matrix
from .rba_LP import RBA_LP
from .rba_FBA import RBA_FBA
from .rba_LogBook import RBA_LogBook
class RBA_Session(object):
"""
Top level of the RBA API.
Attributes
----------
xml_dir : str
Path to the directory the model was loaded from.
model : rba.RbaModel
Underlying RBA model object.
matrices : rba.ConstraintMatrix
Constraint matrices derived from the model.
solver : rba.Solver
Solver operating on the constraint matrices.
Problem : rbatools.RBA_Problem
LP-representation of the RBA problem.
Medium : dict
Current medium composition (metabolite IDs and concentrations).
ModelStructure : rbatools.RBA_ModelStructure
Structured information on the model components.
Results : dict
Recorded simulation results (pandas.DataFrames per result type).
Parameters : dict
Recorded simulation parameters (pandas.DataFrames per parameter type).
SimulationData : rbatools.RBA_SimulationData
Exportable simulation-data object, built by writeResults().
SimulationParameters : rbatools.RBA_SimulationParameters
Exportable simulation-parameter object, built by writeResults().
Methods
----------
__init__(xml_dir)
Creates RBA_Session object from files
Parameters
----------
xml_dir : str
Path to the directory where rba-model files are located.
rebuild_from_model()
Rebuilds computational model-representation (matrix) from own attribute "model" (rba.RbaModel-object).
reloadModel()
Reloads model from xml-files and then rebuild computational model-representation (matrix).
recordResults(runName)
Records Simulation output for further use.
and stores them in the 'Results'-attribute as pandas.DataFrames in a dictionary, with the respective run-name being a column in all DataFrames.
Parameters
----------
runName : str
Name of observation/condition.
Serves as ID for all Data, originating from these.
recordParameters(runName)
Records Simulation parameters (LP-coefficients etc.) for further use.
and stores them in the 'Parameters'-attribute as pandas.DataFrames in a dictionary, with the respective run-name being a column in all DataFrames.
Parameters
----------
runName : str
Name of observation/condition.
Serves as ID for all Data, originating from these.
clearResults()
Removes all previously recorded results and deletes the 'Results'-attribute.
clearParameters()
Removes all previously recorded parameters and deletes the 'Parameters'-attribute.
writeResults(session_name='', digits=5)
Creates SimulationData and SimulationParameters objects from recordings ('Results', 'Parameters').
Stores them as rbatools.RBA_SimulationData
and rbatools.RBA_SimulationParameters objects as attributes.
Access via attributes .SimulationData and SimulationParameters respectively.
Parameters
----------
digits : int
Number of decimal places in the numeric results
Default: 5
session_name : str
Name of Simulation session.
Default: ''
returnExchangeFluxes()
Returns a dictionary with the exchange-rates of boundary-metabolites.
Returns
-------
Dictionary with exchange-keys and respective -rates.
ConstraintSaturation(constraints=None)
Determines the saturation of model constraints at current solution.
Parameters
----------
constraints : str or list of str
Specifies constraints(s) for which the saturation is to be determined.
Default-value = None:
All model-constraints are taken
Returns
-------
Pandas DataFrame with constraint-names as indices and the columns 'LHS', 'RHS', and 'Saturation'.
'LHS': The sum over the respective constraint-row multiplied elementwise with the solution vector.
'RHS': The value of the problem's righthand side, corresponding to the respective constraint.
'Saturation': The saturation of the respective constraint ('LHS'/'RHS').
(Equality constraints are always saturated)
setMedium(changes)
Sets the concentration of specified growth-substrate(s) in medium.
Parameters
----------
changes : dict
Keys : ID of metabolite(s) in medium.
Values : New concentration(s)
setMu(Mu)
Sets growth-rate to specified value.
Parameters
----------
Mu : float
Growth rate
doSolve(runName='DontSave')
Solves problem to find solution.
Equivalent to rbatools.RBA_Problem.solveLP(),
but with an additional option for automatic results-recording.
Parameters
----------
runName : str
Name of observation.
Serves as ID for all data, originating from this run.
Special values :
'DontSave' : Results are not recorded
'Auto' : Results are automatically recorded
and appended to existing ones.
Named with number.
Any other string: Results are recorded under this name.
Default: 'DontSave'
findMaxGrowthRate(precision=0.0005, max=4, start_value=None, recording=False)
Applies dichotomy-search to find the maximal feasible growth-rate.
Parameters
----------
precision : float
Numeric precision with which the maximum is approximated.
Default : 0.0005
max : float
Defines the highest growth rate to be screened for.
Default=4
start_value : float
Defines a starting-value of the search for the maximum growth-rate.
A close starting-value reduces the number of iterations required for the algorithm to converge.
If not provided search starts at growth-rate 0.
Default = None
recording : bool
Records intermediate feasible solutions
while approaching the maximum growth-rate.
Default : False
Returns
-------
maximum feasible growth rate as float.
knockOut(gene)
Simulates a gene knock out.
Constrains all variables in the LP-problem (enzymes, other machineries), which require this gene(s), to zero.
Parameters
----------
gene : str or list of strings
ID(s) of model-proteins to be knocked out.
Can either be gene-identifier, represented as ID or ProtoID of proteins in rbatools.protein_bloc.ProteinBlock.Elements class (depends on whether protein-isoforms are considered).
FeasibleRange(variables=None)
Determines the feasible range of model variables.
Parameters
----------
variables : str or list of str
Specifies variable(s) for which the feasible range is to be determined.
Default-value = None:
All model-variables are taken
Returns
-------
Dictionary with variable-names as keys and other dictionaries as values.
The 'inner' dictionaries hold keys 'Min' and 'Max'
with values representing lower and upper bound of feasible range respectively.
E.g. : {'variableA':{'Min':42 , 'Max':9000},
'variableB':{'Min':-9000 , 'Max':-42}}
ParetoFront(variable_X, variable_Y, N=10, sign_VY='max')
Determine Pareto front of two model variables.
Parameters
----------
variable_X : str
ID of variable, representing the X-coordinate of the Pareto-front
variable_Y : str
ID of variable, representing the Y-coordinate of the Pareto-front
N : int
Number of intervals within the feasible range of variable_X.
Default-value=10.
sign_VY : str
'max': variable_Y is maximised
'min': variable_Y is minimised
Returns
-------
Pandas DataFrame with columns named after the two input variables
and 'N' rows. Each row represents an interval on the Pareto front.
Entries on each row are the X and Y coordinate on the Pareto front,
representing the values of the two variables.
"""
def __init__(self, xml_dir):
"""
Creates RBA_Session object from files
Parameters
----------
xml_dir : str
Path to the directory where rba-model files are located.
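Example
-------
Minimal sketch (the model directory below is a placeholder, not part of
this package):

>>> Simulation = RBA_Session('path/to/rba_model')
>>> Simulation.setMu(0.25)
>>> Simulation.doSolve(runName='test_run')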
"""
self.xml_dir = xml_dir
self.LogBook = RBA_LogBook('Controler')
if not hasattr(self, 'ModelStructure'):
if os.path.isfile(str(self.xml_dir+'/ModelStructure.json')):
self.ModelStructure = RBA_ModelStructure()
with open(str(self.xml_dir+'/ModelStructure.json'), 'r') as myfile:
data = myfile.read()
self.ModelStructure.fromJSON(inputString=data)
else:
self.build_ModelStructure()
self.model = RbaModel.from_xml(input_dir=xml_dir)
self.matrices = ConstraintMatrix(model=self.model)
self.solver = Solver(matrix=self.matrices)
self.LogBook.addEntry('Model loaded from {}.'.format(self.xml_dir))
self.Problem = RBA_Problem(solver=self.solver)
medium = pandas.read_csv(xml_dir+'/medium.tsv', sep='\t')
self.Medium = dict(zip(list(medium.iloc[:, 0]), [float(i)
for i in list(medium.iloc[:, 1])]))
self.Mu = self.Problem.Mu
self.ExchangeMap = buildExchangeMap(self)
def build_ModelStructure(self):
self.ModelStructure = RBA_ModelStructure()
self.ModelStructure.fromFiles(xml_dir=self.xml_dir)
self.ModelStructure.exportJSON(path=self.xml_dir)
def addExchangeReactions(self):
"""
Adds explicit exchange-reactions of boundary-metabolites to RBA-problem, named R_EX_ followed by metabolite name (without M_ prefix).
"""
Mets_external = [m.id for m in self.model.metabolism.species if m.boundary_condition]
Mets_internal = [m.id for m in self.model.metabolism.species if not m.boundary_condition]
Reactions = [r.id for r in self.model.metabolism.reactions]
full_S = rba.core.metabolism.build_S(
Mets_external+Mets_internal, self.model.metabolism.reactions)
S_M_ext = full_S[:len(Mets_external), ].toarray()
col_indices_toremove = []
for i in range(S_M_ext.shape[1]):
s_col_uniques = list(set(list(S_M_ext[:, i])))
if len(s_col_uniques) == 1:
if s_col_uniques[0] == 0:
col_indices_toremove.append(i)
RemainingReactions = [i for i in Reactions if Reactions.index(
i) not in col_indices_toremove]
S_ext = numpy.delete(S_M_ext, col_indices_toremove, axis=1)
A = numpy.concatenate((S_ext, numpy.eye(len(Mets_external))), axis=1, out=None)
ColNames = RemainingReactions+[str('R_EX_'+i.split('M_')[-1]) for i in Mets_external]
# print(str('R_EX_'+i.split('M_')[-1]))
LBs = list([self.Problem.LP.LB[self.Problem.LP.col_names.index(i)]
for i in RemainingReactions]+[-10000]*len(Mets_external))
UBs = list([self.Problem.LP.UB[self.Problem.LP.col_names.index(i)]
for i in RemainingReactions]+[10000]*len(Mets_external))
b = [0]*len(Mets_external)
f = list([self.Problem.LP.f[self.Problem.LP.col_names.index(i)]
for i in RemainingReactions]+[0]*len(Mets_external))
ExchangeMatrix = RBA_Matrix()
ExchangeMatrix.A = scipy.sparse.coo_matrix(A)
ExchangeMatrix.b = numpy.array([0]*len(Mets_external))
ExchangeMatrix.f = numpy.array(f)
ExchangeMatrix.LB = numpy.array(LBs)
ExchangeMatrix.UB = numpy.array(UBs)
ExchangeMatrix.row_signs = ['E']*len(Mets_external)
ExchangeMatrix.row_names = Mets_external
ExchangeMatrix.col_names = ColNames
ExchangeMatrix.mapIndices()
self.Problem.LP.addMatrix(matrix=ExchangeMatrix)
self.ExchangeReactionMap = dict(
zip(Mets_external, [str('R_EX_'+i.split('M_')[-1]) for i in Mets_external]))
def rebuild_from_model(self):
"""
Rebuilds computational model-representation (matrix) from own attribute "model" (rba.RbaModel-object).
"""
self.LogBook.addEntry('Model rebuilt.')
self.matrices = ConstraintMatrix(model=self.model)
self.solver = Solver(matrix=self.matrices)
self.Problem = RBA_Problem(solver=self.solver)
self.setMedium(changes=self.Medium)
def reloadModel(self):
"""
Reloads model from xml-files and then rebuild computational model-representation (matrix).
"""
self.LogBook.addEntry('Model reloaded from {}.'.format(self.xml_dir))
self.model = RbaModel.from_xml(input_dir=self.xml_dir)
self.rebuild_from_model()
def recordResults(self, runName):
"""
Records Simulation output for further use.
and stores them in the 'Results'-attribute as pandas.DataFrames in a dictionary, with the respective run-name being a column in all DataFrames.
Parameters
----------
runName : str
Name of observation/condition.
Serves as ID for all Data, originating from these.
"""
self.LogBook.addEntry('Solution recorded under {}.'.format(runName))
if not hasattr(self, 'Results'):
self.Results = {'Reactions': pandas.DataFrame(index=list(self.ModelStructure.ReactionInfo.Elements.keys())),
'Enzymes': pandas.DataFrame(index=list(self.ModelStructure.EnzymeInfo.Elements.keys())),
'Processes': pandas.DataFrame(index=[self.ModelStructure.ProcessInfo.Elements[i]['ID']+'_machinery' for i in self.ModelStructure.ProcessInfo.Elements.keys()]),
'Proteins': pandas.DataFrame(index=list(self.ModelStructure.ProteinMatrix['Proteins'])),
'ProtoProteins': pandas.DataFrame(index=list(self.ModelStructure.ProteinGeneMatrix['ProtoProteins'])),
'Constraints': pandas.DataFrame(index=self.Problem.LP.row_names),
'SolutionType': pandas.DataFrame(index=['SolutionType']),
'Mu': pandas.DataFrame(index=['Mu']),
'ObjectiveFunction': pandas.DataFrame(index=self.Problem.LP.col_names),
'ObjectiveValue': pandas.DataFrame(index=['ObjectiveValue']),
'ExchangeFluxes': pandas.DataFrame(index=list(self.ExchangeMap.keys()))}
Exchanges = self.returnExchangeFluxes()
for i in Exchanges.keys():
self.Results['ExchangeFluxes'].loc[i, runName] = Exchanges[i]
self.Results['Reactions'][runName] = self.Results['Reactions'].index.map(
{i: self.Problem.SolutionValues[i] for i in list(self.Results['Reactions'].index)})
self.Results['Enzymes'][runName] = self.Results['Enzymes'].index.map(
{i: self.Problem.SolutionValues[i] for i in list(self.Results['Enzymes'].index)})
self.Results['Processes'][runName] = self.Results['Processes'].index.map(
{i: self.Problem.SolutionValues[i] for i in list(self.Results['Processes'].index)})
self.Results['Constraints'][runName] = self.Results['Constraints'].index.map(
{i: self.Problem.DualValues[i] for i in self.Problem.LP.row_names})
self.Results['Proteins'][runName] = self.Results['Proteins'].index.map(
ProteomeRecording(self, runName))
self.Results['ProtoProteins'][runName] = self.Results['ProtoProteins'].index.map(
ProtoProteomeRecording(self, runName, self.Results['Proteins']))
self.Results['SolutionType'][runName] = self.Problem.SolutionType
self.Results['Mu'][runName] = self.Problem.Mu
self.Results['ObjectiveFunction'][runName] = list(self.Problem.getObjective().values())
self.Results['ObjectiveValue'][runName] = self.Problem.ObjectiveValue
def recordParameters(self, runName):
"""
Records Simulation parameters (LP-coefficients etc.) for further use.
and stores them in the 'Parameters'-attribute as pandas.DataFrames in a dictionary, with the respective run-name being a column in all DataFrames.
Parameters
----------
runName : str
Name of observation/condition.
Serves as ID for all Data, originating from these.
"""
self.LogBook.addEntry('Coefficients recorded under {}.'.format(runName))
EnzymeCapacities = self.get_parameter_values(
parameter_type='enzyme_efficiencies', species=None, output_format='dict')
ProcessCapacities = self.get_parameter_values(
parameter_type='machine_efficiencies', species=None, output_format='dict')
CompartmentCapacities = self.get_parameter_values(
parameter_type='maximal_densities', species=None, output_format='dict')
TargetValues = self.get_parameter_values(
parameter_type='target_values', species=None, output_format='dict')
if not hasattr(self, 'Parameters'):
self.Parameters = {'EnzymeEfficiencies_FW': pandas.DataFrame(index=list(EnzymeCapacities.keys())),
'EnzymeEfficiencies_BW': pandas.DataFrame(index=list(EnzymeCapacities.keys())),
'ProcessEfficiencies': pandas.DataFrame(index=list(ProcessCapacities.keys())),
'CompartmentCapacities': pandas.DataFrame(index=list(CompartmentCapacities.keys())),
'Medium': pandas.DataFrame(index=self.Medium.keys()),
'TargetValues': pandas.DataFrame(index=[TargetValues[i]['Target_id'] for i in list(TargetValues.keys())])}
self.Parameters['EnzymeEfficiencies_FW'][runName] = self.Parameters['EnzymeEfficiencies_FW'].index.map({i: list(
EnzymeCapacities[i]['Forward'].values())[0] for i in list(EnzymeCapacities.keys()) if len(list(EnzymeCapacities[i]['Forward'].values())) > 0})
self.Parameters['EnzymeEfficiencies_BW'][runName] = self.Parameters['EnzymeEfficiencies_BW'].index.map({i: list(
EnzymeCapacities[i]['Backward'].values())[0] for i in list(EnzymeCapacities.keys()) if len(list(EnzymeCapacities[i]['Forward'].values())) > 0})
self.Parameters['ProcessEfficiencies'][runName] = self.Parameters['ProcessEfficiencies'].index.map(
{i: list(ProcessCapacities[i].values())[0] for i in list(ProcessCapacities.keys()) if len(list(ProcessCapacities[i].values())) > 0})
self.Parameters['CompartmentCapacities'][runName] = self.Parameters['CompartmentCapacities'].index.map(
{i: list(CompartmentCapacities[i].values())[0] for i in list(CompartmentCapacities.keys()) if len(list(CompartmentCapacities[i].values())) > 0})
self.Parameters['Medium'][runName] = self.Parameters['Medium'].index.map(self.Medium)
self.Parameters['TargetValues'][runName] = self.Parameters['TargetValues'].index.map(
{TargetValues[i]['Target_id']: list(TargetValues[i]['Target_value'].values())[0] for i in list(TargetValues.keys()) if len(list(TargetValues[i]['Target_value'].values())) > 0})
def clearResults(self):
"""
Removes all previously recorded results and deletes the 'Results'-attribute.
"""
self.LogBook.addEntry('Results cleared.')
delattr(self, 'Results')
def clearParameters(self):
"""
Removes all previously recorded parameters and deletes the 'Parameters'-attribute.
"""
self.LogBook.addEntry('Parameters cleared.')
delattr(self, 'Parameters')
def writeResults(self, session_name='', digits=5, loggingIntermediateSteps=False):
"""
Creates SimulationData and SimulationParameters objects from recordings ('Results', 'Parameters').
Stores them as rbatools.RBA_SimulationData
and rbatools.RBA_SimulationParameters objects as attributes.
Access via attributes .SimulationData and SimulationParameters respectively.
Parameters
----------
digits : int
Number of decimal places in the numeric results
Default: 5
session_name : str
Name of Simulation session.
Default: ''
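Example
-------
Sketch of the record-and-export workflow (run- and session-names are
arbitrary labels chosen here for illustration):

>>> Simulation.doSolve(runName='reference_run')
>>> Simulation.recordParameters(runName='reference_run')
>>> Simulation.writeResults(session_name='reference_session')
>>> Simulation.SimulationData  # rbatools.RBA_SimulationData object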
"""
self.LogBook.addEntry('Data written under {}.'.format(session_name))
if hasattr(self, 'Results'):
self.Results['uniqueReactions'] = mapIsoReactions(Controller=self)
self.Results['SolutionType'] = self.Results['SolutionType']
self.Results['Mu'] = self.Results['Mu'].round(digits)
self.Results['ObjectiveFunction'] = self.Results['ObjectiveFunction'].loc[(
self.Results['ObjectiveFunction'] != 0).any(axis=1)].round(digits)
self.Results['ObjectiveValue'] = self.Results['ObjectiveValue'].round(digits)
self.Results['Proteins'] = self.Results['Proteins'].round(digits)
self.Results['uniqueReactions'] = self.Results['uniqueReactions'].round(digits)
self.Results['Reactions'] = self.Results['Reactions'].round(digits)
self.Results['Enzymes'] = self.Results['Enzymes'].round(digits)
self.Results['Processes'] = self.Results['Processes'].round(digits)
self.Results['Constraints'] = self.Results['Constraints'].round(digits)
self.Results['ExchangeFluxes'] = self.Results['ExchangeFluxes'].round(digits)
self.SimulationData = RBA_SimulationData(StaticData=self.ModelStructure)
self.SimulationData.fromSimulationResults(Controller=self, session_name=session_name)
if hasattr(self, 'Parameters'):
self.Parameters['EnzymeEfficiencies_FW'] = self.Parameters['EnzymeEfficiencies_FW'].round(
digits)
self.Parameters['EnzymeEfficiencies_BW'] = self.Parameters['EnzymeEfficiencies_BW'].round(
digits)
self.Parameters['ProcessEfficiencies'] = self.Parameters['ProcessEfficiencies'].round(
digits)
self.Parameters['CompartmentCapacities'] = self.Parameters['CompartmentCapacities'].round(
digits)
self.Parameters['TargetValues'] = self.Parameters['TargetValues'].round(digits)
self.Parameters['Medium'] = self.Parameters['Medium'].loc[(
self.Parameters['Medium'] != 0).any(axis=1)].round(digits)
self.SimulationParameters = RBA_SimulationParameters(StaticData=self.ModelStructure)
self.SimulationParameters.fromSimulationResults(Controller=self)
def returnExchangeFluxes(self):
"""
Returns a dictionary with the exchange-rates of boundary-metabolites.
Returns
-------
Dictionary with exchange-keys and respective -rates.
"""
out = {}
for j in self.ExchangeMap.keys():
netflux = 0
for k in self.ExchangeMap[j].keys():
netflux += self.ExchangeMap[j][k]*self.Problem.SolutionValues[k]
if netflux != 0:
out[j] = netflux
return(out)
def ConstraintSaturation(self, constraints=None):
"""
Determines the saturation of model constraints at current solution.
Parameters
----------
constraints : str or list of str
Specifies constraints(s) for which the saturation is to be determined.
Default-value = None:
All model-constraints are taken
Returns
-------
Pandas DataFrame with constraint-names as indices and the columns 'LHS', 'RHS', and 'Saturation'.
'LHS': The sum over the respective constraint-row multiplied elementwise with the solution vector.
'RHS': The value of the problem's righthand side, corresponding to the respective constraint.
'Saturation': The saturation of the respective constraint ('LHS'/'RHS').
(Equality constraints are always saturated)
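Example
-------
Sketch (the constraint ID below is a placeholder; real IDs depend on the
loaded model):

>>> Simulation.doSolve()
>>> sat = Simulation.ConstraintSaturation()
>>> sat.loc['some_density_constraint', 'Saturation']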
"""
if constraints is None:
ConstraintsInQuestion = self.Problem.LP.row_names
else:
if isinstance(constraints, list):
ConstraintsInQuestion = constraints
elif isinstance(constraints, str):
ConstraintsInQuestion = [constraints]
if len(list(constraints)) > 0:
if isinstance(constraints[0], list):
ConstraintsInQuestion = constraints[0]
if isinstance(constraints[0], str):
ConstraintsInQuestion = [constraints[0]]
if len(list(constraints)) == 0:
ConstraintsInQuestion = self.Problem.LP.row_names
rhs = self.Problem.getRighthandSideValue(ConstraintsInQuestion)
lhs = self.Problem.calculateLefthandSideValue(ConstraintsInQuestion)
RHS = list(rhs.values())
LHS = list(lhs.values())
Out = pandas.DataFrame(columns=['LHS', 'RHS', 'Saturation'], index=ConstraintsInQuestion)
for i in ConstraintsInQuestion:
lhval = LHS[self.Problem.LP.rowIndicesMap[i]]
rhval = RHS[self.Problem.LP.rowIndicesMap[i]]
sat = numpy.nan
if rhval != 0:
sat = lhval/rhval
Out.loc[i, 'LHS'] = lhval
Out.loc[i, 'RHS'] = rhval
Out.loc[i, 'Saturation'] = sat
self.LogBook.addEntry(
'Saturation of constraint {} determined to be {}.'.format(i, sat))
return(Out)
def setMedium(self, changes, loggingIntermediateSteps=False):
"""
Sets the concentration of specified growth-substrate(s) in medium.
Parameters
----------
changes : dict
Keys : ID of metabolite(s) in medium.
Values : New concentration(s)
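Example
-------
Sketch (the metabolite ID is a placeholder; valid IDs are the keys of the
'Medium'-attribute):

>>> Simulation.setMedium(changes={'M_glc': 10.0})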
"""
for species in (changes.keys()):
self.Medium[species] = float(changes[species])
self.Problem.ClassicRBAmatrix.set_medium(self.Medium)
self.Problem.ClassicRBAmatrix.build_matrices(self.Mu)
inputMatrix = RBA_Matrix()
inputMatrix.loadMatrix(matrix=self.Problem.ClassicRBAmatrix)
self.Problem.LP.updateMatrix(matrix=inputMatrix, Ainds=MediumDependentCoefficients_A(
self), Binds=[], CTinds=[], LBinds=None, UBinds=None)
def setMu(self, Mu, loggingIntermediateSteps=False):
"""
Sets growth-rate to specified value.
Parameters
----------
Mu : float
Growth rate
"""
self.LogBook.addEntry('Growth-rate changed:{} --> {}'.format(self.Mu, float(Mu)))
self.Problem.setMu(Mu=float(Mu), ModelStructure=self.ModelStructure,
logging=loggingIntermediateSteps)
self.Mu = float(Mu)
def doSolve(self, runName='DontSave', loggingIntermediateSteps=False):
"""
Solves problem to find solution.
Equivalent to rbatools.RBA_Problem.solveLP(),
but with an additional option for automatic results-recording.
Parameters
----------
runName : str
Name of observation.
Serves as ID for all data, originating from this run.
Special values :
'DontSave' : Results are not recorded
'Auto' : Results are automatically recorded
and appended to existing ones.
Named with number.
Any other string: Results are recorded under this name.
Default: 'DontSave'
"""
self.Problem.solveLP(logging=loggingIntermediateSteps)
if self.Problem.Solved:
if runName != 'DontSave':
if runName == 'Auto':
if hasattr(self, 'Results'):
name = str(self.Results['Reactions'].shape[1]+1)
if not hasattr(self, 'Results'):
name = '1'
if runName != 'Auto':
name = runName
self.recordResults(runName=name)
def findMaxGrowthRate(self, precision=0.0005, max=4, start_value=None, recording=False, loggingIntermediateSteps=False):
"""
Applies dichotomy-search to find the maximal feasible growth-rate.
Parameters
----------
precision : float
Numeric precision with which the maximum is approximated.
Default : 0.0005
max : float
Defines the highest growth rate to be screened for.
Default=4
start_value : float
Defines a starting-value of the search for the maximum growth-rate.
A close starting-value reduces the number of iterations required for the algorithm to converge.
If not provided search starts at growth-rate 0.
Default = None
recording : bool
Records intermediate feasible solutions
while approaching the maximum growth-rate.
Default : False
Returns
-------
maximum feasible growth rate as float.
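Example
-------
Sketch of a growth-rate screen at the current medium composition:

>>> mumax = Simulation.findMaxGrowthRate(precision=0.001, max=2)
>>> round(mumax, 3)  # maximal feasible growth-rate (model-dependent)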
"""
minMu = 0
maxMu = max
if start_value is None:
testMu = minMu
else:
testMu = start_value
iteration = 0
while (maxMu - minMu) > precision:
self.setMu(Mu=testMu)
self.Problem.solveLP(logging=loggingIntermediateSteps)
if self.Problem.Solved:
iteration += 1
if recording:
self.recordResults('DichotomyMu_iteration_'+str(iteration))
minMu = testMu
else:
maxMu = testMu
testMu = numpy.mean([maxMu, minMu])
self.LogBook.addEntry('Maximal growth-rate found to be: {}.'.format(minMu))
if minMu == max:
print('Warning: Maximum growth rate might exceed specified range. Try rerunning this method with larger max-argument.')
self.setMu(Mu=minMu)
self.Problem.solveLP(logging=False)
self.Problem.SolutionType = 'GrowthRate_maximization'
return(minMu)
def knockOut(self, gene, loggingIntermediateSteps=False):
"""
Simulates a gene knock out.
Constrains all variables in the LP-problem (enzymes, other machineries), which require this gene(s), to zero.
Parameters
----------
gene : str or list of strings
ID(s) of model-proteins to be knocked out.
Can either be gene-identifier, represented as ID or ProtoID of proteins in rbatools.protein_bloc.ProteinBlock.Elements class (depends on whether protein-isoforms are considered).
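Example
-------
Sketch (the gene/protein ID is a placeholder; valid IDs are the keys of
ModelStructure.ProteinInfo.Elements):

>>> Simulation.knockOut('proteinA')
>>> Simulation.findMaxGrowthRate()  # maximal growth-rate without this protein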
"""
if type(gene) is str:
genes = [gene]
if type(gene) is list:
genes = gene
isoform_genes = [g for g in genes if g in list(self.ModelStructure.ProteinInfo.Elements.keys(
))]+[i for g in genes for i in self.ModelStructure.ProteinInfo.Elements.keys() if self.ModelStructure.ProteinInfo.Elements[i]['ProtoID'] == g]
for g in isoform_genes:
self.LogBook.addEntry('Gene {} knocked out.'.format(g))
ConsumersEnzymes = self.ModelStructure.ProteinInfo.Elements[g]['associatedEnzymes']
for i in ConsumersEnzymes:
LikeliestVarName = difflib.get_close_matches(i, self.Problem.LP.col_names, 1)[0]
self.Problem.setLB(inputDict={LikeliestVarName: 0},
logging=loggingIntermediateSteps)
self.Problem.setUB(inputDict={LikeliestVarName: 0},
logging=loggingIntermediateSteps)
ConsumersProcess = self.ModelStructure.ProteinInfo.Elements[g]['SupportsProcess']
for i in ConsumersProcess:
LikeliestVarName = difflib.get_close_matches(
str(self.ModelStructure.ProcessInfo.Elements[i]['ID']+'_machinery'), self.Problem.LP.col_names, 1)[0]
self.Problem.setLB(inputDict={LikeliestVarName: 0},
logging=loggingIntermediateSteps)
self.Problem.setUB(inputDict={LikeliestVarName: 0},
logging=loggingIntermediateSteps)
def FeasibleRange(self, variables=None, loggingIntermediateSteps=False):
"""
Determines the feasible range of model variables.
Parameters
----------
variables : str or list of str
Specifies variable(s) for which the feasible range is to be determined.
Default-value = None:
All model-variables are taken
Returns
-------
Dictionary with variable-names as keys and other dictionaries as values.
The 'inner' dictionaries hold keys 'Min' and 'Max'
with values representing lower and upper bound of feasible range respectively.
E.g. : {'variableA':{'Min':42 , 'Max':9000},
'variableB':{'Min':-9000 , 'Max':-42}}
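Example
-------
Sketch (the variable ID is a placeholder; valid IDs are entries of
Problem.LP.col_names):

>>> fr = Simulation.FeasibleRange('R_reactionA')
>>> fr['R_reactionA']['Min'], fr['R_reactionA']['Max']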
"""
if variables is None:
VariablesInQuestion = self.Problem.LP.col_names
else:
if isinstance(variables, list):
VariablesInQuestion = variables
elif isinstance(variables, str):
VariablesInQuestion = [variables]
out = {}
for i in VariablesInQuestion:
min = numpy.nan
max = numpy.nan
self.Problem.clearObjective(logging=loggingIntermediateSteps)
self.Problem.setObjectiveCoefficients(
inputDict={i: 1.0}, logging=loggingIntermediateSteps)
self.Problem.solveLP(logging=loggingIntermediateSteps)
if self.Problem.Solved:
min = self.Problem.SolutionValues[i]
self.Problem.setObjectiveCoefficients(
inputDict={i: -1.0}, logging=loggingIntermediateSteps)
self.Problem.solveLP(logging=loggingIntermediateSteps)
if self.Problem.Solved:
max = self.Problem.SolutionValues[i]
out.update({i: {'Min': min, 'Max': max}})
self.LogBook.addEntry(
'Feasible-range of {} determined to be between {} and {}.'.format(i, min, max))
return(out)
def ParetoFront(self, variable_X, variable_Y, N=10, sign_VY='max', loggingIntermediateSteps=False):
"""
Determine Pareto front of two model variables.
Parameters
----------
variable_X : str
ID of variable, representing the X-coordinate of the Pareto-front
variable_Y : str
ID of variable, representing the Y-coordinate of the Pareto-front
N : int
Number of intervals within the feasible range of variable_X.
Default-value=10.
sign_VY : str
'max': variable_Y is maximised
'min': variable_Y is minimised
Returns
-------
Pandas DataFrame with columns named after the two input variables
and 'N' rows. Each row represents an interval on the Pareto front.
Entries on each row are the X and Y coordinate on the Pareto front,
representing the values of the two variables.
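Example
-------
Sketch (both variable IDs are placeholders):

>>> front = Simulation.ParetoFront('R_reactionA', 'R_reactionB', N=5, sign_VY='max')
>>> front.head()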
"""
if variable_X not in self.Problem.LP.col_names:
print('Chosen Element not among problem variables')
return
if variable_Y not in self.Problem.LP.col_names:
print('Chosen Element not among problem variables')
return
FR = self.FeasibleRange(variable_X)
cMin = FR[variable_X]['Min']
cMax = FR[variable_X]['Max']
concentrations = [float(cMin+(cMax-cMin)*i/N) for i in range(N+1)]
Out = pandas.DataFrame(columns=[variable_X, variable_Y])
oldLB = self.Problem.getLB(variable_X)
oldUB = self.Problem.getUB(variable_X)
iteration = -1
for conc in concentrations:
iteration += 1
self.Problem.setLB(inputDict={variable_X: conc}, logging=loggingIntermediateSteps)
self.Problem.setUB(inputDict={variable_X: conc}, logging=loggingIntermediateSteps)
self.Problem.clearObjective(logging=loggingIntermediateSteps)
if sign_VY == 'max':
self.Problem.setObjectiveCoefficients(
inputDict={variable_Y: -1}, logging=loggingIntermediateSteps)
if sign_VY == 'min':
self.Problem.setObjectiveCoefficients(
inputDict={variable_Y: 1}, logging=loggingIntermediateSteps)
self.Problem.solveLP(logging=loggingIntermediateSteps)
if self.Problem.Solved:
max = abs(self.Problem.ObjectiveValue)
else:
max = numpy.nan
self.Problem.setLB(inputDict=oldLB, logging=loggingIntermediateSteps)
self.Problem.setUB(inputDict=oldUB, logging=loggingIntermediateSteps)
Out.loc[iteration, variable_X] = conc
Out.loc[iteration, variable_Y] = max
self.LogBook.addEntry(
'Pareto-front between {} and {} determined.'.format(variable_X, variable_Y))
return(Out)
### !!! Docstring ###
def buildFBA(self, type='classic', objective='classic', maintenanceToBM=False):
"""
Derives and constructs FBA-problem from RBA-problem and stores it under attribute 'FBA'.
Parameters
----------
type : str
'classic': keep only metabolite (stoichiometric) rows and reaction/synthesis columns of the RBA problem, without enzyme columns.
'parsi': parsimonious variant; additionally keeps enzyme columns and rows prefixed with 'R_'.
Default: 'classic'
objective : str
'classic': classic FBA setting; the maintenance-ATP reaction is removed from the problem.
'targets': a biomass reaction ('R_BIOMASS_targetsRBA'), built from the RBA target vector, is added.
Default: 'classic'
maintenanceToBM : boolean
If True (together with objective='targets'), the maintenance-ATP requirement is folded into the biomass reaction.
Default: False
"""
RBAproblem = self.Problem.LP
A = RBAproblem.A.toarray()
if type == 'classic':
Cols2remove = list(set([RBAproblem.col_names.index(i) for i in RBAproblem.col_names if not i.startswith('R_') and not i.startswith('M_') and not i.endswith('_synthesis')]
+ [RBAproblem.col_names.index(i) for i in RBAproblem.col_names if '_duplicate_' in i]
+ [RBAproblem.col_names.index(i) for i in RBAproblem.col_names if 'enzyme' in i]))
Rows2remove = [RBAproblem.row_names.index(
i) for i in RBAproblem.row_names if not i.startswith('M_')]
elif type == 'parsi':
Cols2remove = list(set([RBAproblem.col_names.index(i) for i in RBAproblem.col_names if not i.startswith(
'R_') and not i.startswith('M_') and not i.endswith('_synthesis')]+[RBAproblem.col_names.index(i) for i in RBAproblem.col_names if '_duplicate_' in i]))
Rows2remove = [RBAproblem.row_names.index(
i) for i in RBAproblem.row_names if not i.startswith('R_') and not i.startswith('M_')]
if objective == 'classic':
if 'R_maintenance_atp' in RBAproblem.col_names:
Cols2remove.append(RBAproblem.col_names.index('R_maintenance_atp'))
Anew = numpy.delete(A, Cols2remove, axis=1)
col_namesNew = list(numpy.delete(RBAproblem.col_names, Cols2remove))
LBnew = numpy.delete(RBAproblem.LB, Cols2remove)
UBnew = numpy.delete(RBAproblem.UB, Cols2remove)
fNew = numpy.delete(RBAproblem.f, Cols2remove)
Anew2 = numpy.delete(Anew, Rows2remove, axis=0)
row_namesNew = list(numpy.delete(RBAproblem.row_names, Rows2remove))
row_signsNew = list(numpy.delete(RBAproblem.row_signs, Rows2remove))
bNew = numpy.delete(RBAproblem.b, Rows2remove)
trnaInds = [i for i in range(len(row_namesNew)) if row_namesNew[i].startswith(
'M_') and 'trna' in row_namesNew[i]]
# bNew[trnaInds] = 0
if objective == 'targets':
col_namesNew.append('R_BIOMASS_targetsRBA')
LBnew = numpy.append(LBnew, 0)
UBnew = numpy.append(UBnew, 10000)
fNew = numpy.append(fNew, 0)
BMrxnCol = numpy.ones((len(row_namesNew), 1))
BMrxnCol[:, 0] = bNew
if maintenanceToBM:
MaintenanceTarget = LBnew[col_namesNew.index('R_maintenance_atp')]
BMrxnCol[row_namesNew.index('M_atp_c')] += MaintenanceTarget
BMrxnCol[row_namesNew.index('M_h2o_c')] += MaintenanceTarget
BMrxnCol[row_namesNew.index('M_adp_c')] -= MaintenanceTarget
BMrxnCol[row_namesNew.index('M_pi_c')] -= MaintenanceTarget
BMrxnCol[row_namesNew.index('M_h_c')] -= MaintenanceTarget
LBnew[col_namesNew.index('R_maintenance_atp')] = 0
Anew2 = numpy.append(Anew2, -BMrxnCol, axis=1)
bNew = numpy.array([0]*Anew2.shape[0])
Matrix1 = RBA_Matrix()
Matrix1.A = scipy.sparse.coo_matrix(Anew2)
Matrix1.b = bNew
Matrix1.LB = LBnew
Matrix1.UB = UBnew
Matrix1.row_signs = row_signsNew
Matrix1.row_names = row_namesNew
Matrix1.col_names = col_namesNew
Matrix1.f = fNew
if type == 'classic':
Matrix1.b = numpy.array([0]*len(row_signsNew))
LP1 = RBA_LP()
LP1.loadMatrix(Matrix1)
elif type == 'parsi':
MetaboliteRows = {i: Matrix1.row_names.index(
i) for i in Matrix1.row_names if i.startswith('M_')}
EnzymeCols = {i: Matrix1.col_names.index(
i) for i in Matrix1.col_names if i.startswith('R_') and '_enzyme' in i}
Matrix2 = RBA_Matrix()
Matrix2.A = scipy.sparse.coo_matrix(numpy.zeros((len(MetaboliteRows), len(EnzymeCols))))
Matrix2.b = numpy.array(Matrix1.b[list(MetaboliteRows.values())])
Matrix2.LB = numpy.array(Matrix1.LB[list(EnzymeCols.values())])
Matrix2.UB = numpy.array(Matrix1.UB[list(EnzymeCols.values())])
Matrix2.f = numpy.array(Matrix1.f[list(EnzymeCols.values())])
Matrix2.row_signs = [Matrix1.row_signs[i] for i in list(MetaboliteRows.values())]
Matrix2.row_names = list(MetaboliteRows.keys())
Matrix2.col_names = list(EnzymeCols.keys())
Matrix2.mapIndices()
Matrix1.b = numpy.array([0]*len(bNew))
LP1 = RBA_LP()
LP1.loadMatrix(Matrix1)
LP1.updateMatrix(Matrix2)
self.FBA = RBA_FBA(LP1)
def findMinMediumConcentration(self, metabolite, precision=0.00001, max=100, recording=False, loggingIntermediateSteps=False):
"""
Applies dichotomy-search to find the minimal feasible concentration of
growth-substrate in medium, at a previously set growth-rate.
Parameters
----------
metabolite : str
ID of metabolite in medium.
precision : float
Numeric precision with which the minimum is approximated.
Default : 0.00001
max : float
Defines the highest concentration to be screened for.
Default=100
recording : bool
Records intermediate feasible solutions
while approaching the minimum concentration.
Default : False
Returns
-------
minimum feasible growth-substrate concentration as float.
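Example
-------
Sketch (the metabolite ID is a placeholder from the medium definition):

>>> Simulation.setMu(0.2)
>>> c_min = Simulation.findMinMediumConcentration('M_glc', precision=0.001)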
"""
minConc = 0.0
maxConc = max
testConc = minConc
iteration = 0
oldConc = self.Medium[metabolite]
while (maxConc - minConc) > precision:
self.setMedium(changes={metabolite: testConc})
self.Problem.solveLP(logging=loggingIntermediateSteps)
if self.Problem.Solved:
iteration += 1
if recording:
run_name = 'Dichotomy_'+metabolite+'_' + \
str(testConc)+'_iteration_'+str(iteration)
self.recordResults(run_name)
maxConc = testConc
else:
minConc = testConc
testConc = numpy.mean([maxConc, minConc])
self.LogBook.addEntry(
'Minimal required {} concentration found to be: {}.'.format(metabolite, maxConc))
self.setMedium(changes={metabolite: oldConc})
return(maxConc)
def addProtein(self, input):
"""
Adds representation of individual proteins to problem.
Parameters
----------
input : dict or str
If input is str it has to be the ID of a protein in the model.
Then this protein is added to the problem an creates:
One constraint named Protein_'ID' (equality).
One variable named TotalLevel_'ID' representing the total amount.
One variable named Free_'ID'_'respectiveCompartment', this
represents the fraction of the protein not assuming any function.
It however consumes resources for synthesis (precursors and processes),
which are the same as defined in the model files.
And takes up space in the compartment as specified in the model-files
for the protein.
If input is dict it has to have two keys; 'ID' and 'UnusedProteinFraction'.
By specifying this input one can define that the unused fraction of the protein
can also reside in other compartments and which processes it requires.
The value to 'ID' is the ID of a protein in the model.
The value to 'UnusedProteinFraction' is another dictionary.
This can have several keys which must be model-compartments.
For each of the keys the value is a dict holding IDs of model-processes as Keys
and process requirements as Values (numerical).
This specifies which processes each of the compartment-species of the protein
requires.
This generates the same constraint and TotalLevel-variable as with the simple input,
however a variable representing each of the compartment-species for the unused fraction
is added and incorporates the specific process requirements.
E.g: input = {'ID': 'proteinA',
              'UnusedProteinFraction': {'Cytoplasm': {'Translation': 100, 'Folding': 10},
                                        'Membrane': {'Translation': 100, 'Folding': 20, 'Secretion': 100}
                                        }
              }
This adds 'proteinA' to the model, where the unused fraction can reside either in
the Cytoplasm or the Membrane. However while the cytosolic-species only requires the
processes 'Translation' and 'Folding'; the membrane-bound species also requires 'Secretion'
and occupies more folding capacity.
Then the constraint 'Protein_proteinA' is added and the 3 variables
'TotalLevel_proteinA', 'Free_proteinA_Cytoplasm' and 'Free_proteinA_Membrane'.
"""
if type(input) is str:
input = {'ID': input}
if 'ID' not in list(input.keys()):
print('Error, no protein ID provided')
return
if input['ID'] not in list(self.ModelStructure.ProteinInfo.Elements.keys()):
print('Error, protein not in model')
return
if 'UnusedProteinFraction' not in list(input.keys()):
input.update({'UnusedProteinFraction':
{self.ModelStructure.ProteinInfo.Elements[input['ID']]['Compartment']:
self.ModelStructure.ProteinInfo.Elements[input['ID']]['ProcessRequirements']}})
self.LogBook.addEntry('Protein {} added with specifications {}.'.format(
input['ID'], str(json.dumps(input))))
Muindexlist = []
## Building RBA_Matrix-object for new constraint-row, representing protein ##
UsedProtein = RBA_Matrix()
UsedProtein.A = scipy.sparse.coo_matrix(
buildUsedProteinConstraint(Controler=self, protein=input['ID']))
UsedProtein.b = numpy.array([float(0)])
UsedProtein.f = numpy.array(self.Problem.LP.f)
UsedProtein.LB = numpy.array(self.Problem.LP.LB)
UsedProtein.UB = numpy.array(self.Problem.LP.UB)
UsedProtein.row_signs = ['E']
UsedProtein.row_names = ['Protein_'+input['ID']]
UsedProtein.col_names = self.Problem.LP.col_names
## Add used protein row to problem ##
self.Problem.LP.addMatrix(matrix=UsedProtein)
## Add used protein row to reference Matrix (Mu == 1) ##
self.Problem.MuOneMatrix.addMatrix(matrix=UsedProtein)
## Building RBA_Matrix-object for new variable-col, representing total level of protein ##
TotProtein = RBA_Matrix()
TotProtein.A = scipy.sparse.coo_matrix(numpy.array(numpy.matrix(
numpy.array([float(0)]*self.Problem.LP.A.shape[0]+[float(-1)])).transpose()))
TotProtein.f = numpy.array([float(0)])
TotProtein.LB = numpy.array([float(0)])
TotProtein.UB = numpy.array([float(100000.0)])
TotProtein.b = numpy.array(list(self.Problem.LP.b)+list(UsedProtein.b))
TotProtein.row_signs = self.Problem.LP.row_signs+UsedProtein.row_signs
TotProtein.row_names = self.Problem.LP.row_names+UsedProtein.row_names
TotProtein.col_names = ['TotalLevel_'+input['ID']]
## Add total protein col to problem ##
self.Problem.LP.addMatrix(matrix=TotProtein)
## Add total protein col to reference Matrix (Mu == 1) ##
self.Problem.MuOneMatrix.addMatrix(matrix=TotProtein)
## Building RBA_Matrix-object for new variable-col,##
## representing each compartment-species of the protein ##
for comp_species in list(input['UnusedProteinFraction'].keys()):
## Initiate RBA_Matrix object##
UnusedProtein = RBA_Matrix()
UnusedProtein.col_names = ['Free_'+input['ID']+'_'+comp_species]
## Extract required processes for protein and the respective demand ##
ProcIDs = list(input['UnusedProteinFraction'][comp_species].keys())
Preq = list(input['UnusedProteinFraction'][comp_species].values())
ProcessCost = dict(
zip([self.ModelStructure.ProcessInfo.Elements[k]['ID'] for k in ProcIDs], Preq))
## Get required charged trna buildingblocks and their stoichiometry in protein ##
composition = self.ModelStructure.ProteinInfo.Elements[input['ID']]['AAcomposition']
## Extract the composition of charged trnas in terms of metabolic species ##
species = self.ModelStructure.ProcessInfo.Elements['Translation']['Components']
## Determine required metabolites and their stoichiometry in protein ##
MetaboliteCost = buildCompositionofUnusedProtein(
species=species, composition=composition)
## Assemble process and metabolite requirements into stoichiometric column vector ##
## And add to RBA_Matrix object ##
colToAdd = numpy.array(numpy.matrix(numpy.array(list(MetaboliteCost.values())+list(ProcessCost.values()) +
[float(1)]+[self.ModelStructure.ProteinInfo.Elements[input['ID']]['AAnumber']])).transpose())
UnusedProtein.A = scipy.sparse.coo_matrix(colToAdd)
## Add other information to RBA_Matrix object ##
UnusedProtein.row_names = list(MetaboliteCost.keys())+[str(pc+'_capacity') for pc in list(
ProcessCost.keys())]+['Protein_'+input['ID']]+[str(comp_species + '_density')]
UnusedProtein.b = numpy.zeros(len(UnusedProtein.row_names))
UnusedProtein.row_signs = ['E']*len(UnusedProtein.row_names)
UnusedProtein.LB = numpy.array([float(0)])
UnusedProtein.UB = numpy.array([float(100000.0)])
UnusedProtein.f = numpy.array([float(0)])
self.ProteinDilutionIndices = list(
zip(list(MetaboliteCost.keys()), UnusedProtein.col_names*len(list(MetaboliteCost.keys()))))
## Add free protein col to problem ##
self.Problem.LP.addMatrix(matrix=UnusedProtein)
## Add free protein col to reference Matrix (Mu == 1) ##
self.Problem.MuOneMatrix.addMatrix(matrix=UnusedProtein)
## Find coefficients of unused protein column, subject to dilution (Metabolite and Process cost) ##
## And add them to MuDepIndices_A ##
nonZeroEntries = numpy.where(UnusedProtein.A != 0)[0]
self.Problem.MuDepIndices_A += [(UnusedProtein.row_names[i], UnusedProtein.col_names[0]) for i in nonZeroEntries if UnusedProtein.row_names[i]
!= 'Protein_'+input['ID'] and UnusedProtein.row_names[i] not in self.Problem.CompartmentDensities]
self.setMu(self.Problem.Mu)
## !!! ##
def eukaryoticDensities(self, totalAA=3.1, CompartmentRelationships=True, CompartmentComponents=False):
Compartments = ['n', 'mIM', 'vM', 'mIMS', 'm', 'erM', 'mOM', 'x', 'c', 'cM', 'gM']
Signs = ['L', 'L', 'L', 'L', 'L', 'L', 'L', 'L', 'L', 'L', 'L']
totalAA = 3.1*0.71
m_mIM = 0.66
m_mIMS = 2
m_mOM = 8
DensityIndices = [self.Problem.LP.row_names.index(
i) for i in self.Problem.CompartmentDensities]
A = self.Problem.LP.A.toarray()
A[numpy.min(DensityIndices):numpy.max(DensityIndices)+1, :] /= totalAA
self.Problem.LP.A = scipy.sparse.coo_matrix(A)
A0 = self.Problem.MuOneMatrix.A.toarray()
A0[numpy.min(DensityIndices):numpy.max(DensityIndices)+1, :] /= totalAA
self.Problem.MuOneMatrix.A = scipy.sparse.coo_matrix(A0)
CompartmentMatrix = RBA_Matrix()
A = numpy.ones((len(Compartments)+1, len(Compartments)))
Eye = -numpy.eye(len(Compartments))
A[0:len(Compartments), :] = Eye
CompartmentMatrix.A = scipy.sparse.coo_matrix(A)
CompartmentMatrix.b = numpy.array([float(0)]*len(Compartments)+[float(1)])
CompartmentMatrix.f = numpy.array([float(0)]*len(Compartments))
CompartmentMatrix.LB = numpy.array([float(0)]*len(Compartments))
CompartmentMatrix.UB = numpy.array([float(1)]*len(Compartments))
CompartmentMatrix.row_signs = ['L']*len(Compartments)+['E']
# CompartmentMatrix.row_signs = ['E']*(len(Compartments)+1)
CompartmentMatrix.row_names = ['n_density', 'mIM_density', 'vM_density', 'mIMS_density', 'm_density',
'erM_density', 'mOM_density', 'x_density', 'cM_density', 'gM_density', 'c_density', 'TotalCapacity']
CompartmentMatrix.col_names = ['F_n', 'F_mIM', 'F_vM', 'F_mIMS',
'F_m', 'F_erM', 'F_mOM', 'F_x', 'F_cM', 'F_gM', 'F_c']
# CompartmentMatrix.row_signs[CompartmentMatrix.col_names.index('F_m')]='E'
if CompartmentRelationships:
Anew = numpy.zeros((A.shape[0]+3, A.shape[1]))
Anew[0:A.shape[0], :] = A
CompartmentMatrix.row_names += ['m_mIM', 'm_mIMS', 'm_mOM']
CompartmentMatrix.row_signs += ['E', 'E', 'E']
CompartmentMatrix.b = numpy.array(list(CompartmentMatrix.b)+[float(0)]*3)
Anew[CompartmentMatrix.row_names.index(
'm_mIM'), CompartmentMatrix.col_names.index('F_m')] = float(1)
Anew[CompartmentMatrix.row_names.index(
'm_mIMS'), CompartmentMatrix.col_names.index('F_m')] = float(1)
Anew[CompartmentMatrix.row_names.index(
'm_mOM'), CompartmentMatrix.col_names.index('F_m')] = float(1)
Anew[CompartmentMatrix.row_names.index(
'm_mIM'), CompartmentMatrix.col_names.index('F_mIM')] = -m_mIM
Anew[CompartmentMatrix.row_names.index(
'm_mIMS'), CompartmentMatrix.col_names.index('F_mIMS')] = -m_mIMS
Anew[CompartmentMatrix.row_names.index(
'm_mOM'), CompartmentMatrix.col_names.index('F_mOM')] = -m_mOM
CompartmentMatrix.A = scipy.sparse.coo_matrix(Anew)
self.Problem.LP.addMatrix(matrix=CompartmentMatrix)
self.Problem.MuOneMatrix.addMatrix(matrix=CompartmentMatrix)
if CompartmentComponents:
AlipidsA = numpy.zeros((7, len(Compartments)))
Alipids = RBA_Matrix()
Alipids.col_names = ['F_n', 'F_mIM', 'F_vM', 'F_mIMS',
'F_m', 'F_erM', 'F_mOM', 'F_x', 'F_cM', 'F_gM', 'F_c']
Alipids.row_names = ['M_pc_SC_c', 'M_pe_SC_c', 'M_ptd1ino_SC_c',
'M_ps_SC_c', 'M_clpn_SC_m', 'M_pa_SC_c', 'M_ergst_c']
Alipids.row_signs += ['E', 'E', 'E', 'E', 'E', 'E', 'E']
Alipids.b = numpy.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
Alipids.LB = numpy.array([float(0)]*len(Compartments))
Alipids.UB = numpy.array([float(1)]*len(Compartments))
Alipids.f = numpy.array([float(0)]*len(Compartments))
AlipidsA[Alipids.row_names.index('M_pc_SC_c'), Alipids.col_names.index(
'F_mIM')] = -0.0000883*totalAA
AlipidsA[Alipids.row_names.index('M_pe_SC_c'), Alipids.col_names.index(
'F_mIM')] = -0.00005852*totalAA
AlipidsA[Alipids.row_names.index('M_ptd1ino_SC_c'),
Alipids.col_names.index('F_mIM')] = -0.00003377*totalAA
AlipidsA[Alipids.row_names.index('M_ps_SC_c'), Alipids.col_names.index(
'F_mIM')] = -0.00000873*totalAA
AlipidsA[Alipids.row_names.index('M_clpn_SC_m'),
Alipids.col_names.index('F_mIM')] = -0.00002*totalAA
AlipidsA[Alipids.row_names.index('M_pa_SC_c'), Alipids.col_names.index(
'F_mIM')] = -0.0000039*totalAA
AlipidsA[Alipids.row_names.index(
'M_ergst_c'), Alipids.col_names.index('F_mIM')] = -0.008547*totalAA
self.Problem.MuDepIndices_A += [('M_pc_SC_c', 'F_mIM'), ('M_pe_SC_c', 'F_mIM'), ('M_ptd1ino_SC_c', 'F_mIM'),
('M_ps_SC_c', 'F_mIM'), ('M_clpn_SC_m', 'F_mIM'), ('M_pa_SC_c', 'F_mIM'), ('M_ergst_c', 'F_mIM')]
AlipidsA[Alipids.row_names.index(
'M_pc_SC_c'), Alipids.col_names.index('F_mOM')] = -0.000636*totalAA
AlipidsA[Alipids.row_names.index('M_pe_SC_c'), Alipids.col_names.index(
'F_mOM')] = -0.0004822*totalAA
AlipidsA[Alipids.row_names.index('M_ptd1ino_SC_c'),
Alipids.col_names.index('F_mOM')] = -0.0001289*totalAA
AlipidsA[Alipids.row_names.index('M_ps_SC_c'), Alipids.col_names.index(
'F_mOM')] = -0.0000167*totalAA
AlipidsA[Alipids.row_names.index('M_clpn_SC_m'), Alipids.col_names.index(
'F_mOM')] = -0.00004467*totalAA
AlipidsA[Alipids.row_names.index('M_pa_SC_c'), Alipids.col_names.index(
'F_mOM')] = -0.0000696*totalAA
self.Problem.MuDepIndices_A += [('M_pc_SC_c', 'F_mOM'), ('M_pe_SC_c', 'F_mOM'), ('M_ptd1ino_SC_c',
'F_mOM'), ('M_ps_SC_c', 'F_mOM'), ('M_clpn_SC_m', 'F_mOM'), ('M_pa_SC_c', 'F_mOM')]
Alipids.A = scipy.sparse.coo_matrix(AlipidsA)
Alipids.mapIndices()
self.Problem.LP.updateMatrix(Alipids, Ainds=[('M_pc_SC_c', 'F_mIM'), ('M_pe_SC_c', 'F_mIM'), ('M_ptd1ino_SC_c', 'F_mIM'), ('M_ps_SC_c', 'F_mIM'), ('M_clpn_SC_m', 'F_mIM'), ('M_pa_SC_c', 'F_mIM'), (
'M_ergst_c', 'F_mIM'), ('M_pc_SC_c', 'F_mOM'), ('M_pe_SC_c', 'F_mOM'), ('M_ptd1ino_SC_c', 'F_mOM'), ('M_ps_SC_c', 'F_mOM'), ('M_clpn_SC_m', 'F_mOM'), ('M_pa_SC_c', 'F_mOM')])
self.Problem.MuOneMatrix.updateMatrix(Alipids, Ainds=[('M_pc_SC_c', 'F_mIM'), ('M_pe_SC_c', 'F_mIM'), ('M_ptd1ino_SC_c', 'F_mIM'), ('M_ps_SC_c', 'F_mIM'), ('M_clpn_SC_m', 'F_mIM'), (
'M_pa_SC_c', 'F_mIM'), ('M_ergst_c', 'F_mIM'), ('M_pc_SC_c', 'F_mOM'), ('M_pe_SC_c', 'F_mOM'), ('M_ptd1ino_SC_c', 'F_mOM'), ('M_ps_SC_c', 'F_mOM'), ('M_clpn_SC_m', 'F_mOM'), ('M_pa_SC_c', 'F_mOM')])
## !!! ##
def eukaryoticDensities2(self, totalAA=3.1, CompartmentRelationships=True, CompartmentComponents=False):
Compartments = ['n', 'mIM', 'vM', 'mIMS', 'm', 'erM', 'mOM', 'x', 'c', 'cM', 'gM']
totalAA = 3.1*0.69
m_mIM = 1.11
m_mIMS = 0.7
m_mOM = 7.2
DensityIndices = [self.Problem.LP.row_names.index(
i) for i in self.Problem.CompartmentDensities]
A = self.Problem.LP.A.toarray()
A[numpy.min(DensityIndices):numpy.max(DensityIndices)+1, :] /= totalAA
self.Problem.LP.A = scipy.sparse.coo_matrix(A)
A0 = self.Problem.MuOneMatrix.A.toarray()
A0[numpy.min(DensityIndices):numpy.max(DensityIndices)+1, :] /= totalAA
self.Problem.MuOneMatrix.A = scipy.sparse.coo_matrix(A0)
CompartmentMatrix = RBA_Matrix()
A = numpy.ones((len(Compartments)+1, len(Compartments)))
Eye = -numpy.eye(len(Compartments))
A[0:len(Compartments), :] = Eye
CompartmentMatrix.A = scipy.sparse.coo_matrix(A)
CompartmentMatrix.b = numpy.array([float(0)]*len(Compartments)+[float(1)])
CompartmentMatrix.f = numpy.array([float(0)]*len(Compartments))
CompartmentMatrix.LB = numpy.array([float(0)]*len(Compartments))
CompartmentMatrix.UB = numpy.array([float(1)]*len(Compartments))
CompartmentMatrix.row_signs = ['L']*(len(Compartments)+1)
# CompartmentMatrix.row_signs = ['E']*(len(Compartments)+1)
CompartmentMatrix.row_names = ['n_density', 'mIM_density', 'vM_density', 'mIMS_density', 'm_density',
'erM_density', 'mOM_density', 'x_density', 'cM_density', 'gM_density', 'c_density', 'TotalCapacity']
CompartmentMatrix.col_names = ['F_n', 'F_mIM', 'F_vM', 'F_mIMS',
'F_m', 'F_erM', 'F_mOM', 'F_x', 'F_cM', 'F_gM', 'F_c']
# CompartmentMatrix.row_signs[CompartmentMatrix.col_names.index('F_m')]='E'
if CompartmentRelationships:
Anew = numpy.zeros((A.shape[0]+3, A.shape[1]))
Anew[0:A.shape[0], :] = A
CompartmentMatrix.row_names += ['m_mIM', 'm_mIMS', 'm_mOM']
CompartmentMatrix.row_signs += ['E', 'E', 'E']
CompartmentMatrix.b = numpy.array(list(CompartmentMatrix.b)+[float(0)]*3)
Anew[CompartmentMatrix.row_names.index(
'm_mIM'), CompartmentMatrix.col_names.index('F_m')] = float(1)
Anew[CompartmentMatrix.row_names.index(
'm_mIMS'), CompartmentMatrix.col_names.index('F_m')] = float(1)
Anew[CompartmentMatrix.row_names.index(
'm_mOM'), CompartmentMatrix.col_names.index('F_m')] = float(1)
Anew[CompartmentMatrix.row_names.index(
'm_mIM'), CompartmentMatrix.col_names.index('F_mIM')] = -m_mIM
Anew[CompartmentMatrix.row_names.index(
'm_mIMS'), CompartmentMatrix.col_names.index('F_mIMS')] = -m_mIMS
Anew[CompartmentMatrix.row_names.index(
'm_mOM'), CompartmentMatrix.col_names.index('F_mOM')] = -m_mOM
CompartmentMatrix.A = scipy.sparse.coo_matrix(Anew)
self.Problem.LP.addMatrix(matrix=CompartmentMatrix)
self.Problem.MuOneMatrix.addMatrix(matrix=CompartmentMatrix)
if CompartmentComponents:
PC_mIM = 0.0000883
PE_mIM = 0.00005852
PI_mIM = 0.00003377
PS_mIM = 0.00000873
CL_mIM = 0.00002
PA_mIM = 0.0000039
ES_mIM = 0.008547
PC_mOM = 0.000636
PE_mOM = 0.0004822
PI_mOM = 0.0001289
PS_mOM = 0.0000167
CL_mOM = 0.00004467
PA_mOM = 0.0000696
ES_mOM = 0.0
ConstraintMatrix = numpy.zeros((7, 0))
Alipids = RBA_Matrix()
Alipids.col_names = []
Alipids.row_names = ['M_pc_SC_c', 'M_pe_SC_c', 'M_ptd1ino_SC_c',
'M_ps_SC_c', 'M_clpn_SC_m', 'M_pa_SC_c', 'M_ergst_c']
Alipids.row_signs = [
self.Problem.LP.row_signs[self.Problem.LP.row_names.index(i)] for i in Alipids.row_names]
Alipids.b = numpy.array(
[self.Problem.LP.b[self.Problem.LP.row_names.index(i)] for i in Alipids.row_names])
Alipids.LB = numpy.array([])
Alipids.UB = numpy.array([])
Alipids.f = numpy.array([])
MudepIndices = []
for pc in self.ModelStructure.ProcessInfo.Elements.keys():
if self.ModelStructure.ProcessInfo.Elements[pc]['ID'] not in self.Problem.LP.col_names:
continue
ConstraintMatrixNew = numpy.zeros(
(ConstraintMatrix.shape[0], ConstraintMatrix.shape[1]+1))
ConstraintMatrixNew[:, 0:ConstraintMatrix.shape[1]] = ConstraintMatrix
Alipids.col_names.append(self.ModelStructure.ProcessInfo.Elements[pc]['ID'])
# Alipids.LB = numpy.array(list(Alipids.LB).append(list(self.Problem.LP.LB)[
# self.Problem.LP.col_names.index(self.ModelStructure.ProcessInfo.Elements[pc]['ID'])]))
# Alipids.UB = numpy.array(list(Alipids.UB).append(list(self.Problem.LP.UB)[
# self.Problem.LP.col_names.index(self.ModelStructure.ProcessInfo.Elements[pc]['ID'])]))
# Alipids.f = numpy.array(list(Alipids.f).append(list(self.Problem.LP.f)[
# self.Problem.LP.col_names.index(self.ModelStructure.ProcessInfo.Elements[pc]['ID'])]))
Alipids.LB = numpy.concatenate([Alipids.LB, numpy.array(
list(self.Problem.LP.LB)[self.Problem.LP.col_names.index(self.ModelStructure.ProcessInfo.Elements[pc]['ID'])])])
Alipids.UB = numpy.concatenate([Alipids.UB, numpy.array(
list(self.Problem.LP.UB)[self.Problem.LP.col_names.index(self.ModelStructure.ProcessInfo.Elements[pc]['ID'])])])
Alipids.f = numpy.concatenate([Alipids.f, numpy.array(
list(self.Problem.LP.f)[self.Problem.LP.col_names.index(self.ModelStructure.ProcessInfo.Elements[pc]['ID'])])])
for p in self.ModelStructure.ProcessInfo.Elements[pc]['Composition'].keys():
lE = sum(list(self.ModelStructure.ProteinInfo.Elements[p]['AAcomposition'].values(
)))*self.ModelStructure.ProcessInfo.Elements[pc]['Composition'][p]
if self.ModelStructure.ProteinInfo.Elements[p]['Compartment'] == 'mOM':
                        # index the lipid constraints via row_names (the metabolites are rows, not columns)
                        ConstraintMatrixNew[Alipids.row_names.index('M_pc_SC_c'), ConstraintMatrix.shape[1]] -= PC_mOM*lE/totalAA
                        ConstraintMatrixNew[Alipids.row_names.index('M_pe_SC_c'), ConstraintMatrix.shape[1]] -= PE_mOM*lE/totalAA
                        ConstraintMatrixNew[Alipids.row_names.index('M_ptd1ino_SC_c'), ConstraintMatrix.shape[1]] -= PI_mOM*lE/totalAA
                        ConstraintMatrixNew[Alipids.row_names.index('M_ps_SC_c'), ConstraintMatrix.shape[1]] -= PS_mOM*lE/totalAA
                        ConstraintMatrixNew[Alipids.row_names.index('M_clpn_SC_m'), ConstraintMatrix.shape[1]] -= CL_mOM*lE/totalAA
                        ConstraintMatrixNew[Alipids.row_names.index('M_pa_SC_c'), ConstraintMatrix.shape[1]] -= PA_mOM*lE/totalAA
                        # store (constraint, variable) index pairs; '+=' with a bare tuple would
                        # flatten it into two separate strings
                        MudepIndices.append(('M_pc_SC_c', self.ModelStructure.ProcessInfo.Elements[pc]['ID']))
                        MudepIndices.append(('M_pe_SC_c', self.ModelStructure.ProcessInfo.Elements[pc]['ID']))
                        MudepIndices.append(('M_ptd1ino_SC_c', self.ModelStructure.ProcessInfo.Elements[pc]['ID']))
                        MudepIndices.append(('M_ps_SC_c', self.ModelStructure.ProcessInfo.Elements[pc]['ID']))
                        MudepIndices.append(('M_clpn_SC_m', self.ModelStructure.ProcessInfo.Elements[pc]['ID']))
                        MudepIndices.append(('M_pa_SC_c', self.ModelStructure.ProcessInfo.Elements[pc]['ID']))
elif self.ModelStructure.ProteinInfo.Elements[p]['Compartment'] == 'mIM':
                        ConstraintMatrixNew[Alipids.row_names.index('M_pc_SC_c'), ConstraintMatrix.shape[1]] -= PC_mIM*lE/totalAA
                        ConstraintMatrixNew[Alipids.row_names.index('M_pe_SC_c'), ConstraintMatrix.shape[1]] -= PE_mIM*lE/totalAA
                        ConstraintMatrixNew[Alipids.row_names.index('M_ptd1ino_SC_c'), ConstraintMatrix.shape[1]] -= PI_mIM*lE/totalAA
                        ConstraintMatrixNew[Alipids.row_names.index('M_ps_SC_c'), ConstraintMatrix.shape[1]] -= PS_mIM*lE/totalAA
                        ConstraintMatrixNew[Alipids.row_names.index('M_clpn_SC_m'), ConstraintMatrix.shape[1]] -= CL_mIM*lE/totalAA
                        ConstraintMatrixNew[Alipids.row_names.index('M_pa_SC_c'), ConstraintMatrix.shape[1]] -= PA_mIM*lE/totalAA
                        ConstraintMatrixNew[Alipids.row_names.index('M_ergst_c'), ConstraintMatrix.shape[1]] -= ES_mIM*lE/totalAA
                        MudepIndices.append(('M_pc_SC_c', self.ModelStructure.ProcessInfo.Elements[pc]['ID']))
                        MudepIndices.append(('M_pe_SC_c', self.ModelStructure.ProcessInfo.Elements[pc]['ID']))
                        MudepIndices.append(('M_ptd1ino_SC_c', self.ModelStructure.ProcessInfo.Elements[pc]['ID']))
                        MudepIndices.append(('M_ps_SC_c', self.ModelStructure.ProcessInfo.Elements[pc]['ID']))
                        MudepIndices.append(('M_clpn_SC_m', self.ModelStructure.ProcessInfo.Elements[pc]['ID']))
                        MudepIndices.append(('M_pa_SC_c', self.ModelStructure.ProcessInfo.Elements[pc]['ID']))
                        MudepIndices.append(('M_ergst_c', self.ModelStructure.ProcessInfo.Elements[pc]['ID']))
ConstraintMatrix = ConstraintMatrixNew
for e in self.ModelStructure.EnzymeInfo.Elements.keys():
if e not in self.Problem.LP.col_names:
continue
ConstraintMatrixNew = numpy.zeros(
(ConstraintMatrix.shape[0], ConstraintMatrix.shape[1]+1))
ConstraintMatrixNew[:, 0:ConstraintMatrix.shape[1]] = ConstraintMatrix
Alipids.col_names.append(e)
# xnew = list(self.Problem.LP.LB)[self.Problem.LP.col_names.index(e)]
                # wrap the scalar bound in a list: numpy.concatenate cannot handle 0-d arrays
                enzyme_col = self.Problem.LP.col_names.index(e)
                Alipids.LB = numpy.concatenate([Alipids.LB, numpy.array([list(self.Problem.LP.LB)[enzyme_col]])])
                Alipids.UB = numpy.concatenate([Alipids.UB, numpy.array([list(self.Problem.LP.UB)[enzyme_col]])])
                Alipids.f = numpy.concatenate([Alipids.f, numpy.array([list(self.Problem.LP.f)[enzyme_col]])])
# Alipids.LB = numpy.array(list(Alipids.LB).append(xnew))
# Alipids.UB = numpy.array(list(Alipids.UB).append(
# list(self.Problem.LP.UB)[self.Problem.LP.col_names.index(e)]))
# Alipids.f = numpy.array(list(Alipids.f).append(
# list(self.Problem.LP.f)[self.Problem.LP.col_names.index(e)]))
for p in self.ModelStructure.EnzymeInfo.Elements[e]['Subunits'].keys():
lE = sum(
list(self.ModelStructure.ProteinInfo.Elements[p]['AAcomposition'].values()))
lE *= self.ModelStructure.EnzymeInfo.Elements[e]['Subunits'][p]['StochFac']
if self.ModelStructure.ProteinInfo.Elements[p]['Compartment'] == 'mOM':
                        ConstraintMatrixNew[Alipids.row_names.index('M_pc_SC_c'), ConstraintMatrix.shape[1]] -= PC_mOM*lE/totalAA
                        ConstraintMatrixNew[Alipids.row_names.index('M_pe_SC_c'), ConstraintMatrix.shape[1]] -= PE_mOM*lE/totalAA
                        ConstraintMatrixNew[Alipids.row_names.index('M_ptd1ino_SC_c'), ConstraintMatrix.shape[1]] -= PI_mOM*lE/totalAA
                        ConstraintMatrixNew[Alipids.row_names.index('M_ps_SC_c'), ConstraintMatrix.shape[1]] -= PS_mOM*lE/totalAA
                        ConstraintMatrixNew[Alipids.row_names.index('M_clpn_SC_m'), ConstraintMatrix.shape[1]] -= CL_mOM*lE/totalAA
                        ConstraintMatrixNew[Alipids.row_names.index('M_pa_SC_c'), ConstraintMatrix.shape[1]] -= PA_mOM*lE/totalAA
                        MudepIndices.append(('M_pc_SC_c', e))
                        MudepIndices.append(('M_pe_SC_c', e))
                        MudepIndices.append(('M_ptd1ino_SC_c', e))
                        MudepIndices.append(('M_ps_SC_c', e))
                        MudepIndices.append(('M_clpn_SC_m', e))
                        MudepIndices.append(('M_pa_SC_c', e))
elif self.ModelStructure.ProteinInfo.Elements[p]['Compartment'] == 'mIM':
                        ConstraintMatrixNew[Alipids.row_names.index('M_pc_SC_c'), ConstraintMatrix.shape[1]] -= PC_mIM*lE/totalAA
                        ConstraintMatrixNew[Alipids.row_names.index('M_pe_SC_c'), ConstraintMatrix.shape[1]] -= PE_mIM*lE/totalAA
                        ConstraintMatrixNew[Alipids.row_names.index('M_ptd1ino_SC_c'), ConstraintMatrix.shape[1]] -= PI_mIM*lE/totalAA
                        ConstraintMatrixNew[Alipids.row_names.index('M_ps_SC_c'), ConstraintMatrix.shape[1]] -= PS_mIM*lE/totalAA
                        ConstraintMatrixNew[Alipids.row_names.index('M_clpn_SC_m'), ConstraintMatrix.shape[1]] -= CL_mIM*lE/totalAA
                        ConstraintMatrixNew[Alipids.row_names.index('M_pa_SC_c'), ConstraintMatrix.shape[1]] -= PA_mIM*lE/totalAA
                        ConstraintMatrixNew[Alipids.row_names.index('M_ergst_c'), ConstraintMatrix.shape[1]] -= ES_mIM*lE/totalAA
                        MudepIndices.append(('M_pc_SC_c', e))
                        MudepIndices.append(('M_pe_SC_c', e))
                        MudepIndices.append(('M_ptd1ino_SC_c', e))
                        MudepIndices.append(('M_ps_SC_c', e))
                        MudepIndices.append(('M_clpn_SC_m', e))
                        MudepIndices.append(('M_pa_SC_c', e))
                        MudepIndices.append(('M_ergst_c', e))
ConstraintMatrix = ConstraintMatrixNew
self.Problem.MuDepIndices_A += MudepIndices
Alipids.A = scipy.sparse.coo_matrix(ConstraintMatrix)
Alipids.mapIndices()
self.Problem.LP.updateMatrix(Alipids, Ainds=MudepIndices)
            self.Problem.MuOneMatrix.updateMatrix(Alipids, Ainds=MudepIndices)
## !!! ##
def eukaryoticDensities3(self, totalAA=3.1, VolumeFraction=False, CompartmentRelationships=True, CompartmentComponents=False):
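        # Adds compartment occupation, volume-fraction and (optionally) membrane lipid
        # composition constraints for the listed eukaryotic compartments; mitochondrial
        # sub-compartments (mIM, mIMS, mOM) are tied to 'm' via the fixed ratios below.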
Compartments = ['n', 'mIM', 'vM', 'mIMS', 'm', 'erM', 'mOM', 'x', 'c', 'cM', 'gM']
totalAA = 3.1*0.91
m_mIM = 0.66
m_mIMS = 2
m_mOM = 8
DensityIndices = [self.Problem.LP.row_names.index(
i) for i in self.Problem.CompartmentDensities]
A = self.Problem.LP.A.toarray()
# A[numpy.min(DensityIndices):numpy.max(DensityIndices)+1, :] /= totalAA
# self.Problem.LP.A = scipy.sparse.coo_matrix(A)
A0 = self.Problem.MuOneMatrix.A.toarray()
# A0[numpy.min(DensityIndices):numpy.max(DensityIndices)+1, :] /= totalAA
# self.Problem.MuOneMatrix.A = scipy.sparse.coo_matrix(A0)
OccupationMatrix = RBA_Matrix()
# A = numpy.ones((len(Compartments)+1, len(Compartments)))
A = -numpy.eye(len(Compartments))
# Eye = -numpy.eye(len(Compartments))
# A[0:len(Compartments), :] = Eye
OccupationMatrix.A = scipy.sparse.coo_matrix(A)
# OccupationMatrix.b = numpy.array([-0.209*totalAA]+[float(0)]*(len(Compartments)-1)+[float(totalAA)])
OccupationMatrix.b = numpy.array([-0.209*totalAA]+[float(0)]*(len(Compartments)-1))
OccupationMatrix.f = numpy.array([float(0)]*len(Compartments))
OccupationMatrix.LB = numpy.array([float(0)]*len(Compartments))
OccupationMatrix.UB = numpy.array([float(totalAA)]*len(Compartments))
# OccupationMatrix.row_signs = ['E']*(len(Compartments))+['L']
OccupationMatrix.row_signs = ['E']*(len(Compartments))
# OccupationMatrix.row_names = ['n_density', 'mIM_density', 'vM_density', 'mIMS_density', 'm_density',
# 'erM_density', 'mOM_density', 'x_density', 'cM_density', 'gM_density', 'c_density', 'TotalProtein']
OccupationMatrix.row_names = ['n_density', 'mIM_density', 'vM_density', 'mIMS_density',
'm_density', 'erM_density', 'mOM_density', 'x_density', 'cM_density', 'gM_density', 'c_density']
OccupationMatrix.col_names = ['O_n', 'O_mIM', 'O_vM', 'O_mIMS',
'O_m', 'O_erM', 'O_mOM', 'O_x', 'O_cM', 'O_gM', 'O_c']
# CompartmentMatrix.row_signs[CompartmentMatrix.col_names.index('F_m')]='E'
OccupationMatrix.mapIndices()
self.Problem.LP.addMatrix(matrix=OccupationMatrix)
self.Problem.MuOneMatrix.addMatrix(matrix=OccupationMatrix)
CompartmentMatrix = RBA_Matrix()
if VolumeFraction:
A = numpy.eye(len(Compartments))*5/float(totalAA)
else:
A = numpy.eye(len(Compartments))/float(totalAA)
CompartmentMatrix.A = scipy.sparse.coo_matrix(A)
CompartmentMatrix.b = numpy.array([float(0)]*len(Compartments))
CompartmentMatrix.f = numpy.array([float(0)]*len(Compartments))
CompartmentMatrix.LB = numpy.array([float(0)]*len(Compartments))
CompartmentMatrix.UB = numpy.array([float(totalAA)]*len(Compartments))
CompartmentMatrix.row_signs = ['L']*(len(Compartments))
# CompartmentMatrix.row_signs = ['E']*(len(Compartments))
CompartmentMatrix.row_names = ['n_volume', 'mIM_volume', 'vM_volume', 'mIMS_volume',
'm_volume', 'erM_volume', 'mOM_volume', 'x_volume', 'cM_volume', 'gM_volume', 'c_volume']
CompartmentMatrix.col_names = ['O_n', 'O_mIM', 'O_vM', 'O_mIMS',
'O_m', 'O_erM', 'O_mOM', 'O_x', 'O_cM', 'O_gM', 'O_c']
CompartmentMatrix.mapIndices()
self.Problem.LP.addMatrix(matrix=CompartmentMatrix)
self.Problem.MuOneMatrix.addMatrix(matrix=CompartmentMatrix)
VolumeMatrix = RBA_Matrix()
A = numpy.ones((len(Compartments)+1, len(Compartments)))
Eye = -numpy.eye(len(Compartments))
A[0:len(Compartments), :] = Eye
# A[len(Compartments), [1, 5, 6, 8, 9]] = 0
# A[len(Compartments), 8] = 0
VolumeMatrix.A = scipy.sparse.coo_matrix(A)
VolumeMatrix.b = numpy.array([float(0)]*len(Compartments)+[float(1)])
VolumeMatrix.f = numpy.array([float(0)]*len(Compartments))
VolumeMatrix.LB = numpy.array([float(0)]*len(Compartments))
VolumeMatrix.UB = numpy.array([float(1)]*len(Compartments))
VolumeMatrix.row_signs = ['L']*(len(Compartments))+['E']
# VolumeMatrix.row_signs = ['E']*(len(Compartments))+['E']
VolumeMatrix.row_names = ['n_volume', 'mIM_volume', 'vM_volume', 'mIMS_volume', 'm_volume',
'erM_volume', 'mOM_volume', 'x_volume', 'cM_volume', 'gM_volume', 'c_volume', 'TotalVolume']
VolumeMatrix.col_names = ['F_n', 'F_mIM', 'F_vM', 'F_mIMS',
'F_m', 'F_erM', 'F_mOM', 'F_x', 'F_cM', 'F_gM', 'F_c']
if not CompartmentRelationships:
VolumeMatrix.mapIndices()
self.Problem.LP.addMatrix(matrix=VolumeMatrix)
self.Problem.MuOneMatrix.addMatrix(matrix=VolumeMatrix)
if CompartmentRelationships:
Anew = numpy.zeros((A.shape[0]+3, A.shape[1]))
Anew[0:A.shape[0], :] = A
VolumeMatrix.row_names += ['m_mIM', 'm_mIMS', 'm_mOM']
VolumeMatrix.row_signs += ['E', 'E', 'E']
VolumeMatrix.b = numpy.array(list(VolumeMatrix.b)+[float(0)]*3)
Anew[VolumeMatrix.row_names.index(
'm_mIM'), VolumeMatrix.col_names.index('F_m')] = float(1)
Anew[VolumeMatrix.row_names.index(
'm_mIMS'), VolumeMatrix.col_names.index('F_m')] = float(1)
Anew[VolumeMatrix.row_names.index(
'm_mOM'), VolumeMatrix.col_names.index('F_m')] = float(1)
Anew[VolumeMatrix.row_names.index(
'm_mIM'), VolumeMatrix.col_names.index('F_mIM')] = -m_mIM
Anew[VolumeMatrix.row_names.index(
'm_mIMS'), VolumeMatrix.col_names.index('F_mIMS')] = -m_mIMS
Anew[VolumeMatrix.row_names.index(
'm_mOM'), VolumeMatrix.col_names.index('F_mOM')] = -m_mOM
VolumeMatrix.A = scipy.sparse.coo_matrix(Anew)
VolumeMatrix.mapIndices()
self.Problem.LP.addMatrix(matrix=VolumeMatrix)
self.Problem.MuOneMatrix.addMatrix(matrix=VolumeMatrix)
if CompartmentComponents:
PC_mIM = 0.0000883
PE_mIM = 0.00005852
PI_mIM = 0.00003377
PS_mIM = 0.00000873
CL_mIM = 0.00002
PA_mIM = 0.0000039
ES_mIM = 0.008547
PC_mOM = 0.000636
PE_mOM = 0.0004822
PI_mOM = 0.0001289
PS_mOM = 0.0000167
CL_mOM = 0.00004467
PA_mOM = 0.0000696
ES_mOM = 0.0
PC_vM = 0.0003635
PE_vM = 0.4156
PI_vM = 0.0001297
PS_vM = 0.00003435
CL_vM = 0.0000068
PA_vM = 0.0000186
ES_vM = 0.0142
PC_n = 0.000055
PE_n = 0.000035
PI_n = 0.000017
PS_n = 0.0000072
CL_n = 0.0
PA_n = 0.0000031
ES_n = 0.0086
PC_gM = 0.00043
PE_gM = 0.00044
PI_gM = 0.00041
PS_gM = 0.0
CL_gM = 0.00022
PA_gM = 0.0
ES_gM = 0.0
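            # NOTE: every lipid/ergosterol coefficient defined above is overwritten with
            # zero below, so the Alipids constraints built from them are effectively inactive.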
PC_n = 0.0
PE_n = 0.0
PI_n = 0.0
PS_n = 0.0
CL_n = 0.0
PA_n = 0.0
ES_n = 0.0
PC_gM = 0.0
PE_gM = 0.0
PI_gM = 0.0
PS_gM = 0.0
CL_gM = 0.0
PA_gM = 0.0
ES_gM = 0.0
PC_vM = 0.0
PE_vM = 0.0
PI_vM = 0.0
PS_vM = 0.0
CL_vM = 0.0
PA_vM = 0.0
ES_vM = 0.0
PC_mIM = 0.0
PE_mIM = 0.0
PI_mIM = 0.0
PS_mIM = 0.0
CL_mIM = 0.0
PA_mIM = 0.0
ES_mIM = 0.0
PC_mOM = 0.0
PE_mOM = 0.0
PI_mOM = 0.0
PS_mOM = 0.0
CL_mOM = 0.0
PA_mOM = 0.0
ES_mOM = 0.0
Alipids = RBA_Matrix()
Alipids.col_names = ['F_mIM', 'F_mOM', 'F_vM', 'F_n', 'F_gM']
Alipids.row_names = ['M_pc_SC_c', 'M_pe_SC_c', 'M_ptd1ino_SC_c',
'M_ps_SC_c', 'M_clpn_SC_m', 'M_pa_SC_c', 'M_ergst_c']
Alipids.row_signs = [
self.Problem.LP.row_signs[self.Problem.LP.row_names.index(i)] for i in Alipids.row_names]
Alipids.b = numpy.array(
[self.Problem.LP.b[self.Problem.LP.row_names.index(i)] for i in Alipids.row_names])
Alipids.LB = numpy.array([0, 0, 0, 0, 0])
Alipids.UB = numpy.array([1, 1, 1, 1, 1])
Alipids.f = numpy.array([0, 0, 0, 0, 0])
LipidMatrix = numpy.zeros((7, 5))
LipidMatrix[Alipids.row_names.index(
'M_pc_SC_c'), Alipids.col_names.index('F_mIM')] = PC_mIM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_pe_SC_c'), Alipids.col_names.index('F_mIM')] = PE_mIM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_ptd1ino_SC_c'), Alipids.col_names.index('F_mIM')] = PI_mIM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_ps_SC_c'), Alipids.col_names.index('F_mIM')] = PS_mIM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_clpn_SC_m'), Alipids.col_names.index('F_mIM')] = CL_mIM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_pa_SC_c'), Alipids.col_names.index('F_mIM')] = PA_mIM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_ergst_c'), Alipids.col_names.index('F_mIM')] = ES_mIM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_pc_SC_c'), Alipids.col_names.index('F_mOM')] = PC_mOM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_pe_SC_c'), Alipids.col_names.index('F_mOM')] = PE_mOM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_ptd1ino_SC_c'), Alipids.col_names.index('F_mOM')] = PI_mOM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_ps_SC_c'), Alipids.col_names.index('F_mOM')] = PS_mOM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_clpn_SC_m'), Alipids.col_names.index('F_mOM')] = CL_mOM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_pa_SC_c'), Alipids.col_names.index('F_mOM')] = PA_mOM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_ergst_c'), Alipids.col_names.index('F_mOM')] = ES_mOM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_pc_SC_c'), Alipids.col_names.index('F_vM')] = PC_vM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_pe_SC_c'), Alipids.col_names.index('F_vM')] = PE_vM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_ptd1ino_SC_c'), Alipids.col_names.index('F_vM')] = PI_vM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_ps_SC_c'), Alipids.col_names.index('F_vM')] = PS_vM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_clpn_SC_m'), Alipids.col_names.index('F_vM')] = CL_vM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_pa_SC_c'), Alipids.col_names.index('F_vM')] = PA_vM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_ergst_c'), Alipids.col_names.index('F_vM')] = ES_vM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_pc_SC_c'), Alipids.col_names.index('F_n')] = PC_n/totalAA
LipidMatrix[Alipids.row_names.index(
'M_pe_SC_c'), Alipids.col_names.index('F_n')] = PE_n/totalAA
LipidMatrix[Alipids.row_names.index(
'M_ptd1ino_SC_c'), Alipids.col_names.index('F_n')] = PI_n/totalAA
LipidMatrix[Alipids.row_names.index(
'M_ps_SC_c'), Alipids.col_names.index('F_n')] = PS_n/totalAA
LipidMatrix[Alipids.row_names.index(
'M_clpn_SC_m'), Alipids.col_names.index('F_n')] = CL_n/totalAA
LipidMatrix[Alipids.row_names.index(
'M_pa_SC_c'), Alipids.col_names.index('F_n')] = PA_n/totalAA
LipidMatrix[Alipids.row_names.index(
'M_ergst_c'), Alipids.col_names.index('F_n')] = ES_n/totalAA
LipidMatrix[Alipids.row_names.index(
'M_pc_SC_c'), Alipids.col_names.index('F_gM')] = PC_gM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_pe_SC_c'), Alipids.col_names.index('F_gM')] = PE_gM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_ptd1ino_SC_c'), Alipids.col_names.index('F_gM')] = PI_gM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_ps_SC_c'), Alipids.col_names.index('F_gM')] = PS_gM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_clpn_SC_m'), Alipids.col_names.index('F_gM')] = CL_gM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_pa_SC_c'), Alipids.col_names.index('F_gM')] = PA_gM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_ergst_c'), Alipids.col_names.index('F_gM')] = ES_gM/totalAA
MudepIndices = [('M_pc_SC_c', i) for i in Alipids.col_names]+[('M_pe_SC_c', i) for i in Alipids.col_names]+[('M_ptd1ino_SC_c', i) for i in Alipids.col_names]+[('M_ps_SC_c', i)
for i in Alipids.col_names]+[('M_clpn_SC_m', i) for i in Alipids.col_names]+[('M_pa_SC_c', i) for i in Alipids.col_names]+[('M_ergst_c', i) for i in Alipids.col_names]
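            # register every (lipid metabolite, compartment fraction) coefficient as growth-rate dependent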
self.Problem.MuDepIndices_A += MudepIndices
Alipids.A = scipy.sparse.coo_matrix(LipidMatrix)
Alipids.mapIndices()
self.Problem.LP.updateMatrix(Alipids, Ainds=MudepIndices)
self.Problem.MuOneMatrix.updateMatrix(Alipids, Ainds=MudepIndices)
## !!! ##
def eukaryoticDensities4(self, CompartmentRelationships=True):
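        # Density setup that couples the mitochondrial occupation variables (O_mIM, O_mIMS,
        # O_mOM) to O_m and moves the density right-hand sides into growth-rate dependent
        # parameters (MuDependencies) instead of fixed values.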
Compartments = ['n', 'mIM', 'vM', 'mIMS', 'm', 'erM', 'mOM', 'x', 'c', 'cM', 'gM']
totalAA = 3.1*0.91
DensityIndices = [self.Problem.LP.row_names.index(
i) for i in self.Problem.CompartmentDensities]
A = self.Problem.LP.A.toarray()
A0 = self.Problem.MuOneMatrix.A.toarray()
OccupationMatrix = RBA_Matrix()
A = numpy.ones((len(Compartments)+1, len(Compartments)))
Eye = -numpy.eye(len(Compartments))
A[0:len(Compartments), :] = Eye
OccupationMatrix.b = numpy.array(list([float(0)]*len(Compartments))+[totalAA])
OccupationMatrix.f = numpy.array([float(0)]*len(Compartments))
OccupationMatrix.LB = numpy.array([float(0)]*len(Compartments))
OccupationMatrix.UB = numpy.array([float(totalAA)]*len(Compartments))
OccupationMatrix.row_signs = ['E']*(len(Compartments)+1)
OccupationMatrix.row_names = ['n_density', 'mIM_density', 'vM_density', 'mIMS_density',
'm_density', 'erM_density', 'mOM_density', 'x_density', 'cM_density', 'gM_density', 'c_density', 'O_total']
OccupationMatrix.col_names = ['O_n', 'O_mIM', 'O_vM', 'O_mIMS',
'O_m', 'O_erM', 'O_mOM', 'O_x', 'O_cM', 'O_gM', 'O_c']
if CompartmentRelationships:
m_mIM = 0.5
m_mIMS = 1
m_mOM = 5
Anew = numpy.zeros((A.shape[0]+3, A.shape[1]))
Anew[0:A.shape[0], :] = A
OccupationMatrix.row_names += ['m_mIM', 'm_mIMS', 'm_mOM']
OccupationMatrix.row_signs += ['E', 'E', 'E']
OccupationMatrix.b = numpy.array(list(OccupationMatrix.b)+[float(0)]*3)
Anew[OccupationMatrix.row_names.index(
'm_mIM'), OccupationMatrix.col_names.index('O_m')] = float(1)
Anew[OccupationMatrix.row_names.index(
'm_mIMS'), OccupationMatrix.col_names.index('O_m')] = float(1)
Anew[OccupationMatrix.row_names.index(
'm_mOM'), OccupationMatrix.col_names.index('O_m')] = float(1)
Anew[OccupationMatrix.row_names.index(
'm_mIM'), OccupationMatrix.col_names.index('O_mIM')] = -m_mIM
Anew[OccupationMatrix.row_names.index(
'm_mIMS'), OccupationMatrix.col_names.index('O_mIMS')] = -m_mIMS
Anew[OccupationMatrix.row_names.index(
'm_mOM'), OccupationMatrix.col_names.index('O_mOM')] = -m_mOM
OccupationMatrix.A = scipy.sparse.coo_matrix(Anew)
else:
OccupationMatrix.A = scipy.sparse.coo_matrix(A)
OccupationMatrix.mapIndices()
self.Problem.LP.addMatrix(matrix=OccupationMatrix)
self.Problem.MuOneMatrix.addMatrix(matrix=OccupationMatrix)
        # MuDependencies['FromParameters']['b'] entries may be a plain parameter name
        # ({'Index': 'Param1'}), a signed sum ({'Index': {'Param1': '+', 'Param2': '-'}}, type 'Sum'),
        # or an equation specification ({'Index': {'Equation': ..., 'Variables': [...]}}) as used below.
self.Problem.MuDependencies['FromParameters']['b'].update(
{'n_density': 'AAres_PG_nucleus_DNA'})
self.Problem.MuDependencies['FromParameters']['b'].update(
{'O_total': {'Equation': 'amino_acid_concentration_total - AAres_PG_secreted_Euk', 'Variables': ['amino_acid_concentration_total', 'AAres_PG_secreted_Euk']}})
self.Problem.MuDependencies['FromMatrix']['b'].remove('n_density')
self.Problem.MuDependencies['FromMatrix']['b'].remove('mIM_density')
self.Problem.MuDependencies['FromMatrix']['b'].remove('vM_density')
self.Problem.MuDependencies['FromMatrix']['b'].remove('mIMS_density')
self.Problem.MuDependencies['FromMatrix']['b'].remove('m_density')
self.Problem.MuDependencies['FromMatrix']['b'].remove('erM_density')
self.Problem.MuDependencies['FromMatrix']['b'].remove('mOM_density')
self.Problem.MuDependencies['FromMatrix']['b'].remove('x_density')
self.Problem.MuDependencies['FromMatrix']['b'].remove('cM_density')
self.Problem.MuDependencies['FromMatrix']['b'].remove('gM_density')
self.Problem.MuDependencies['FromMatrix']['b'].remove('c_density')
## !!! ##
def eukaryoticDensities_calibration(self, CompartmentRelationships=False, mitoProportions={}, amino_acid_concentration_total='amino_acid_concentration_total'):
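        # Calibration variant: mitochondrial proportions come from the mitoProportions
        # argument and each compartment density right-hand side is tied to its
        # 'nonenzymatic_proteins_*' parameter via MuDependencies.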
Compartments = ['n', 'mIM', 'vM', 'mIMS', 'm', 'erM', 'mOM', 'x', 'c', 'cM', 'gM']
totalAA_parameter = amino_acid_concentration_total
totalAA = 3.1
DensityIndices = [self.Problem.LP.row_names.index(
i) for i in self.Problem.CompartmentDensities]
A = self.Problem.LP.A.toarray()
A0 = self.Problem.MuOneMatrix.A.toarray()
OccupationMatrix = RBA_Matrix()
A = numpy.ones((len(Compartments)+1, len(Compartments)))
Eye = -numpy.eye(len(Compartments))
A[0:len(Compartments), :] = Eye
OccupationMatrix.b = numpy.array(list([float(0)]*len(Compartments))+[totalAA])
OccupationMatrix.f = numpy.array([float(0)]*len(Compartments))
OccupationMatrix.LB = numpy.array([float(0)]*len(Compartments))
OccupationMatrix.UB = numpy.array([float(totalAA)]*len(Compartments))
OccupationMatrix.row_signs = ['E']*(len(Compartments)+1)
OccupationMatrix.row_names = ['n_density', 'mIM_density', 'vM_density', 'mIMS_density',
'm_density', 'erM_density', 'mOM_density', 'x_density', 'cM_density', 'gM_density', 'c_density', 'O_total']
OccupationMatrix.col_names = ['O_n', 'O_mIM', 'O_vM', 'O_mIMS',
'O_m', 'O_erM', 'O_mOM', 'O_x', 'O_cM', 'O_gM', 'O_c']
if CompartmentRelationships:
if len(list(mitoProportions.keys())) == 3:
m_mIM = mitoProportions['m_mIM']
m_mIMS = mitoProportions['m_mIMS']
m_mOM = mitoProportions['m_mOM']
Anew = numpy.zeros((A.shape[0]+3, A.shape[1]))
Anew[0:A.shape[0], :] = A
OccupationMatrix.row_names += ['m_mIM', 'm_mIMS', 'm_mOM']
OccupationMatrix.row_signs += ['E', 'E', 'E']
OccupationMatrix.b = numpy.array(list(OccupationMatrix.b)+[float(0)]*3)
Anew[OccupationMatrix.row_names.index(
'm_mIM'), OccupationMatrix.col_names.index('O_m')] = float(1)
Anew[OccupationMatrix.row_names.index(
'm_mIMS'), OccupationMatrix.col_names.index('O_m')] = float(1)
Anew[OccupationMatrix.row_names.index(
'm_mOM'), OccupationMatrix.col_names.index('O_m')] = float(1)
Anew[OccupationMatrix.row_names.index(
'm_mIM'), OccupationMatrix.col_names.index('O_mIM')] = -m_mIM
Anew[OccupationMatrix.row_names.index(
'm_mIMS'), OccupationMatrix.col_names.index('O_mIMS')] = -m_mIMS
Anew[OccupationMatrix.row_names.index(
'm_mOM'), OccupationMatrix.col_names.index('O_mOM')] = -m_mOM
OccupationMatrix.A = scipy.sparse.coo_matrix(Anew)
else:
OccupationMatrix.A = scipy.sparse.coo_matrix(A)
OccupationMatrix.mapIndices()
self.Problem.LP.addMatrix(matrix=OccupationMatrix)
self.Problem.MuOneMatrix.addMatrix(matrix=OccupationMatrix)
        # MuDependencies['FromParameters']['b'] entries may be a plain parameter name
        # ({'Index': 'Param1'}), a signed sum ({'Index': {'Param1': '+', 'Param2': '-'}}, type 'Sum'),
        # or an equation specification ({'Index': {'Equation': ..., 'Variables': [...]}}) as used below.
self.Problem.MuDependencies['FromParameters']['b'].update(
{'n_density': {'Equation': '-nonenzymatic_proteins_n/inverse_average_protein_length', 'Variables': ['nonenzymatic_proteins_n', 'inverse_average_protein_length']}})
self.Problem.MuDependencies['FromParameters']['b'].update({'mIM_density': {
'Equation': '-nonenzymatic_proteins_mIM/inverse_average_protein_length', 'Variables': ['nonenzymatic_proteins_mIM', 'inverse_average_protein_length']}})
self.Problem.MuDependencies['FromParameters']['b'].update({'vM_density': {
'Equation': '-nonenzymatic_proteins_vM/inverse_average_protein_length', 'Variables': ['nonenzymatic_proteins_vM', 'inverse_average_protein_length']}})
self.Problem.MuDependencies['FromParameters']['b'].update({'mIMS_density': {
'Equation': '-nonenzymatic_proteins_mIMS/inverse_average_protein_length', 'Variables': ['nonenzymatic_proteins_mIMS', 'inverse_average_protein_length']}})
self.Problem.MuDependencies['FromParameters']['b'].update(
{'m_density': {'Equation': '-nonenzymatic_proteins_m/inverse_average_protein_length', 'Variables': ['nonenzymatic_proteins_m', 'inverse_average_protein_length']}})
self.Problem.MuDependencies['FromParameters']['b'].update({'erM_density': {
'Equation': '-nonenzymatic_proteins_erM/inverse_average_protein_length', 'Variables': ['nonenzymatic_proteins_erM', 'inverse_average_protein_length']}})
self.Problem.MuDependencies['FromParameters']['b'].update({'mOM_density': {
'Equation': '-nonenzymatic_proteins_mOM/inverse_average_protein_length', 'Variables': ['nonenzymatic_proteins_mOM', 'inverse_average_protein_length']}})
self.Problem.MuDependencies['FromParameters']['b'].update(
{'x_density': {'Equation': '-nonenzymatic_proteins_x/inverse_average_protein_length', 'Variables': ['nonenzymatic_proteins_x', 'inverse_average_protein_length']}})
self.Problem.MuDependencies['FromParameters']['b'].update({'cM_density': {
'Equation': '-nonenzymatic_proteins_cM/inverse_average_protein_length', 'Variables': ['nonenzymatic_proteins_cM', 'inverse_average_protein_length']}})
self.Problem.MuDependencies['FromParameters']['b'].update({'gM_density': {
'Equation': '-nonenzymatic_proteins_gM/inverse_average_protein_length', 'Variables': ['nonenzymatic_proteins_gM', 'inverse_average_protein_length']}})
self.Problem.MuDependencies['FromParameters']['b'].update(
{'c_density': {'Equation': '-nonenzymatic_proteins_c/inverse_average_protein_length', 'Variables': ['nonenzymatic_proteins_c', 'inverse_average_protein_length']}})
self.Problem.MuDependencies['FromParameters']['b'].update({'O_total': {'Equation': '{} - nonenzymatic_proteins_Secreted/inverse_average_protein_length'.format(totalAA_parameter), 'Variables': [
totalAA_parameter, 'nonenzymatic_proteins_Secreted', 'inverse_average_protein_length']}})
# !!! deal with hardcoded parameter_names... !!!
def estimate_specific_Kapps(self, proteomicsData, flux_bounds, mu, biomass_function=None, target_biomass_function=True, parsimonious_fba=True):
"""
Parameters
----------
proteomicsData : pandas.DataFrame (in mmol/gDW)
flux_bounds : pandas.DataFrame (in mmol/(gDW*h))
mu : float (in 1/h)
biomass_function : str
target_biomass_function : bool
atp_maintenance_to_biomassfunction : bool
eukaryotic : bool
"""
from scipy.stats.mstats import gmean
old_model = copy.deepcopy(self.model)
for i in self.model.targets.target_groups._elements_by_id['translation_targets'].concentrations._elements:
if i.species == 'average_protein_c':
new_agg = rba.xml.parameters.Aggregate(id_='total_protein', type_='multiplication')
new_agg.function_references.append(rba.xml.parameters.FunctionReference(
function='amino_acid_concentration_total'))
new_agg.function_references.append(rba.xml.parameters.FunctionReference(
function='inverse_average_protein_length'))
self.model.parameters.aggregates._elements.append(new_agg)
i.value = 'total_protein'
else:
self.model.targets.target_groups._elements_by_id['translation_targets'].concentrations._elements.remove(
i)
for i in self.model.targets.target_groups._elements_by_id['transcription_targets'].concentrations._elements:
if i.species == 'mrna':
new_agg = rba.xml.parameters.Aggregate(id_='total_rna', type_='multiplication')
new_agg.function_references.append(rba.xml.parameters.FunctionReference(
function='RNA_massfraction_CarbonLimitation'))
new_agg.function_references.append(
rba.xml.parameters.FunctionReference(function='RNA_inversemillimolarweight'))
self.model.parameters.aggregates._elements.append(new_agg)
i.value = 'total_rna'
else:
self.model.targets.target_groups._elements_by_id['transcription_targets'].concentrations._elements.remove(
i)
self.rebuild_from_model()
self.setMedium(self.Medium)
self.addExchangeReactions()
self.setMu(mu)
if target_biomass_function:
self.buildFBA(objective='targets', maintenanceToBM=True)
BMfunction = 'R_BIOMASS_targetsRBA'
else:
self.buildFBA(objective='classic', maintenanceToBM=False)
BMfunction = biomass_function
for j in [i for i in self.Medium.keys() if self.Medium[i] == 0]:
Exrxn = 'R_EX_'+j.split('M_')[-1]+'_e'
self.FBA.setUB({Exrxn: 0})
rxn_LBs = {}
rxn_UBs = {}
for rx in flux_bounds['Reaction_ID']:
lb = flux_bounds.loc[flux_bounds['Reaction_ID'] == rx, 'LB'].values[0]
ub = flux_bounds.loc[flux_bounds['Reaction_ID'] == rx, 'UB'].values[0]
if not pandas.isna(lb):
rxn_LBs.update({rx: lb})
if not pandas.isna(ub):
rxn_UBs.update({rx: ub})
self.FBA.setLB(rxn_LBs)
self.FBA.setUB(rxn_UBs)
self.FBA.clearObjective()
self.FBA.setObjectiveCoefficients({BMfunction: -1})
self.FBA.solveLP()
BMfluxOld = self.FBA.SolutionValues[BMfunction]
if parsimonious_fba:
self.FBA.parsimonise()
self.FBA.setLB(rxn_LBs)
self.FBA.setUB(rxn_UBs)
self.FBA.setLB({BMfunction: BMfluxOld})
self.FBA.setUB({BMfunction: BMfluxOld})
self.FBA.solveLP()
FluxDistribution = pandas.DataFrame(index=list(
self.FBA.SolutionValues.keys()), columns=['FluxValues'])
FluxDistribution['FluxValues'] = list(self.FBA.SolutionValues.values())
BMfluxNew = self.FBA.SolutionValues[BMfunction]
ProtoIDmap = {}
for i in self.ModelStructure.ProteinInfo.Elements.keys():
ProtoID = self.ModelStructure.ProteinInfo.Elements[i]['ProtoID']
if ProtoID in list(proteomicsData['ID']):
if not pandas.isna(proteomicsData.loc[proteomicsData['ID'] == ProtoID, 'copy_number'].values[0]):
if proteomicsData.loc[proteomicsData['ID'] == ProtoID, 'copy_number'].values[0] != 0:
if ProtoID in ProtoIDmap.keys():
ProtoIDmap[ProtoID]['ModelProteins'].append(i)
else:
ProtoIDmap.update(
{ProtoID: {'ModelProteins': [i], 'CopyNumber': proteomicsData.loc[proteomicsData['ID'] == ProtoID, 'copy_number'].values[0]}})
ReactionMap = {}
for i in self.ModelStructure.ReactionInfo.Elements.keys():
if '_duplicate_' in i:
continue
else:
if i in list(FluxDistribution.index):
if FluxDistribution.loc[i, 'FluxValues'] != 0:
ReactionMap.update({i: {'ModelReactions': list(
[i]+self.ModelStructure.ReactionInfo.Elements[i]['Twins']), 'Flux': FluxDistribution.loc[i, 'FluxValues']}})
IsoReaction2ProtoReaction = {}
for i in ReactionMap.keys():
for j in ReactionMap[i]['ModelReactions']:
IsoReaction2ProtoReaction[j] = i
EnzymeMap = {}
for i in self.ModelStructure.EnzymeInfo.Elements.keys():
if self.ModelStructure.EnzymeInfo.Elements[i]['Reaction'] in IsoReaction2ProtoReaction:
CompositionDict = {self.ModelStructure.ProteinInfo.Elements[j]['ProtoID']: self.ModelStructure.EnzymeInfo.Elements[
i]['Subunits'][j] for j in self.ModelStructure.EnzymeInfo.Elements[i]['Subunits'].keys()}
ProtoReaction = IsoReaction2ProtoReaction[self.ModelStructure.EnzymeInfo.Elements[i]['Reaction']]
CopyNumbers = []
Stoichiometries = []
EnzymeNumbers = []
for j in CompositionDict.keys():
if j in ProtoIDmap.keys():
CopyNumbers.append(ProtoIDmap[j]['CopyNumber'])
Stoichiometries.append(CompositionDict[j])
EnzymeNumbers.append(ProtoIDmap[j]['CopyNumber']/CompositionDict[j])
GM_enzymenumber = 0
if len(EnzymeNumbers) > 0:
GM_enzymenumber = gmean(numpy.array(EnzymeNumbers))
EnzymeMap.update(
{i: {'ProtoReaction': ProtoReaction, 'EnzymeNumber': GM_enzymenumber}})
EnzymeMap2 = {}
for i in ReactionMap.keys():
totalIsoEnzymeNumber = 0
for j in ReactionMap[i]['ModelReactions']:
respectiveEnzyme = self.ModelStructure.ReactionInfo.Elements[j]['Enzyme']
if respectiveEnzyme in EnzymeMap.keys():
totalIsoEnzymeNumber += EnzymeMap[respectiveEnzyme]['EnzymeNumber']
for j in ReactionMap[i]['ModelReactions']:
respectiveEnzyme = self.ModelStructure.ReactionInfo.Elements[j]['Enzyme']
if respectiveEnzyme in EnzymeMap.keys():
concentration = EnzymeMap[respectiveEnzyme]['EnzymeNumber']
if concentration != 0:
if numpy.isfinite(concentration):
specificFlux = ReactionMap[i]['Flux'] * \
EnzymeMap[respectiveEnzyme]['EnzymeNumber']/totalIsoEnzymeNumber
EnzymeMap2.update({respectiveEnzyme: {'CopyNumber': EnzymeMap[respectiveEnzyme]['EnzymeNumber'],
'Concentration': concentration, 'Flux': specificFlux, 'Kapp': abs(specificFlux/concentration)}})
self.model = old_model
self.rebuild_from_model()
self.setMedium(self.Medium)
out = pandas.DataFrame()
for i in EnzymeMap2.keys():
# if EnzymeMap2[i]['CopyNumber'] == 0:
# continue
out.loc[i, 'Enzyme_ID'] = i
out.loc[i, 'CopyNumber'] = EnzymeMap2[i]['CopyNumber']
out.loc[i, 'Concentration'] = EnzymeMap2[i]['Concentration']
out.loc[i, 'Flux'] = EnzymeMap2[i]['Flux']
out.loc[i, 'Kapp'] = EnzymeMap2[i]['Kapp']
return(out)
def estimate_default_Kapps(self, target_mu, compartment_densities_and_PGs=None, flux_bounds=None, plateau_limit=4, mu_approximation_precision=0.005, transporter_to_lumen_coefficient=10, default_kapp_LB=0, default_kapp_UB=1000000, start_val=200000, densities_to_fix=None, eukaryotic=False):
"""
Parameters
----------
target_mu : float
compartment_densities_and_PGs : pandas.DataFrame
flux_bounds : pandas.DataFrame
"""
orig_enz = self.model.parameters.functions._elements_by_id[
'default_efficiency'].parameters._elements_by_id['CONSTANT'].value
out = pandas.DataFrame()
for comp in list(compartment_densities_and_PGs['Compartment_ID']):
self.model.parameters.functions._elements_by_id[str(
'fraction_protein_'+comp)].parameters._elements_by_id['CONSTANT'].value = compartment_densities_and_PGs.loc[compartment_densities_and_PGs['Compartment_ID'] == comp, 'Density']
self.model.parameters.functions._elements_by_id[str(
'fraction_non_enzymatic_protein_'+comp)].parameters._elements_by_id['CONSTANT'].value = compartment_densities_and_PGs.loc[compartment_densities_and_PGs['Compartment_ID'] == comp, 'PG_fraction']
self.rebuild_from_model()
self.addExchangeReactions()
self.setMedium(self.Medium)
if densities_to_fix is None:
comp_density_rows = list(self.Problem.CompartmentDensities)
self.Problem.setConstraintType(
dict(zip(comp_density_rows, ['E']*len(comp_density_rows))))
else:
if len(densities_to_fix) != 0:
comp_density_rows = densities_to_fix
self.Problem.setConstraintType(
dict(zip(comp_density_rows, ['E']*len(comp_density_rows))))
rxn_LBs = {}
rxn_UBs = {}
for rx in flux_bounds['Reaction_ID']:
lb = flux_bounds.loc[flux_bounds['Reaction_ID'] == rx, 'LB'].values[0]
ub = flux_bounds.loc[flux_bounds['Reaction_ID'] == rx, 'UB'].values[0]
if not pandas.isna(lb):
rxn_LBs.update({rx: lb})
if not pandas.isna(ub):
rxn_UBs.update({rx: ub})
self.Problem.setLB(rxn_LBs)
self.Problem.setUB(rxn_UBs)
kapp_LB = default_kapp_LB
if default_kapp_UB is not None:
kapp_UB = default_kapp_UB
else:
kapp_UB = orig_enz*1000
# new_kapp = (kapp_UB+kapp_LB)/2
if start_val is not None:
new_kapp = start_val
else:
new_kapp = orig_enz
Mu_pred = self.findMaxGrowthRate(precision=0.005, max=1)
Mus = []
Mus_Error = []
Kapps = []
last_Mu = numpy.nan
plateau_count = 0
if abs(target_mu - Mu_pred) > mu_approximation_precision:
while abs(target_mu - Mu_pred) > mu_approximation_precision:
if plateau_count >= plateau_limit:
break
self.model.parameters.functions._elements_by_id[
'default_efficiency'].parameters._elements_by_id['CONSTANT'].value = new_kapp
self.model.parameters.functions._elements_by_id['default_transporter_efficiency'].parameters._elements_by_id[
'CONSTANT'].value = transporter_to_lumen_coefficient*new_kapp
self.rebuild_from_model()
self.addExchangeReactions()
self.setMedium(self.Medium)
self.Problem.setLB(rxn_LBs)
self.Problem.setUB(rxn_UBs)
if densities_to_fix is None:
comp_density_rows = list(self.Problem.CompartmentDensities)
self.Problem.setConstraintType(
dict(zip(comp_density_rows, ['E']*len(comp_density_rows))))
else:
if len(densities_to_fix) != 0:
comp_density_rows = densities_to_fix
self.Problem.setConstraintType(
dict(zip(comp_density_rows, ['E']*len(comp_density_rows))))
Mu_pred = self.findMaxGrowthRate(precision=0.005, max=1)
Mus_Error.append(abs(target_mu - Mu_pred))
Mus.append(Mu_pred)
Kapps.append(new_kapp)
if Mu_pred > target_mu:
new_kapp_prelim = kapp_LB+(0.5*abs(kapp_LB-new_kapp))
kapp_UB = new_kapp
elif Mu_pred < target_mu:
new_kapp_prelim = kapp_UB-(0.5*abs(new_kapp-kapp_UB))
kapp_LB = new_kapp
new_kapp = new_kapp_prelim
if len(Mus) > 2:
if Mus[-2] == Mu_pred:
plateau_count += 1
else:
plateau_count = 0
else:
Mus.append(Mu_pred)
Mus_Error.append(abs(target_mu - Mu_pred))
Kapps.append(
self.model.parameters.functions._elements_by_id['default_efficiency'].parameters._elements_by_id['CONSTANT'].value)
self.rebuild_from_model()
self.setMedium(self.Medium)
        out = pandas.DataFrame()
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: abhijit
"""
#%% preamble
import numpy as np
import pandas as pd
from glob import glob
#%% Tidy data
filenames = glob('data/table*.csv')
filenames = sorted(filenames)
table1, table2, table3, table4a, table4b, table5 = [pd.read_csv(f) for f in filenames]
import calendar
from datetime import datetime
import locale
import unicodedata
import numpy as np
import pytest
import pandas as pd
from pandas import (
DatetimeIndex,
Index,
Timedelta,
Timestamp,
date_range,
offsets,
)
import pandas._testing as tm
from pandas.core.arrays import DatetimeArray
class TestDatetime64:
def test_no_millisecond_field(self):
msg = "type object 'DatetimeIndex' has no attribute 'millisecond'"
with pytest.raises(AttributeError, match=msg):
DatetimeIndex.millisecond
msg = "'DatetimeIndex' object has no attribute 'millisecond'"
with pytest.raises(AttributeError, match=msg):
DatetimeIndex([]).millisecond
def test_datetimeindex_accessors(self):
dti_naive = date_range(freq="D", start=datetime(1998, 1, 1), periods=365)
# GH#13303
dti_tz = date_range(
freq="D", start=datetime(1998, 1, 1), periods=365, tz="US/Eastern"
)
for dti in [dti_naive, dti_tz]:
assert dti.year[0] == 1998
assert dti.month[0] == 1
assert dti.day[0] == 1
assert dti.hour[0] == 0
assert dti.minute[0] == 0
assert dti.second[0] == 0
assert dti.microsecond[0] == 0
assert dti.dayofweek[0] == 3
assert dti.dayofyear[0] == 1
assert dti.dayofyear[120] == 121
assert dti.isocalendar().week[0] == 1
assert dti.isocalendar().week[120] == 18
assert dti.quarter[0] == 1
assert dti.quarter[120] == 2
assert dti.days_in_month[0] == 31
assert dti.days_in_month[90] == 30
assert dti.is_month_start[0]
assert not dti.is_month_start[1]
assert dti.is_month_start[31]
assert dti.is_quarter_start[0]
assert dti.is_quarter_start[90]
assert dti.is_year_start[0]
assert not dti.is_year_start[364]
assert not dti.is_month_end[0]
assert dti.is_month_end[30]
assert not dti.is_month_end[31]
assert dti.is_month_end[364]
assert not dti.is_quarter_end[0]
assert not dti.is_quarter_end[30]
assert dti.is_quarter_end[89]
assert dti.is_quarter_end[364]
assert not dti.is_year_end[0]
assert dti.is_year_end[364]
assert len(dti.year) == 365
assert len(dti.month) == 365
assert len(dti.day) == 365
assert len(dti.hour) == 365
assert len(dti.minute) == 365
assert len(dti.second) == 365
assert len(dti.microsecond) == 365
assert len(dti.dayofweek) == 365
assert len(dti.dayofyear) == 365
assert len(dti.isocalendar()) == 365
assert len(dti.quarter) == 365
assert len(dti.is_month_start) == 365
assert len(dti.is_month_end) == 365
assert len(dti.is_quarter_start) == 365
assert len(dti.is_quarter_end) == 365
assert len(dti.is_year_start) == 365
assert len(dti.is_year_end) == 365
dti.name = "name"
# non boolean accessors -> return Index
for accessor in DatetimeArray._field_ops:
if accessor in ["week", "weekofyear"]:
# GH#33595 Deprecate week and weekofyear
continue
res = getattr(dti, accessor)
assert len(res) == 365
assert isinstance(res, Index)
assert res.name == "name"
# boolean accessors -> return array
for accessor in DatetimeArray._bool_ops:
res = getattr(dti, accessor)
assert len(res) == 365
assert isinstance(res, np.ndarray)
# test boolean indexing
res = dti[dti.is_quarter_start]
exp = dti[[0, 90, 181, 273]]
tm.assert_index_equal(res, exp)
res = dti[dti.is_leap_year]
exp = DatetimeIndex([], freq="D", tz=dti.tz, name="name")
tm.assert_index_equal(res, exp)
def test_datetimeindex_accessors2(self):
dti = date_range(freq="BQ-FEB", start=datetime(1998, 1, 1), periods=4)
assert sum(dti.is_quarter_start) == 0
assert sum(dti.is_quarter_end) == 4
assert sum(dti.is_year_start) == 0
assert sum(dti.is_year_end) == 1
def test_datetimeindex_accessors3(self):
# Ensure is_start/end accessors throw ValueError for CustomBusinessDay,
bday_egypt = offsets.CustomBusinessDay(weekmask="Sun Mon Tue Wed Thu")
dti = date_range(datetime(2013, 4, 30), periods=5, freq=bday_egypt)
msg = "Custom business days is not supported by is_month_start"
with pytest.raises(ValueError, match=msg):
dti.is_month_start
def test_datetimeindex_accessors4(self):
dti = DatetimeIndex(["2000-01-01", "2000-01-02", "2000-01-03"])
assert dti.is_month_start[0] == 1
def test_datetimeindex_accessors5(self):
        with tm.assert_produces_warning(FutureWarning, match="The 'freq' argument"):
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 3 18:17:07 2021
@author: alber
"""
import os
import pandas as pd
import numpy as np
import itertools
import seaborn as sns
import matplotlib.pyplot as plt
import pickle
import lightgbm as lgb
from os import walk
from scipy import stats
from statsmodels.stats.power import TTestIndPower
from sklearn import preprocessing
from sklearn.semi_supervised import (
LabelPropagation,
LabelSpreading,
SelfTrainingClassifier,
)
from common.config import (
PATH_POEMS, PATH_RESULTS, PATH_AFF_LEXICON, PATH_GROUND_TRUTH
)
df_metrics_h_test = pd.DataFrame()
### Sample Size
# parameters for power analysis
effect = 0.8
alpha = 0.1  # note the alpha value here: it is not 0.5
power = 0.8
# perform power analysis
analysis = TTestIndPower()
result = analysis.solve_power(effect, power=power, nobs1=None, ratio=1.0, alpha=alpha)
print('Sample Size: %.3f' % result)
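# 'result' is the required sample size per group for the chosen effect size, power and
# alpha; it is only reported here.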
df_kappa_limits = pd.DataFrame(
{
'limit_k': [0, 0.2, 0.4],
'category': ['poor', 'slight', 'fair']
}
)
# =============================================================================
# Best Models based on CV
# =============================================================================
# ENG names
df_names = pd.read_csv(
f"{PATH_GROUND_TRUTH}/variable_names_en.csv", encoding="latin-1")
df_names['category'] = df_names['es_name']
### Load CV - Psychological
f_path = f"{PATH_RESULTS}/results_cv/emotion_aff_full"
f_folders = next(walk(f_path), (None, None, []))[1] # [] if no file
df_results_aff_cv = pd.DataFrame()
i = 0
for folder in f_folders:
i += 1
filenames = next(walk(f_path + f'/{folder}'), (None, None, []))[2]
df_iter = pd.concat([
pd.read_csv(f_path + f'/{folder}' + '/' + x, encoding="latin-1") for x in filenames
])
df_iter['iter'] = folder
df_results_aff_cv = df_results_aff_cv.append(
df_iter
)
df_raw = df_results_aff_cv
df_raw = (
df_raw
.replace("Aversión", "Aversión")
.replace("Depresión", "Depresión")
.replace('Dramatización', "Dramatización")
.replace('Ilusión', "Ilusión")
.replace("Desilusión", "Desilusión")
.replace("Obsesión", "Obsesión")
.replace("Compulsión", "Compulsión")
.replace("Ensoñación", "Ensoñación")
.replace("Idealización", "Idealización")
.dropna(subset=['category'])
.drop(columns=['category', 'en_name'], errors="ignore")
)
df_raw = df_raw.merge(df_names, how="left").round(2)
# ENG names
df_names = pd.read_csv(
f"{PATH_GROUND_TRUTH}/variable_names_en.csv", encoding="latin-1")
df_names['category'] = df_names['es_name']
df_raw = (
df_raw
.merge(df_names, how="left")
.drop(columns=['es_name'])
)
### Get the metrics per emotion tag
df_results_aff = (
df_raw
.groupby(by=['category', 'regression_model', 'semantic_model'])
.mean()
.reset_index()
)
df_results_aff['mean_metric'] = (
(df_results_aff['kappa']+
df_results_aff['auc'])
/
2
)
df_median_ref = (
df_results_aff
.groupby(by=['regression_model', 'semantic_model'])
.median()
.reset_index()
.copy()
[['regression_model', 'semantic_model', 'f1_weighted', 'kappa', 'auc', 'corr']]
.rename(columns={
'f1_weighted': 'f1_weighted_median',
'kappa': 'kappa_median',
'auc': 'auc_median',
'corr': 'corr_median'
})
)
df_results_aff = df_results_aff[df_results_aff['auc']>0.5]
df_results_aff = df_results_aff[df_results_aff.fillna(0)['corr']>=0]
# Remove baselines
df_results_aff = df_results_aff[
(df_results_aff['regression_model'] != 'class_baseline_lightgbm') &
(df_results_aff['regression_model'] != 'class_baseline_smote_lightgbm') &
(df_results_aff['regression_model'] != 'class_label_spreading_base_knn') &
(df_results_aff['regression_model'] != 'class_label_spreading_base_rbf') &
(df_results_aff['regression_model'] != 'class_dummy_classifier') &
(df_results_aff['regression_model'] != 'reg_baseline_lightgbm') &
(df_results_aff['regression_model'] != 'reg_baseline_smote_lightgbm') &
(df_results_aff['regression_model'] != 'reg_label_spreading_base') &
(df_results_aff['regression_model'] != 'reg_dummy_classifier')
].copy()
# Remove unused semantic models
list_semantic_models = [
'enc_text_model1',
'enc_text_model2',
'enc_text_model3',
'enc_text_model4',
'enc_text_model5',
# 'enc_text_model_hg_bert_max',
# 'enc_text_model_hg_bert_span',
# 'enc_text_model_hg_bert_median',
'enc_text_model_hg_bert_avg_w',
# 'enc_text_model_hg_bert_sp_max',
# 'enc_text_model_hg_bert_sp_span',
# 'enc_text_model_hg_bert_sp_median',
'enc_text_model_hg_bert_sp_avg_w',
# 'enc_text_model_hg_ro_max',
# 'enc_text_model_hg_ro_span',
# 'enc_text_model_hg_ro_median',
# 'enc_text_model_hg_ro_avg_w'
]
df_results_aff = df_results_aff[
df_results_aff['semantic_model'].isin(list_semantic_models)]
df_results_aff = (
df_results_aff
.sort_values(by=['category', 'mean_metric'], ascending=False)
.groupby(by=['category'])
.first()
.reset_index()
)
df_results_aff = (
df_results_aff.merge(df_names, how="left").drop(columns=['es_name'])
)
df_results = df_results_aff[[
'en_name', 'semantic_model', 'regression_model',
'f1_weighted', 'kappa', 'auc', 'corr'
]].copy().round(2)
df_reference = df_results
# df_reference = df_results[[
# 'en_name', 'semantic_model', 'classification_model',
# 'f1_weighted', 'kappa', 'auc'
# ]].copy().round(2)
df_reference = df_reference.merge(df_median_ref, how="left")
### Add data distribution
# Load psycho names
df_names = pd.read_csv(f"{PATH_GROUND_TRUTH}/variable_names_en.csv", encoding="latin-1")
list_names = list(df_names["es_name"].values)
list_aff = [
"concreteness",
"context availability",
"anger",
"arousal",
"disgust",
"fear",
"happinness",
"imageability",
"sadness",
"valence",
]
list_kfolds = []
n_folds = 21
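# Each fold sets aside a small sample (df_add): two poems per level of every non-affective
# category, plus extra poems covering any affective-category level that would otherwise be
# missing; the remaining poems form that fold's ground truth (df_gt).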
for i in range(n_folds):
df_gt = pd.read_csv(f"{PATH_GROUND_TRUTH}/poems_corpus_all.csv")
df_gt = df_gt.rename(columns={"text": "text_original"})
df_gt.columns = [str(x).rstrip().lstrip() for x in list(df_gt.columns)]
df_add = pd.DataFrame()
for category in list_names:
if category in list_aff:
continue
try:
df_iter = df_gt.groupby(category).apply(lambda s: s.sample(2))
except:
continue
df_add = df_add.append(df_iter)
df_add = df_add.drop_duplicates()
# New GT (without data used in training)
df_gt = df_gt[~df_gt["index"].isin(df_add["index"])].copy()
## Check no affective feature categories are missing
for category in list_aff:
l1 = list(df_add[category].unique())
l2 = list(df_gt[category].unique())
if len(l1)<len(l2):
l3 = [x for x in l2 if x not in l1]
df_add_new = df_gt[df_gt[category].isin(l3)]
df_add_new = df_add_new.drop_duplicates(subset=category)
df_add = df_add.append(df_add_new)
df_gt = df_gt[~df_gt["index"].isin(df_add_new["index"])].copy()
list_kfolds.append([{i: {'df_gt': df_gt, 'df_add': df_add}}])
df_distribution = pd.DataFrame()
for iter_item in list_kfolds:
iter_item = [x for x in iter_item[0].values()][0]['df_gt']
for category in list_aff:
data_cat = (
pd.DataFrame(iter_item[category].copy().value_counts())
.T
.reset_index()
.rename(columns={'index':'en_name'})
)
df_distribution = df_distribution.append(data_cat)
df_distribution = df_distribution.groupby(by=['en_name']).mean().reset_index().round(1)
df_distribution = df_distribution.replace("fear", "Fear (ordinal)")
df_distribution = df_distribution.replace("happinness", "happiness")
df_reference = df_distribution.merge(df_reference)
df_reference.round(2).to_csv(
"tables_paper/df_results_emotions_reference.csv", index=False)
# =============================================================================
# Differences vs. Baselines
# =============================================================================
# ENG names
df_names = pd.read_csv(
f"{PATH_GROUND_TRUTH}/variable_names_en.csv", encoding="latin-1")
df_names['category'] = df_names['es_name']
# Load best combinations
df_reference = pd.read_csv("tables_paper/df_results_emotions_reference.csv")
list_semantic_models = list(set(df_reference['semantic_model'].values))
list_prediction_models = list(set(df_reference['regression_model'].values))
list_categories = list(set(df_reference['en_name'].values))
### Load CV - Emotions
f_path = f"{PATH_RESULTS}/results_cv/emotion_aff_full"
f_folders = next(walk(f_path), (None, None, []))[1] # [] if no file
df_results_aff_cv = pd.DataFrame()
i = 0
for folder in f_folders:
i += 1
filenames = next(walk(f_path + f'/{folder}'), (None, None, []))[2]
df_iter = pd.concat([
pd.read_csv(f_path + f'/{folder}' + '/' + x) for x in filenames
])
df_iter['iter'] = folder
df_results_aff_cv = df_results_aff_cv.append(
df_iter
)
df_raw = df_results_aff_cv
df_raw = df_raw.merge(df_names, how="left").drop(columns=['es_name'])
# Set missing SMOTE models as non-SMOTE results
df_aux = df_raw[(df_raw['regression_model']=='class_baseline_lightgbm') &
(df_raw['category']=='happinness')
].copy()
df_aux['regression_model'] = 'class_baseline_smote_lightgbm'
df_raw = df_raw.append(df_aux)
df_aux = df_raw[(df_raw['regression_model']=='class_baseline_lightgbm') &
(df_raw['category']=='fear')
].copy()
df_aux['regression_model'] = 'class_baseline_smote_lightgbm'
df_raw = df_raw.append(df_aux)
list_baselines = [
'class_baseline_lightgbm',
'class_baseline_smote_lightgbm',
'class_dummy_classifier',
'reg_baseline_lightgbm'
]
# Iter and get results
df_metrics = pd.DataFrame()
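# For each category, compare the per-fold metrics of the selected configuration against each
# baseline with a Kruskal-Wallis test; p-values default to 1 when the test cannot be computed.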
for i, row in df_reference.iterrows():
for baseline in list_baselines:
df_1 = df_raw[
(df_raw['semantic_model']==row['semantic_model']) &
(df_raw['regression_model']==row['regression_model']) &
(df_raw['en_name']==row['en_name'])
]
df_2 = df_raw[
(df_raw['semantic_model']==row['semantic_model']) &
(df_raw['regression_model']==baseline) &
(df_raw['en_name']==row['en_name'])
]
list_f1_df1 = list(df_1['f1_weighted'].values)
list_f1_df2 = list(df_2['f1_weighted'].values)
list_kappa_df1 = list(df_1['kappa'].values)
list_kappa_df2 = list(df_2['kappa'].values)
list_auc_df1 = list(df_1['auc'].values)
list_auc_df2 = list(df_2['auc'].values)
list_corr_df1 = list(df_1['corr'].values)
list_corr_df2 = list(df_2['corr'].values)
try:
_, pVal_f1 = stats.kruskal(list_f1_df1, list_f1_df2)
_, pVal_kappa = stats.kruskal(list_kappa_df1, list_kappa_df2)
_, pVal_auc = stats.kruskal(list_auc_df1, list_auc_df2)
_, pVal_corr = stats.kruskal(list_corr_df1, list_corr_df2)
except:
pVal_f1 = 1
pVal_kappa = 1
pVal_auc = 1
pVal_corr = 1
df_metrics_iter = pd.DataFrame(
{'category': [row['en_name']],
'semantic_model': [row['semantic_model']],
'prediction_model_1': [row['regression_model']],
'prediction_model_2': [baseline],
'mean_1_f1': [np.mean(list_f1_df1)],
'mean_2_f1': [np.mean(list_f1_df2)],
'median_1_f1': [np.median(list_f1_df1)],
'median_2_f1': [np.median(list_f1_df2)],
'p-value_f1': [pVal_f1],
'mean_1_kappa': [np.mean(list_kappa_df1)],
'mean_2_kappa': [np.mean(list_kappa_df2)],
'median_1_kappa': [np.median(list_kappa_df1)],
'median_2_kappa': [np.median(list_kappa_df2)],
'p-value_kappa': [pVal_kappa],
'mean_1_auc': [np.mean(list_auc_df1)],
'mean_2_auc': [np.mean(list_auc_df2)],
'median_1_auc': [np.median(list_auc_df1)],
'median_2_auc': [np.median(list_auc_df2)],
'p-value_auc': [pVal_auc],
'mean_1_corr': [np.mean(list_corr_df1)],
'mean_2_corr': [np.mean(list_corr_df2)],
'median_1_corr': [np.median(list_corr_df1)],
'median_2_corr': [np.median(list_corr_df2)],
'p-value_corr': [pVal_corr],
}
)
df_metrics = df_metrics.append(df_metrics_iter)
df_metrics.round(2).to_csv(
"tables_paper/df_results_emotions_cv_vs_baseline.csv", index=False)
# Plot Data
df_aux = (df_metrics[['category', 'prediction_model_1', 'mean_1_auc', 'p-value_auc']]
.rename(columns={
'prediction_model_1':'prediction_model',
'mean_1_auc':'mean_auc'})
)
df_aux['prediction_model'] = 'best reference'
df_plot = (
df_metrics[['category', 'prediction_model_2', 'mean_2_auc', 'p-value_auc']]
.rename(columns={'prediction_model_2':'prediction_model',
'mean_2_auc':'mean_auc'})
.append(df_aux)
)
df_plot = df_plot[df_plot['prediction_model']!='class_dummy_classifier']
df_plot = df_plot[df_plot['prediction_model']!='reg_dummy_classifier']
df_plot = df_plot[df_plot['prediction_model']!='reg_baseline_lightgbm']
df_plot = df_plot[df_plot['prediction_model']!='baseline_affective']
df_plot = df_plot.replace("Fear (ordinal)", "fear (ordinal)")
df_plot = df_plot.replace("anger", "anger (ordinal)")
plt.figure(figsize=(16, 10), dpi=250)
sns.set_theme(style="darkgrid")
sns.set(font_scale=1.2)
plot_fig = sns.barplot(data = df_plot,
x = 'category',
y = 'mean_auc',
hue = 'prediction_model'
)
plot_fig.set(
ylabel = "AUC Value",
xlabel = 'Psychological Category'
)
plot_fig.set_title(
"AUC metrics versus baseline models",
fontdict = {'fontsize':16},
pad = 12
)
plt.ylim(0, 0.9)
plot_fig.set_xticklabels(
plot_fig.get_xticklabels(), rotation=45, horizontalalignment='right')
# plot_fig.set_yticklabels(
# plot_fig.get_yticklabels(), rotation=360, horizontalalignment='right')
plt.legend(loc="upper left")
plt.savefig('results/df_plot_emotions_metrics_vs_baseline.png', dpi=250)
plt.show()
### Analysis (manual)
df_analysis = df_plot[(df_plot['p-value_auc']>0.1) & (df_plot['prediction_model']!='best reference')].copy()
# =============================================================================
# Differences vs. original DISCO (Emotions)
# =============================================================================
# ENG names
df_names = pd.read_csv(
f"{PATH_GROUND_TRUTH}/variable_names_en.csv", encoding="latin-1")
df_names['category'] = df_names['es_name']
# Load best combinations
df_reference = pd.read_csv("tables_paper/df_results_emotions_reference.csv")
list_semantic_models = list(set(df_reference['semantic_model'].values))
list_prediction_models = list(set(df_reference['regression_model'].values))
list_categories = list(set(df_reference['en_name'].values))
### Load CV - Emotions - All
f_path = f"{PATH_RESULTS}/results_cv/emotion_aff_full"
f_folders = next(walk(f_path), (None, None, []))[1] # [] if no file
df_results_aff_cv = pd.DataFrame()
i = 0
for folder in f_folders:
i += 1
filenames = next(walk(f_path + f'/{folder}'), (None, None, []))[2]
df_iter = pd.concat([
pd.read_csv(f_path + f'/{folder}' + '/' + x) for x in filenames
])
df_iter['iter'] = folder
df_results_aff_cv = df_results_aff_cv.append(
df_iter
)
df_results_aff_cv = (
df_results_aff_cv
.replace("Aversión", "Aversión")
.replace("Depresión", "Depresión")
.replace('Dramatización', "Dramatización")
.replace('Ilusión', "Ilusión")
.replace("Desilusión", "Desilusión")
.replace("Obsesión", "Obsesión")
.replace("Compulsión", "Compulsión")
.replace("Ensoñación", "Ensoñación")
.replace("Idealización", "Idealización")
.dropna(subset=['category'])
.drop(columns=['category', 'en_name'], errors="ignore")
)
df_results_aff_cv = df_results_aff_cv.merge(df_names, how="left").round(2).drop(columns=['es_name'])
### Load CV - Psychological - DISCO
f_path = f"{PATH_RESULTS}/results_cv/emotion_aff_DISCO"
f_folders = next(walk(f_path), (None, None, []))[1] # [] if no file
df_results_disco_cv = pd.DataFrame()
i = 0
for folder in f_folders:
i += 1
filenames = next(walk(f_path + f'/{folder}'), (None, None, []))[2]
df_iter = pd.concat([
pd.read_csv(f_path + f'/{folder}' + '/' + x) for x in filenames
])
df_iter['iter'] = folder
df_results_disco_cv = df_results_disco_cv.append(
df_iter
)
df_results_disco_cv = (
df_results_disco_cv
.replace("Aversión", "Aversión")
.replace("Depresión", "Depresión")
.replace('Dramatización', "Dramatización")
.replace('Ilusión', "Ilusión")
.replace("Desilusión", "Desilusión")
.replace("Obsesión", "Obsesión")
.replace("Compulsión", "Compulsión")
.replace("Ensoñación", "Ensoñación")
.replace("Idealización", "Idealización")
.dropna(subset=['category'])
.drop(columns=['category', 'en_name'], errors="ignore")
)
df_results_disco_cv = df_results_disco_cv.merge(df_names, how="left").round(2).drop(columns=['es_name'])
### Iter and get results
df_metrics = pd.DataFrame()
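# Same Kruskal-Wallis comparison, now between training on the full corpus ('All') and on the
# DISCO subset only, using each category's best configuration.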
for i, row in df_reference.iterrows():
df_1 = df_results_aff_cv[
(df_results_aff_cv['semantic_model']==row['semantic_model']) &
(df_results_aff_cv['regression_model']==row['regression_model']) &
(df_results_aff_cv['en_name']==row['en_name'])
]
df_2 = df_results_disco_cv[
(df_results_disco_cv['semantic_model']==row['semantic_model']) &
(df_results_disco_cv['regression_model']==row['regression_model']) &
(df_results_disco_cv['en_name']==row['en_name'])
]
list_f1_df1 = list(df_1['f1_weighted'].values)
list_f1_df2 = list(df_2['f1_weighted'].values)
list_kappa_df1 = list(df_1['kappa'].values)
list_kappa_df2 = list(df_2['kappa'].values)
list_auc_df1 = list(df_1['auc'].values)
list_auc_df2 = list(df_2['auc'].values)
    try:
        _, pVal_f1 = stats.kruskal(list_f1_df1, list_f1_df2)
        _, pVal_kappa = stats.kruskal(list_kappa_df1, list_kappa_df2)
        _, pVal_auc = stats.kruskal(list_auc_df1, list_auc_df2)
    except Exception:  # kruskal fails when a group is empty or all scores are identical
        pVal_f1 = 1
        pVal_kappa = 1
        pVal_auc = 1
df_metrics_iter = pd.DataFrame(
{'category': [row['en_name']],
'semantic_model': [row['semantic_model']],
'prediction_model': [row['regression_model']],
'comb_1': ['All'],
'comb_2': ['DISCO'],
'mean_1_f1': [np.mean(list_f1_df1)],
'mean_2_f1': [np.mean(list_f1_df2)],
'median_1_f1': [np.median(list_f1_df1)],
'median_2_f1': [np.median(list_f1_df2)],
'p-value_f1': [pVal_f1],
'mean_1_kappa': [np.mean(list_kappa_df1)],
'mean_2_kappa': [np.mean(list_kappa_df2)],
'median_1_kappa': [np.median(list_kappa_df1)],
'median_2_kappa': [np.median(list_kappa_df2)],
'p-value_kappa': [pVal_kappa],
'mean_1_auc': [np.mean(list_auc_df1)],
'mean_2_auc': [np.mean(list_auc_df2)],
'median_1_auc': [np.median(list_auc_df1)],
'median_2_auc': [np.median(list_auc_df2)],
'p-value_auc': [pVal_auc],
}
)
df_metrics = df_metrics.append(df_metrics_iter)
df_metrics.round(2).to_csv(
"tables_paper/df_results_emotion_all_vs_disco_cv.csv", index=False)
# Plot Data
df_aux = (df_metrics[['category', 'comb_1', 'mean_1_auc', 'p-value_auc']]
.rename(columns={
'comb_1':'configuration',
'mean_1_auc':'mean_auc'})
)
df_aux['configuration'] = 'best reference'
df_aux_2 = (df_metrics[['category', 'comb_2', 'mean_2_auc', 'p-value_auc']]
.rename(columns={'comb_2':'configuration',
'mean_2_auc':'mean_auc'})
)
df_aux_2['configuration'] = 'only DISCO'
df_plot = (
df_aux_2
.append(df_aux)
)
df_plot = df_plot.replace("Fear (ordinal)", "fear (ordinal)")
df_plot = df_plot.replace("anger", "anger (ordinal)")
df_plot.round(2).to_csv(
"tables_paper/df_results_emotions_q3.csv", index=False)
# =============================================================================
# Differences vs. not using affective features
# =============================================================================
# ENG names
df_names = pd.read_csv(
f"{PATH_GROUND_TRUTH}/variable_names_en.csv", encoding="latin-1")
df_names['category'] = df_names['es_name']
# Load best combinations
df_reference = pd.read_csv("tables_paper/df_results_emotions_reference.csv")
list_semantic_models = list(set(df_reference['semantic_model'].values))
list_prediction_models = list(set(df_reference['regression_model'].values))
list_categories = list(set(df_reference['en_name'].values))
### Load CV - Emotions - All
f_path = f"{PATH_RESULTS}/results_cv/emotion_aff_full"
f_folders = next(walk(f_path), (None, None, []))[1] # [] if no file
df_results_aff_cv = pd.DataFrame()
i = 0
for folder in f_folders:
i += 1
filenames = next(walk(f_path + f'/{folder}'), (None, None, []))[2]
df_iter = pd.concat([
pd.read_csv(f_path + f'/{folder}' + '/' + x) for x in filenames
])
df_iter['iter'] = folder
df_results_aff_cv = df_results_aff_cv.append(
df_iter
)
df_results_aff_cv = (
df_results_aff_cv
.replace("Aversión", "Aversión")
.replace("Depresión", "Depresión")
.replace('Dramatización', "Dramatización")
.replace('Ilusión', "Ilusión")
.replace("Desilusión", "Desilusión")
.replace("Obsesión", "Obsesión")
.replace("Compulsión", "Compulsión")
.replace("Ensoñación", "Ensoñación")
.replace("Idealización", "Idealización")
.dropna(subset=['category'])
.drop(columns=['category', 'en_name'], errors="ignore")
)
df_results_aff_cv = df_results_aff_cv.merge(df_names, how="left").round(2).drop(columns=['es_name'])
### Load CV - Emotions - No affective features
f_path = f"{PATH_RESULTS}/results_cv/emotion_no_aff"
f_folders = next(walk(f_path), (None, None, []))[1] # [] if no file
df_results_disco_cv = pd.DataFrame()
i = 0
for folder in f_folders:
i += 1
filenames = next(walk(f_path + f'/{folder}'), (None, None, []))[2]
df_iter = pd.concat([
pd.read_csv(f_path + f'/{folder}' + '/' + x) for x in filenames
])
df_iter['iter'] = folder
df_results_disco_cv = df_results_disco_cv.append(
df_iter
)
df_results_disco_cv = (
df_results_disco_cv
.replace("Aversión", "Aversión")
.replace("Depresión", "Depresión")
.replace('Dramatización', "Dramatización")
.replace('Ilusión', "Ilusión")
.replace("Desilusión", "Desilusión")
.replace("Obsesión", "Obsesión")
.replace("Compulsión", "Compulsión")
.replace("Ensoñación", "Ensoñación")
.replace("Idealización", "Idealización")
.dropna(subset=['category'])
.drop(columns=['category', 'en_name'], errors="ignore")
)
df_results_disco_cv = df_results_disco_cv.merge(df_names, how="left").round(2).drop(columns=['es_name'])
### Iter and get results
df_metrics = pd.DataFrame()
for i, row in df_reference.iterrows():
df_1 = df_results_aff_cv[
(df_results_aff_cv['semantic_model']==row['semantic_model']) &
(df_results_aff_cv['regression_model']==row['regression_model']) &
(df_results_aff_cv['en_name']==row['en_name'])
]
df_2 = df_results_disco_cv[
(df_results_disco_cv['semantic_model']==row['semantic_model']) &
(df_results_disco_cv['regression_model']==row['regression_model']) &
(df_results_disco_cv['en_name']==row['en_name'])
]
list_f1_df1 = list(df_1['f1_weighted'].values)
list_f1_df2 = list(df_2['f1_weighted'].values)
list_kappa_df1 = list(df_1['kappa'].values)
list_kappa_df2 = list(df_2['kappa'].values)
list_auc_df1 = list(df_1['auc'].values)
list_auc_df2 = list(df_2['auc'].values)
    try:
        _, pVal_f1 = stats.kruskal(list_f1_df1, list_f1_df2)
        _, pVal_kappa = stats.kruskal(list_kappa_df1, list_kappa_df2)
        _, pVal_auc = stats.kruskal(list_auc_df1, list_auc_df2)
    except Exception:  # kruskal fails when a group is empty or all scores are identical
        pVal_f1 = 1
        pVal_kappa = 1
        pVal_auc = 1
df_metrics_iter = pd.DataFrame(
{'category': [row['en_name']],
'semantic_model': [row['semantic_model']],
'prediction_model': [row['regression_model']],
'comb_1': ['All'],
         'comb_2': ['No affective'],
'mean_1_f1': [np.mean(list_f1_df1)],
'mean_2_f1': [np.mean(list_f1_df2)],
'median_1_f1': [np.median(list_f1_df1)],
'median_2_f1': [np.median(list_f1_df2)],
'p-value_f1': [pVal_f1],
'mean_1_kappa': [np.mean(list_kappa_df1)],
'mean_2_kappa': [np.mean(list_kappa_df2)],
'median_1_kappa': [np.median(list_kappa_df1)],
'median_2_kappa': [np.median(list_kappa_df2)],
'p-value_kappa': [pVal_kappa],
'mean_1_auc': [np.mean(list_auc_df1)],
'mean_2_auc': [np.mean(list_auc_df2)],
'median_1_auc': [np.median(list_auc_df1)],
'median_2_auc': [np.median(list_auc_df2)],
'p-value_auc': [pVal_auc],
}
)
df_metrics = df_metrics.append(df_metrics_iter)
df_metrics.round(2).to_csv(
"tables_paper/df_results_emotion_all_vs_no_aff_cv.csv", index=False)
# Plot Data
df_aux = (df_metrics[['category', 'comb_1', 'mean_1_auc', 'p-value_auc']]
.rename(columns={
'comb_1':'configuration',
'mean_1_auc':'mean_auc'})
)
df_aux['configuration'] = 'best reference'
df_aux_2 = (df_metrics[['category', 'comb_2', 'mean_2_auc', 'p-value_auc']]
.rename(columns={'comb_2':'configuration',
'mean_2_auc':'mean_auc'})
)
df_aux_2['configuration'] = 'without additional features'
df_plot = (
df_aux_2
.append(df_aux)
)
df_plot = df_plot.replace("Fear (ordinal)", "fear (ordinal)")
df_plot = df_plot.replace("anger", "anger (ordinal)")
df_plot = df_plot[df_plot['configuration']!='best reference']
df_plot_previous = pd.read_csv("tables_paper/df_results_emotions_q3.csv")
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 30 16:32:57 2018
@author: <NAME>
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from eotg import eotg
#%% Import data from the CSV downloaded from Banxico
usdmxn_csv = pd.read_csv('Precios/tipoCambio.csv', index_col='Fecha').dropna()
#usdmxn = usdmxn[~usdmxn['Determinación'].isin(['N/E'])]
usdmxn_csv.columns=['USD/MXN']
usdmxn_csv.plot(figsize=(12,6))
#%% Compute annual returns using eotg.calc_annual_ret
usdmxn = usdmxn_csv
usdmxn.index = pd.to_datetime(usdmxn.index, yearfirst=True, box=True, infer_datetime_format=True)
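# eotg.calc_annual_ret is the author's helper and its code is not shown here.
# The sketch below is only a rough guess at an annual-return calculation from a
# daily exchange-rate series (last observation per calendar year, year-over-year
# change); it is an assumption, not the actual eotg implementation.
def _demo_annual_returns(prices):
    yearly = prices.resample('A').last()   # last quote of each calendar year
    return yearly.pct_change().dropna()    # year-over-year percentage change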
import os
import ast
import pandas as pd
import numpy as np
from datetime import datetime
import time
import logging
level_config = {'debug': logging.DEBUG, 'info': logging.INFO}
FILE_SIZE = 500
BYTES_PER_PKT = 1500.0*8
MILLISEC_IN_SEC = 1000.0
EXP_LEN = 1000 # millisecond
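# The constants above suggest converting a chunk's packet count and download time
# into throughput. The helper below is only a guess at that conversion and is not
# taken from this project; note that BYTES_PER_PKT already stores bits per packet
# (1500 bytes * 8).
def _demo_throughput_mbps(packets_per_chunk, download_time_ms):
    bits = packets_per_chunk * BYTES_PER_PKT
    seconds = download_time_ms / MILLISEC_IN_SEC
    return bits / seconds / 1e6  # megabits per second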
class Metric:
def __init__(self,name,mi=1., lbd=1., mi_s=1.,log_level='debug'):
self.name = name
self.mi = mi
self.lbd = lbd
self.mi_s = mi_s
log_level = level_config[log_level.lower()]
logging.basicConfig(level=log_level)
self.logger = logging.getLogger(__name__)
def calc(self,listRate,listRebuffer):
pass
def tabulation(self,listQoE,scores = pd.DataFrame(),abrRule = 'abr Rule',prefix=''):
scores_tmp = pd.DataFrame()
scores_tmp['abr Rule'] = [ abrRule for i in listQoE]
scores_tmp['Average value'] = np.asarray([i[0] for i in listQoE])
scores_tmp['Metrics'] = [ self.name for i in listQoE]
scores = scores.append(scores_tmp)
scores_tmp = pd.DataFrame()
scores_tmp['Average value'] = np.asarray([i[1] for i in listQoE])
scores_tmp['Metrics'] = [ prefix+'_'+'Bitrate Utility' for i in listQoE]
scores_tmp['abr Rule'] = [ abrRule for i in listQoE]
scores = scores.append(scores_tmp)
scores_tmp['Average value'] = np.asarray([i[2] for i in listQoE])
scores_tmp['Metrics'] = [ prefix+'_'+'Smoothness Penalty' for i in listQoE]
scores_tmp['abr Rule'] = [ abrRule for i in listQoE]
scores = scores.append(scores_tmp)
scores_tmp = pd.DataFrame()
scores_tmp['Average value'] = np.asarray([i[3] for i in listQoE])
scores_tmp['Metrics'] = [ prefix+'_'+'Rebuffering Penalty' for i in listQoE]
scores_tmp['abr Rule'] = [ abrRule for i in listQoE]
scores = scores.append(scores_tmp)
        scores_tmp = pd.DataFrame()
"""
Functions used to compile water quality data from files that have already undergone basic formatting to have the same
column headers and units. List of data sources is available in readme.md file.
Functions:
* format_lake_data: Create additional columns for date and sampling frequency and round to daily means
* calc_growth_window: Detects the growth window for each lake in each year it's sampled using the daily mean dataframe,
and sifts for the data within the growth window and during the pre-growth window period
* growth_window_means: Calculates rates and mean values for environmental variables during each growth window and during
the pre-growth window period
* gw_summary: prints a summary of statistics for bloom type and lake trophic status in the dataset
* select_daily_mean: keep daily mean data only for lakes that are retained in the final growth window dataset
* get_tsi_coords: calculate the trophic status index (TSI) for each lake, assign a trophic status class, and attach
lake coordinates
* lake_summary: summarize the sampling period, sampling effort, and variables sampled for each lake
<NAME>
"""
import pandas as pd
from dplython import DplyFrame, X, sift, select, arrange, mutate
import numpy as np
from scipy.signal import find_peaks
from scipy.signal import savgol_filter
def format_lake_data(all_lakes):
"""
General formatting for lake data. Adds columns for date (year, month, day, and day of year) and calculates the
number of samples collected each year. Creates a separate dataframe rounded to the daily mean and sifted for at
least 6 samples collected per year.
input:
all_lakes: Compiled DplyFrame containing in situ data for all lakes to be analyzed
output:
all_lakes: Compiled data with additional columns (not rounded to daily mean)
daily_mean: additional data frame containing the daily mean values for all numerical parameters
"""
# convert columns to appropriate data type
all_lakes.loc[:, 'chla'] = pd.to_numeric(all_lakes.loc[:, 'chla'])
all_lakes.loc[:, 'temp'] = pd.to_numeric(all_lakes.loc[:, 'temp'])
# convert date to datetime and create additional columns
all_lakes.loc[:, 'date'] = pd.to_datetime(all_lakes.loc[:, 'date'])
    all_lakes.loc[:, 'year'] = pd.DatetimeIndex(all_lakes.loc[:, 'date']).year
    all_lakes.loc[:, 'month'] = pd.DatetimeIndex(all_lakes.loc[:, 'date']).month
    all_lakes.loc[:, 'day'] = pd.DatetimeIndex(all_lakes.loc[:, 'date']).day
all_lakes.loc[:, 'day_of_year'] = pd.PeriodIndex(all_lakes.loc[:, 'date'], freq='D').dayofyear
# round to the nearest day and convert back to datetime
all_lakes.loc[:, 'date'] = pd.PeriodIndex(all_lakes.loc[:, 'date'], freq='D')
all_lakes.loc[:, 'date'] = all_lakes.loc[:, 'date'].astype(str)
all_lakes.loc[:, 'date'] = pd.to_datetime(all_lakes.loc[:, 'date'])
# calculate daily mean
daily_mean = DplyFrame(all_lakes.groupby(['lake', 'date'], as_index=False).mean())
# arrange by date and drop rows where chlorophyll-a is not a number (nan)
daily_mean = daily_mean >> arrange(X.date)
daily_mean.dropna(subset=['chla'], inplace=True)
# add column for number of samples
master_mean_df = pd.DataFrame()
for name, group in daily_mean.groupby(['lake', 'year']):
group.loc[:, 'num_samples'] = len(group['chla'])
master_mean_df = DplyFrame(pd.concat([master_mean_df, group], axis=0))
daily_mean = DplyFrame(master_mean_df) >> sift(X.num_samples >= 6)
return all_lakes, daily_mean
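# A tiny, self-contained illustration of the rounding-to-daily-mean step above;
# the lake name, timestamps, and chlorophyll-a values are made up.
def _demo_daily_mean():
    toy = pd.DataFrame({'lake': ['A', 'A', 'A'],
                        'date': pd.to_datetime(['2005-06-01 08:00', '2005-06-01 16:00', '2005-06-02 09:00']),
                        'chla': [2.0, 4.0, 3.0]})
    toy.loc[:, 'date'] = toy.loc[:, 'date'].dt.floor('D')  # truncate to the day, like the PeriodIndex round trip above
    return toy.groupby(['lake', 'date'], as_index=False).mean()  # 2005-06-01 collapses to chla = 3.0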
def calc_growth_window(df, threshold_inc, num_sample_threshold):
"""
Detects the growth window period based on the the rate of change in chlorophyll-a concentration that has been
smoothed with the Savitzky-Golay filter. First, optima are flagged in the data using the find_peaks function,
    indicating the end of a growth window. The growth window begins at the preceding minimum or, when no minimum is
    flagged, on the first day the chlorophyll-a rate of change exceeds threshold_inc (and if the rate never exceeds
    that threshold, it begins where the rate first rises above zero). Daily mean data is sifted for samples collected both within the growth window and
during the 1 and 2 weeks leading up to it (the pre-growth window), to be analyzed by the growth_window_means
function. See associated manuscript for full explanation of methods and rationale.
input:
df: DplyFrame containing daily mean in situ data for all lakes to be analyzed (from format_lake_data)
threshold_inc: minimum chlorophyll-a rate of change to constitute the start of the growth window when there
is no minimum flagged in the data.
num_sample_threshold: Minimum number of samples per year that will be retained in the growth window dataset.
output:
master_gw_df: Water quality data for all detected growth windows, compiled into one DplyFrame
springsummer_gw_doy: Dataframe containing the day of year for the start and end of each growth window
master_prev_2weeks_gw_df: Compiled water quality data for each 2 week pre-growth window
"""
# make empty dataframes (will be appended to later)
master_gw_df = pd.DataFrame(columns=['lake', 'date', 'year', 'season', 'day_of_year', 'start_day', 'end_day', 'chla_increase', 'chla_roc',
'chla', 'poc', 'tp', 'srp', 'par', 'ph', 'tkn', 'tdn', 'nh4', 'no2',
'no3', 'nox'])
master_prev_2weeks_gw_df = pd.DataFrame(columns=['lake', 'date', 'year', 'season', 'day_of_year', 'start_day', 'end_day',
'chla', 'chla_roc', 'poc', 'tp', 'srp', 'par', 'ph', 'tkn', 'tdn', 'nh4', 'no2',
'no3', 'nox'])
# sift data for minimum sampling frequency
df = df >> sift(X.num_samples >= num_sample_threshold)
for name, group in df.groupby(['lake', 'year']): # group by lake and year to detect growth windows
group.reset_index(inplace=True)
# determine savgol_filter window length (smaller window for fewer samples)
if group.loc[0, 'num_samples'] <= 15:
window_len = 3
else:
window_len = 5
# 1) smooth the data and find location of the optima along the smoothed line
savgol = savgol_filter(group['chla'], window_length=window_len, polyorder=1)
group.loc[:, 'savgol_chla'] = savgol
# calculate chlorophyll rate of change and flag all days above the threshold as true
group.loc[:, 'chla_roc'] = group.loc[:, 'savgol_chla'].diff() / group.loc[:, 'day_of_year'].diff()
group.loc[:, 'chla_increase'] = group.loc[:, 'chla_roc'].gt(threshold_inc)
# find peaks and minima
y = group['savgol_chla']
peaks, properties = find_peaks(y, prominence=2)
y2 = y * -1 # use -y to find the minima
minima, min_properties = find_peaks(y2, prominence=0.5)
# flag peaks in the dataframe
peaks = DplyFrame(peaks)
peak_df = group.loc[group.index.intersection(peaks[0])]
peak_df['max_flag'] = True
group = pd.merge(group, (peak_df >> select(X.day_of_year, X.max_flag)), how='left', left_on='day_of_year',
right_on='day_of_year')
# flag minima in the dataframe
minima = DplyFrame(minima)
trough_df = group.loc[group.index.intersection(minima[0])]
trough_df['min_flag'] = True
group = pd.merge(group, (trough_df >> select(X.day_of_year, X.min_flag)), how='left',
left_on='day_of_year', right_on='day_of_year')
# 2) find spring and summer or single growth windows for lakes with 2 or 1 defined peaks, respectively
num_peaks = len(group['max_flag'].dropna()) # count the number of optima in the data
if num_peaks == 2: # spring and summer growth windows occur
# find end date of growth window
spring_end_index = group.where(group.max_flag == True).first_valid_index()
spring_end_day = group.loc[spring_end_index, 'day_of_year']
# find start date of growth window
spring_group = group >> sift(X.day_of_year < spring_end_day)
num_minima = len(spring_group['min_flag'].dropna())
if num_minima == 0: # no previous min, use the first increase above threshold_inc
spring_start_index = spring_group.where(spring_group.chla_increase == True).first_valid_index()
if spring_start_index is None: # if there is no valid increase beforehand
spring_start_index = spring_group.where(spring_group.chla_roc > 0).first_valid_index() # find first day with a rate above zero
if spring_start_index is None:
spring_start_day = spring_group.loc[spring_group.first_valid_index(), 'day_of_year'] # select first sampling day
else:
spring_start_day = spring_group.loc[(spring_start_index - 1), 'day_of_year'] # select first day with rate > 0
else:
spring_start_day = spring_group.loc[(spring_start_index - 1), 'day_of_year'] # select first day with rate > threshold_inc
if num_minima > 0: # a previous minimum is present
spring_start_index = spring_group.where(spring_group.min_flag == True).last_valid_index() # select day with minimum closest to the max
spring_start_day = spring_group.loc[spring_start_index, 'day_of_year']
# sift growth window data based on start and end dates
spring_gw = group >> sift(X.day_of_year <= spring_end_day) >> sift(X.day_of_year >= spring_start_day)
spring_gw.loc[:, 'season'] = 'spring'
spring_gw.loc[:, 'start_day'] = spring_start_day
spring_gw.loc[:, 'end_day'] = spring_end_day
# sift out 1 and 2 week pre-growth window data
spring_prev_2weeks_start_day = spring_start_day - 15
prev_2weeks_spring_df = group >> sift(X.day_of_year >= spring_prev_2weeks_start_day) >> sift(
X.day_of_year <= spring_start_day)
prev_2weeks_spring_df.loc[:, 'season'] = 'spring'
prev_2weeks_spring_df.loc[:, 'start_day'] = spring_prev_2weeks_start_day
prev_2weeks_spring_df.loc[:, 'end_day'] = spring_start_day
# append spring gw data to main dataframe
master_gw_df = pd.concat([master_gw_df, spring_gw], axis=0)
master_prev_2weeks_gw_df = pd.concat([master_prev_2weeks_gw_df, prev_2weeks_spring_df], axis=0)
# sift out spring data and repeat for summer
summer_df = group >> sift(X.day_of_year > spring_end_day)
# find end date of growth window
summer_end_index = summer_df.where(summer_df.max_flag == True).first_valid_index()
summer_end_day = summer_df.loc[summer_end_index, 'day_of_year']
# find start date of growth window
summer_group = summer_df >> sift(X.day_of_year < summer_end_day)
num_minima = len(summer_group['min_flag'].dropna())
if num_minima == 0: # no previous min, use the first increase above threshold_inc
summer_start_index = summer_group.where(summer_group.chla_increase == True).first_valid_index()
if summer_start_index is None:
summer_start_index = summer_group.where(summer_group.chla_roc > 0).first_valid_index()
if summer_start_index is None:
summer_start_day = summer_group.loc[summer_group.first_valid_index(), 'day_of_year']
else:
summer_start_day = summer_group.loc[(summer_start_index-1), 'day_of_year']
else:
summer_start_day = summer_group.loc[(summer_start_index - 1), 'day_of_year']
if num_minima > 0: # a previous min is present
summer_start_index = summer_group.where(summer_group.min_flag == True).first_valid_index()
summer_start_day = summer_group.loc[summer_start_index, 'day_of_year']
# sift summer growth window data based on start and end dates
summer_gw = summer_df >> sift(X.day_of_year <= summer_end_day) >> sift(X.day_of_year >= summer_start_day)
summer_gw.loc[:, 'season'] = 'summer'
summer_gw.loc[:, 'start_day'] = summer_start_day
summer_gw.loc[:, 'end_day'] = summer_end_day
# sift out 1 and 2 week pre-growth window data
summer_prev_2weeks_start_day = summer_start_day - 15
prev_2weeks_summer_df = group >> sift(X.day_of_year >= summer_prev_2weeks_start_day) >> sift(
X.day_of_year <= summer_start_day)
prev_2weeks_summer_df.loc[:, 'season'] = 'summer'
prev_2weeks_summer_df.loc[:, 'start_day'] = summer_prev_2weeks_start_day
prev_2weeks_summer_df.loc[:, 'end_day'] = summer_start_day
# append summer gw data to main dataframe
master_gw_df = pd.concat([master_gw_df, summer_gw], axis=0)
master_prev_2weeks_gw_df = pd.concat([master_prev_2weeks_gw_df, prev_2weeks_summer_df], axis=0)
if num_peaks == 1: # single growth window
# find end date of growth window
single_gw_end_index = group.where(group.max_flag == True).first_valid_index()
single_gw_end_day = group.loc[single_gw_end_index, 'day_of_year']
# find start date of growth window
single_group = group >> sift(X.day_of_year < single_gw_end_day)
num_minima = len(single_group['min_flag'].dropna())
if num_minima == 0: # no previous min, use the first increase above threshold_inc
single_gw_start_index = single_group.where(single_group.chla_increase == True).first_valid_index()
if single_gw_start_index is None:
single_gw_start_index = single_group.where(single_group.chla_roc > 0).first_valid_index()
if single_gw_start_index is None:
single_gw_start_day = single_group.loc[single_group.first_valid_index(), 'day_of_year']
else:
single_gw_start_day = single_group.loc[(single_gw_start_index-1), 'day_of_year']
else:
single_gw_start_day = single_group.loc[(single_gw_start_index - 1), 'day_of_year']
if num_minima > 0: # a previous min is present
single_gw_start_index = single_group.where(single_group.min_flag == True).last_valid_index()
single_gw_start_day = single_group.loc[single_gw_start_index, 'day_of_year']
# sift single growth window data based on start and end dates
single_gw_gw = single_group >> sift(X.day_of_year <= single_gw_end_day) >> sift(X.day_of_year >= single_gw_start_day)
single_gw_gw.loc[:, 'season'] = 'single'
single_gw_gw.loc[:, 'start_day'] = single_gw_start_day
single_gw_gw.loc[:, 'end_day'] = single_gw_end_day
# sift out 1 and 2 week pre-growth window data
single_gw_prev_2weeks_start_day = single_gw_start_day - 15
prev_2weeks_single_gw_df = group >> sift(X.day_of_year >= single_gw_prev_2weeks_start_day) >> sift(
X.day_of_year <= single_gw_start_day)
prev_2weeks_single_gw_df.loc[:, 'season'] = 'single'
prev_2weeks_single_gw_df.loc[:, 'start_day'] = single_gw_prev_2weeks_start_day
prev_2weeks_single_gw_df.loc[:, 'end_day'] = single_gw_start_day
# append single gw data to main dataframe
master_gw_df = pd.concat([master_gw_df, single_gw_gw], axis=0)
master_prev_2weeks_gw_df = pd.concat([master_prev_2weeks_gw_df, prev_2weeks_single_gw_df], axis=0)
# create a separate doy file
springsummer_gw_doy = DplyFrame(master_gw_df) >> select(X.lake, X.year, X.season, X.start_day, X.end_day)
springsummer_gw_doy.drop_duplicates(inplace=True)
return master_gw_df, springsummer_gw_doy, master_prev_2weeks_gw_df
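# The detection above hinges on Savitzky-Golay smoothing followed by flagging of
# optima and minima. A compact synthetic illustration of that core step is given
# below; the two Gaussian "blooms" and the prominence values are illustrative only.
def _demo_peak_detection():
    day_of_year = np.arange(120, 300, 7)
    chla = (5 + 8 * np.exp(-((day_of_year - 160) / 15.0) ** 2)
              + 6 * np.exp(-((day_of_year - 230) / 20.0) ** 2))
    smoothed = savgol_filter(chla, window_length=5, polyorder=1)
    peaks, _ = find_peaks(smoothed, prominence=2)       # candidate growth window ends
    minima, _ = find_peaks(-smoothed, prominence=0.5)   # candidate growth window starts
    return day_of_year[peaks], day_of_year[minima]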
def growth_window_means(spring_and_summer_doy, spring_and_summer_selected, prev_2weeks_springsummer_data, min_gw_length, t_max, t_min, t_opt):
"""
This function calculates chlorophyll-a rate, maximum chlorophyll-a concentration, accumulated chlorophyll-a,and mean
values for environmental variables during each growth window. Mean water temperature, solar radiation, and total
phosphorus is calculated for the pre-growth window period. The chlorophyll-a rate of increase is corrected for
temperature using the f_temp calculation (Rosso et al., 1995).
input:
spring_and_summer_doy: dataframe with the start and end day of year for each growth window
spring_and_summer_selected: dataframe with the chlorophyll concentration and temperature for each sampling
day within each growth window
prev_2weeks_springsummer_data: dataframe containing all lake data for the 2 weeks leading up to the spring and summer growth windows
min_gw_length: minimum length for the growth window (set to 5 for now)
t_max: maximum temperature for the f_temp function
t_min: minimum temperature for the f_temp function
t_opt: optimum temperature for the f_temp function
output:
springsummer_gw_data: dataframe with a row for each lake/year/season with the chlorophyll rate of increase and
mean temperature during the growth window and pre-growth window period
"""
print('calculating means')
# calculate growth window length in "spring and summer doy" file and merge with "spring and summer selected"
spring_and_summer_doy = spring_and_summer_doy >> mutate(growth_window_length=X.end_day - X.start_day)
springsummer_data = pd.merge(spring_and_summer_selected, spring_and_summer_doy, how='left',
left_on=['lake', 'year', 'season', 'start_day', 'end_day'],
right_on=['lake', 'year', 'season', 'start_day', 'end_day'])
# make an empty dataframe
springsummer_gw_data = pd.DataFrame(columns=['lake', 'year', 'season', 'chla_rate', 'max_chla', 'poc_rate', 'chla_to_poc',
'gw_temp', 'gw_tp', 'gw_srp', 'gw_secchi', 'gw_ph',
'gw_tkn', 'gw_tdn', 'gw_length',
'start_day', 'end_day', 'specific_chla_rate', 'f_temp',
'temp_corrected_specific_chla_rate'])
for name, group in springsummer_data.groupby(['lake', 'year', 'season']):
first_index = group.first_valid_index() # first index in the group
last_index = group.last_valid_index() # last index in the group
group.loc[:, 'gw_length'] = group.loc[last_index, 'day_of_year'] - group.loc[first_index, 'day_of_year'] # growth window length (days)
# calculate the chlorophyll-a rate, specific rate, and max concentration
group.loc[:, 'chla_max-min'] = group.loc[last_index, 'chla'] - group.loc[first_index, 'chla']
group.loc[:, 'chla_rate'] = group.loc[:, 'chla_max-min'] / group.loc[:, 'gw_length']
group.loc[:, 'specific_chla_rate'] = group.loc[:, 'chla_rate'] / group.loc[first_index, 'chla']
group.loc[:, 'max_chla'] = group.loc[:, 'chla'].max()
# Calculate accumulated chlorophyll-a as the area under the curve during the growth window
group.loc[:, 'acc_chla'] = np.trapz(group.loc[:, 'savgol_chla'], x=group.loc[:, 'day_of_year'])
# calculate the rate of change in poc concentration (mg/L)
group.loc[:, 'poc_max-min'] = group.loc[last_index, 'poc'] - group.loc[first_index, 'poc']
group.loc[:, 'poc_rate'] = group.loc[:, 'poc_max-min'] / group.loc[:, 'gw_length']
# calculate chla:poc ratio after converting chlorophyll-a to mg/L
group.loc[:, 'chla_to_poc'] = (group.loc[:, 'chla']/1000) /group.loc[:, 'poc']
# calculate mean environmental variables during the window
group.loc[:, 'gw_temp'] = group.loc[:, 'temp'].mean()
mean_temp = group.loc[:, 'temp'].mean() # save mean temperature as an object for f_temp calculation
group.loc[:, 'gw_tp'] = group.loc[:, 'tp'].mean()
group.loc[:, 'gw_secchi'] = group.loc[:, 'secchi'].mean()
group.loc[:, 'gw_poc'] = group.loc[:, 'poc'].mean()
group.loc[:, 'gw_ph'] = group.loc[:, 'ph'].mean()
group.loc[:, 'gw_tkn'] = group.loc[:, 'tkn'].mean()
group.loc[:, 'gw_srp'] = group.loc[:, 'srp'].mean()
# calculate f_temp
group.loc[:, 'f_temp'] = (mean_temp - t_max) * (mean_temp - t_min) ** 2 / (
(t_opt - t_min) * ((t_opt - t_min) * (mean_temp - t_opt) - (t_opt - t_max) * (
t_opt + t_min - 2 * mean_temp)))
# divide specific growth rate by f_temp
group.loc[:, 'temp_corrected_specific_chla_rate'] = group.loc[:, 'specific_chla_rate'] / group.loc[:, 'f_temp']
# keep one row for each lake/year/season append each group to the empty dataframe
chla_temp = group.head(1)
springsummer_gw_data = pd.concat([springsummer_gw_data, chla_temp], axis=0)
# 2 week pre-growth window calculations
prev_2weeks_data = pd.DataFrame(columns=['lake', 'year', 'season', 'pre_gw_temp', 'pre_gw_tp', 'pre_gw_tkn'])
for name, group in prev_2weeks_springsummer_data.groupby(['lake', 'year', 'season']):
# calculate mean water quality variables during the window
group.loc[:, 'pre_gw_temp'] = group.loc[:, 'temp'].mean()
group.loc[:, 'pre_gw_tp'] = group.loc[:, 'tp'].mean()
group.loc[:, 'pre_gw_tkn'] = group.loc[:, 'tkn'].mean()
# keep one row and concatenate onto the prev_2weeks_data dataframe
prev_2wks = group.head(1)
prev_2wks = DplyFrame(prev_2wks) >> select(X.lake, X.year, X.season, X.pre_gw_temp, X.pre_gw_tp,
X.pre_gw_tkn)
prev_2weeks_data = pd.concat([prev_2weeks_data, prev_2wks], axis=0)
# merge the three dataframes together
springsummer_gw_data = pd.merge(springsummer_gw_data, prev_2weeks_data, left_on=['lake', 'year', 'season'],
right_on=['lake', 'year', 'season'], how='left')
# sift columns based on chlorophyll rate and growth window length
springsummer_gw_data = DplyFrame(springsummer_gw_data) >> sift(X.chla_rate >= 0) >> sift(
X.gw_length >= min_gw_length)
# select columns to export
springsummer_gw_data = springsummer_gw_data >> select(X.lake, X.year, X.season, X.start_day, X.end_day, X.gw_length,
X.chla_rate, X.max_chla, X.acc_chla, X.poc_rate,
X.chla_to_poc, X.gw_temp, X.gw_tp,
X.gw_secchi, X.gw_ph, X.gw_srp, X.gw_tkn,
X.specific_chla_rate, X.f_temp,
X.temp_corrected_specific_chla_rate, X.pre_gw_temp,
X.num_samples, X.pre_gw_tp, X.pre_gw_tkn)
return springsummer_gw_data
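# The temperature correction above uses the cardinal temperature model with
# inflexion (CTMI) of Rosso et al. A stand-alone restatement is given below with
# placeholder cardinal temperatures; the actual t_min/t_opt/t_max values are
# supplied by the caller of growth_window_means and are not assumed here.
def _demo_f_temp(mean_temp=18.0, t_min=2.0, t_opt=25.0, t_max=40.0):
    numerator = (mean_temp - t_max) * (mean_temp - t_min) ** 2
    denominator = (t_opt - t_min) * ((t_opt - t_min) * (mean_temp - t_opt)
                                     - (t_opt - t_max) * (t_opt + t_min - 2 * mean_temp))
    return numerator / denominator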
def gw_summary(gw_data):
# print % of each growth window type
perc_spring = len(gw_data.loc[(gw_data['season'] == 'spring')]) / len(gw_data['season']) * 100
perc_summer = len(gw_data.loc[(gw_data['season'] == 'summer')]) / len(gw_data['season']) * 100
perc_single = len(gw_data.loc[(gw_data['season'] == 'single')]) / len(gw_data['season']) * 100
print("percent spring: ", perc_spring)
print("percent summer: ", perc_summer)
print("percent single gw: ", perc_single)
# print number of lakes in each trophic status
oligo = len(gw_data.loc[(gw_data['trophic_status'] == 'oligotrophic')])
meso = len(gw_data.loc[(gw_data['trophic_status'] == 'mesotrophic')])
eu = len(gw_data.loc[(gw_data['trophic_status'] == 'eutrophic')])
hyper = len(gw_data.loc[(gw_data['trophic_status'] == 'hypereutrophic')])
print("number oligotrophic: ", oligo)
print("number mesotrophic: ", meso)
print("number eutrophic: ", eu)
print("number hypereutrophic: ", hyper)
# print % of each trophic status
perc_oligo = oligo / len(gw_data['trophic_status']) * 100
perc_meso = meso / len(gw_data['trophic_status']) * 100
perc_eu = eu / len(gw_data['trophic_status']) * 100
perc_hyper = hyper / len(gw_data['trophic_status']) * 100
print("percent oligotrophic: ", perc_oligo)
print("percent mesotrophic: ", perc_meso)
print("percent eutrophic: ", perc_eu)
print("percent hypereutrophic: ", perc_hyper)
def select_daily_mean(daily_mean, gw_data):
"""
Select the lakes in the daily_mean file that are retained in the final growth window.
Input:
daily_mean: dataframe with all compiled daily mean water quality data
gw_data: growth window dataframe (output from the growth_window_means function)
Output:
selected_daily_mean: Dataframe of daily mean data for all lakes within the growth window dataset
"""
final_lakes_list = gw_data.lake.unique()
boolean_series = daily_mean.lake.isin(final_lakes_list)
selected_daily_mean = daily_mean[boolean_series]
return selected_daily_mean
def get_tsi_coords(df, coords_df):
"""
This function calculates the trophic status index (TSI) for each lake using the mean chlorophyll-a concentration
for all samples and the equation provided by the North American lake Management Society (NALMS). A trophic
status is assigned to each station based on the TSI. More information on the NALMS guidelines can be found here:
https://www.nalms.org/secchidipin/monitoring-methods/trophic-state-equations.
input:
df: dataframe of daily mean values
coords_df: dataframe of coordinates to merge with trophic status data
output:
trophic_status_df: dataframe with 'tsi' and 'trophic status' columns added
ts_coords: dataframe with columns for lake, tsi, trophic_status, lake_lat, and lake_long
"""
# group by lake to calculate mean chlorophyll-a concentration
chla_average = df.groupby(['lake'], as_index=False).chla.mean()
# rename chla and calculate tsi
chla_average.rename(columns={'chla': 'total_mean_chla'}, inplace=True)
chla_average.loc[:, 'tsi'] = (9.81 * np.log(chla_average['total_mean_chla'])) + 30.6
trophic_status_df = pd.merge(df, chla_average, how='left', left_on='lake', right_on='lake')
# assign trophic status class to each subset of the dataframe
oligo_df = trophic_status_df >> sift(X.tsi < 40)
oligo_df['trophic_status'] = 'oligotrophic'
meso_df = trophic_status_df >> sift(X.tsi < 50) >> sift(X.tsi >= 40)
meso_df['trophic_status'] = 'mesotrophic'
eut_df = trophic_status_df >> sift(X.tsi < 70) >> sift(X.tsi >= 50)
eut_df['trophic_status'] = 'eutrophic'
hyper_df = trophic_status_df >> sift(X.tsi >= 70)
hyper_df['trophic_status'] = 'hypereutrophic'
# append together
ts_list = [oligo_df, meso_df, eut_df, hyper_df]
trophic_status_df = pd.concat(ts_list, axis=0)
# keep first row for the summary
trophic_status_summary = trophic_status_df.groupby(['lake']).head(1) >> select(X.lake, X.tsi, X.trophic_status)
# merge tsi with coordinate file
ts_coords = pd.merge(trophic_status_summary, coords_df, how='left', left_on=['lake'], right_on=['lake'])
return trophic_status_summary, ts_coords
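# Stand-alone restatement of the TSI calculation and NALMS class boundaries used
# above; the mean chlorophyll-a value in the default argument is an arbitrary example.
def _demo_tsi(mean_chla=12.0):
    tsi = 9.81 * np.log(mean_chla) + 30.6
    if tsi < 40:
        status = 'oligotrophic'
    elif tsi < 50:
        status = 'mesotrophic'
    elif tsi < 70:
        status = 'eutrophic'
    else:
        status = 'hypereutrophic'
    return tsi, status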
def lake_summary(daily_mean, ts_coords):
"""
Creates a summary table with one row for each lake in the daily_mean dataframe
input:
daily_mean: dataframe with lake data to be summarized
ts_coords: dataframe with columns for lake, tsi, trophic_status, lake_lat, and lake_long
output:
lake_summary: dataframe with one row for each lake, summarizing the sampling start and end dates, list of
variables sampled, trophic status, etc.
"""
daily_mean = daily_mean >> arrange(X.date)
lake_summary = pd.DataFrame(columns=['lake', 'monitoring_organization', 'start_sampling', 'end_sampling',
'days_sampled', 'years_sampled', 'parameters'])
for name, group in daily_mean.groupby('lake'):
group.reset_index(inplace=True)
group_summary = pd.DataFrame(columns=['lake', 'monitoring_organization', 'start_sampling', 'end_sampling',
'days_sampled', 'parameters'])
group_summary.loc[0, 'lake'] = group.loc[0, 'lake']
group_summary.loc[0, 'start_sampling'] = group.loc[0, 'year']
group_summary.loc[0, 'end_sampling'] = group.loc[(len(group)-1), 'year']
group_summary.loc[0, 'days_sampled'] = len(group['day'])
        group_summary = group_summary >> mutate(years_sampled=X.end_sampling - X.start_sampling + 1)
variable_df = group.drop(['lake', 'date', 'year', 'month', 'day', 'day_of_year', 'num_samples',
'index'], axis=1)
variable_df.dropna(how='all', axis=1, inplace=True)
        group_summary.loc[0, 'parameters'] = list(variable_df.columns.values)
        lake_summary = pd.concat([lake_summary, group_summary], axis=0)