prompt (string, lengths 19 to 1.03M) | completion (string, lengths 4 to 2.12k) | api (string, lengths 8 to 90)
---|---|---|
"""Provincial road network risks and adaptation maps
"""
import os
import sys
from collections import OrderedDict
import ast
import numpy as np
import geopandas as gpd
import pandas as pd
import cartopy.crs as ccrs
import cartopy.io.shapereader as shpreader
import matplotlib.pyplot as plt
from shapely.geometry import LineString
from vtra.utils import *
def main():
config = load_config()
regions = ['Lao Cai', 'Binh Dinh', 'Thanh Hoa']
hazard_cols = ['hazard_type','climate_scenario','year']
duration = 10
hazard_set = [
{
'hazard': 'landslide',
'name': 'Landslide'
},
{
'hazard': 'flashflood',
'name':'Flashflood'
},
{
'hazard': 'flooding',
'name': 'Fluvial flooding'
},
{
'hazard': 'typhoon flooding',
'name': 'Typhoon flooding'
}
]
change_colors = ['#1a9850','#66bd63','#a6d96a','#d9ef8b','#fee08b','#fdae61','#f46d43','#d73027','#969696']
change_labels = ['< -40','-40 to -20','-20 to -10','-10 to 0','0 to 10','10 to 20','20 to 40',' > 40','No change/value']
change_ranges = [(-1e10,-40),(-40,-20),(-20,-10),(-10,0),(0.001,10),(10,20),(20,40),(40,1e10)]
eael_set = [
{
'column': 'min_eael',
'title': 'Min EAEL',
'legend_label': "Expected Annual losses ('000 USD)",
'divisor': 1000,
'significance': 0
},
{
'column': 'max_eael',
'title': 'Max EAEL',
'legend_label': "Expected Annual losses ('000 USD)",
'divisor': 1000,
'significance': 0
}
]
adapt_set = [
{
'column': 'min_eael',
'title': 'Min EAEL',
'legend_label': "Expected Annual losses ('000 USD)",
'divisor': 1000,
'significance': 0
},
{
'column': 'max_eael',
'title': 'Max EAEL',
'legend_label': "Expected Annual losses ('000 USD)",
'divisor': 1000,
'significance': 0
},
{
'column': 'min_ini_adap_cost',
'title': 'Min Initial Investment',
'legend_label': "Initial investment (USD million)",
'divisor': 1000000,
'significance': 0
},
{
'column': 'max_ini_adap_cost',
'title': 'Max Initial Investment',
'legend_label': "Initial investment (USD million)",
'divisor': 1000000,
'significance': 0
},
{
'column': 'min_benefit',
'title': 'Min Benefit over time',
'legend_label': "Benefit (USD million)",
'divisor': 1000000,
'significance': 0
},
{
'column': 'max_benefit',
'title': 'Max Benefit over time',
'legend_label': "Benefit (USD million)",
'divisor': 1000000,
'significance': 0
},
{
'column': 'min_tot_adap_cost',
'title': 'Min Investment over time',
'legend_label': "Total Investment (USD million)",
'divisor': 1000000,
'significance': 0
},
{
'column': 'max_tot_adap_cost',
'title': 'Max Investment over time',
'legend_label': "Total Investment (USD million)",
'divisor': 1000000,
'significance': 0
},
{
'column': 'min_bc_ratio',
'title': 'Min BCR of adaptation over time',
'legend_label': "BCR",
'divisor': 1,
'significance': 0
},
{
'column': 'max_bc_ratio',
'title': 'Max BCR of adaptation over time',
'legend_label': "BCR",
'divisor': 1,
'significance': 0
}
]
adapt_set = [
{
'column': 'min_ini_adap_cost_perkm',
'title': 'Min Initial Investment per km',
'legend_label': "Initial Investment (USD million/km)",
'divisor': 1000000,
'significance': 0
},
{
'column': 'max_ini_adap_cost_perkm',
'title': 'Max Initial Investment per km',
'legend_label': "Initial Investment (USD million/km)",
'divisor': 1000000,
'significance': 0
},
{
'column': 'min_tot_adap_cost_perkm',
'title': 'Min Investment per km over time',
'legend_label': "Total Investment (USD million/km)",
'divisor': 1000000,
'significance': 0
},
{
'column': 'max_tot_adap_cost_perkm',
'title': 'Max Investment per km over time',
'legend_label': "Total Investment (USD million/km)",
'divisor': 1000000,
'significance': 0
},
]
adapt_cols = ['min_benefit','min_ini_adap_cost','min_ini_adap_cost_perkm','min_tot_adap_cost','min_tot_adap_cost_perkm','min_bc_ratio',\
'max_benefit','max_ini_adap_cost','max_ini_adap_cost_perkm','max_tot_adap_cost','max_tot_adap_cost_perkm','max_bc_ratio']
for region in regions:
region_file_path = os.path.join(config['paths']['data'], 'post_processed_networks',
'{}_roads_edges.shp'.format(region.lower().replace(' ', '')))
flow_file_path = os.path.join(config['paths']['output'], 'failure_results','minmax_combined_scenarios',
'single_edge_failures_minmax_{}_5_tons_100_percent_disrupt.csv'.format(region.lower().replace(' ', '')))
region_file = gpd.read_file(region_file_path,encoding='utf-8')
flow_file = pd.read_csv(flow_file_path)
region_file = pd.merge(region_file,flow_file,how='left', on=['edge_id']).fillna(0)
del flow_file
flow_file_path = os.path.join(config['paths']['output'], 'adaptation_results',
'output_adaptation_{}_10_days_max_disruption_fixed_parameters.csv'.format(region.lower().replace(' ', '')))
fail_scenarios = pd.read_csv(flow_file_path)
# -*- coding: utf-8 -*-
"""
Wrapper(s) and helpers for easily handling DataFrames in the context
of creating sklearn models.
"""
__all__ = ["DataFrameModifier", "read_excel", "read_csv"]
__version__ = "0.1"
__author__ = "<NAME>"
import copy
import logging
import warnings
from collections import OrderedDict
import numpy as np
import pandas as pd
from sklearn import preprocessing
#from statsmodels.stats.outliers_influence import variance_inflation_factor
#from statsmodels.tsa.stattools import LinAlgError
clock_fn = lambda a: (pd.to_numeric(a) % (1E9 * 3600 * 24)).astype(float) / 1E9 # seconds since midnight
time_fn = lambda a, b: (pd.to_numeric(a) - pd.to_numeric(b)).astype(float) / 1E9 # delta seconds
day_plus_hour = lambda d, h: pd.to_numeric(d) / 1E9 + clock_fn(h) # date in seconds since epoch
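# Illustrative sketch (not part of the original module): how the time helpers above
# behave on hypothetical timestamp Series. The timestamps below are made up.
# >>> start = pd.Series(pd.to_datetime(["2020-01-01 08:30:00"]))
# >>> end = pd.Series(pd.to_datetime(["2020-01-01 09:00:00"]))
# >>> clock_fn(start)[0]      # seconds since midnight -> 30600.0
# >>> time_fn(end, start)[0]  # delta seconds -> 1800.0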
def read_excel(path, sheet, datetime_columns=[], header=0, **kwargs):
"""Read Excel file, correctly interpreting Date and Time columns that
can be addressed by name instead of column index.
:param path: Path to source file
:type path: str or path-like object
:param str sheet: Name of the sheet you're interested in
:param list datetime_columns: Columns with these names are parsed
as date-columns
:param int header: The 0-based index of the header row in the
source file
:param \*\*kwargs: Further parameters are passed on
to `pandas.read_excel()`_
:return: pd.DataFrame
.. _`pandas.read_excel()`: https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_excel.html
"""
h = pd.read_excel(path, sheet, header=header).columns
parse_dates = [i for i, name in enumerate(h) if name in datetime_columns]
df = pd.read_excel(path, sheet, header=header,
parse_dates=parse_dates, **kwargs)
return df
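# Example usage (illustrative only; file, sheet and column names are hypothetical):
# >>> df = read_excel("measurements.xlsx", "Sheet1",
# ...                 datetime_columns=["Date", "Timestamp"], header=0)
# The two passes over the file exist only to translate column *names* into the
# column *indices* that pandas' ``parse_dates`` argument expects.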
def read_csv(path, datetime_columns=[], header=0, **kwargs):
"""Read CSV file, correctly interpreting Date and Time columns that
can be addressed by name instead of column index.
:param path: Path to source file
:type path: str or path-like object
:param list datetime_columns: Columns with these names are parsed
as date-columns during file reading
:param int header: The 0-based index of the header row in the
source file
:param \*\*kwargs: Further parameters are passed on
to `pandas.read_csv()`_
:return: pd.DataFrame
.. _`pandas.read_csv()`: https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_csv.html
"""
encoding = kwargs.get('encoding', 'utf8')
h = pd.read_csv(path, header=header, encoding=encoding).columns
parse_dates = [i for i, name in enumerate(h) if name in datetime_columns]
df = pd.read_csv(path, header=header, encoding=encoding,
parse_dates=parse_dates, **kwargs)
return df
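# Example usage (illustrative; path and column names are hypothetical):
# >>> df = read_csv("sensor_log.csv", datetime_columns=["timestamp"],
# ...               header=0, sep=",")
# As with ``read_excel`` above, the header row is read first so that date columns
# can be referenced by name rather than by positional index.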
class DataFrameModifier:
"""Defines modifications to be performed on a raw pandas DataFrame
in the manner of a data preprocessor: useful when the source is e.g. a heterogeneous
Excel table that contains different data types, descriptive
or otherwise non-relevant columns to be excluded, categorical columns
to be converted, or data that needs filtering.
Advantages are:
- Being able to use a pd.DataFrame as input for sklearn and other
ML models by performing all the relevant preprocessing
on the DataFrame instead of a naked values array (allows to access
data by their column names).
- High level functions for data interaction / transformation
Disadvantages:
- Possible overhead of the data structure that may lack performance
for large data sets
Typically you will use:
- :meth:`exclude_column` explicitly excludes columns.
- :meth:`include_column` alternatively for a complementary definition
of relevant columns
- :meth:`cat_columns` explicitly tells which columns are of
categorical data type
- :meth:`filters` will mask certain values (i.e. set them to ``np.nan``), for example out-of-tolerance values or outliers
:param bool drop_nan_y: Drop rows where target column is nan
:param bool drop_nan_x: Drop rows where *any* predictor column is nan
:param bool time_to_float: Automatically convert datetime and
timedelta columns to float
.. _`Variance Inflation Factor`: https://en.wikipedia.org/wiki/Variance_inflation_factor
"""
CAT_MODES_LABEL = ("label", "int", "ordinal", "ord")
CAT_MODE_LABEL = "label"
CAT_MODES_1HOT = ("1-hot", "1hot", "onehot", "one-hot", "binary", "bin")
CAT_MODE_1HOT = "1-hot"
CAT_MODES = CAT_MODES_LABEL + CAT_MODES_1HOT
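# A minimal end-to-end sketch of the typical workflow described in the class
# docstring. The DataFrame contents and column names are hypothetical.
# >>> df = pd.DataFrame({"id": [1, 2, 3],
# ...                    "colour": ["red", "blue", "red"],
# ...                    "temp": [21.5, 250.0, 23.1],
# ...                    "target": [0.1, 0.7, 0.4]})
# >>> m = DataFrameModifier()
# >>> m.exclude_column("id")                      # descriptive column, not a feature
# >>> m.cat_column("colour", "label")             # encode strings as integers
# >>> m.filters({"temp": lambda df, x: x < 100})  # mask implausible values with NaN
# >>> m.apply_to(df, y_columns="target")          # modifies df inplace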
def __init__(self,
drop_nan_y=True,
drop_nan_x=False,
time_to_float=True,
):
self._calc_columns = OrderedDict()
self._exclude_columns = []
self._include_columns = []
self._dont_prune_columns = []
self._categorical_columns = []
self._categorical_columns_map = {}
self._categorical_columns_encoder = {}
self._filters = {}
self.x_columns = self.xi_columns = self.y_columns = self.yi_columns = None
self._baseline_mean = []
self._drop_nan_y = drop_nan_y
self._drop_nan_x = drop_nan_x
self._time_to_float = time_to_float
#self._pruning_variables_remaining = None
self._applied_to_df = [] # list of object-ids the modifier was already applied to
def __str__(self):
s = "<DataFrameModifier"
for name, attr in (('New columns', '_calc_columns'),
('Exclude columns', '_exclude_columns'),
('Include columns', '_include_columns'),
('Never prune columns', '_dont_prune_columns'),
('Categorical columns', '_categorical_columns'),
('Filters', '_filters'),
('Transformations', '_baseline_mean'),
('Drop rows where target is NaN', '_drop_nan_y'),
('Drop rows where any predictor is NaN', '_drop_nan_x'),
('Time to float', '_time_to_float')):
val = getattr(self, attr)
if val:
s += f" - {name}: {val}"
if s == "<DataFrameModifier":
    s += " - No Modifications Defined"
s += ">"
return s
def __repr__(self):
return self.__str__()
def new_column(self, name, fn, *args):
"""Add new column based on other columns of the DataFrame. Example:
>>> df = pd.DataFrame([[1,2,3], [4,5,6]], columns=list('abc'))
>>> m = DataFrameModifier()
>>> m.new_column('d', lambda x, y, z: x * y + z, 'a', 'b', 'c')
>>> m.apply_to(df)
>>> df
a b c d
0 1 2 3 5
1 4 5 6 26
:param str name: Name of the new column
:param function fn: Function taking as many variables as there
are columns involved in the formula
(i.e. ``lambda x, y, z: x * y + z``)
:param str \*args: column names in the original DataFrame
representing the columns that will be positional arguments
to function ``fn``
:return: None
"""
self._calc_columns[name] = lambda df: fn(*[df[col] for col in args])
def exclude_column(self, names):
"""Columns named here will be excluded from a DataFrame where
this modifier is applied. Complementary to
:meth:`include_column`
:param names: Column name or list of column names
:type names: str or list
:return: None
"""
if type(names) is str:
names = [names]
if self._include_columns:
warnings.warn("The DataFrameModifier has already include_columns"
" defined. This setting has precedence,"
" exclude_columns will be ignored")
self._exclude_columns.extend(names)
def exclude_columns(self, names):
"""Alias for :meth:`exclude_column`
"""
self.exclude_column(names)
def include_column(self, names):
"""This is the complementary definition of :meth:`exclude_column`.
*Only* the columns listed here are included in the data; all others
are excluded. Has precedence over :meth:`exclude_column`
:param names: Column name or list of column names
:type names: str or list
:return: None
"""
if type(names) is str:
names = [names]
if self._exclude_columns:
warnings.warn("The DataFrameModifier has already exclude_columns"
" defined. include_columns has precedence,"
" so exclude_columns will be ignored")
self._include_columns.extend(names)
def include_columns(self, names):
"""Alias for :meth:`include_column`
"""
self.include_column(names)
def dont_prune_column(self, names):
"""Columns defined here are never pruned by any automated pruning
algorithm of downstream learners.
Use this on columns you want to enforce being part of the model
even if they'll be statistically non-optimal
:param names: Column name or list of column names
:type names: str or list
:return: None
"""
if type(names) is str:
names = [names]
self._dont_prune_columns.extend(names)
def dont_prune_columns(self, names):
"""Alias for :meth:`dont_prune_column`
"""
self.dont_prune_column(names)
def cat_column(self, names, mode, impute=None, inplace=True, dropfirst=True):
"""Declare column(s) as categorical data type. ``mode`` tells whether
a categorical column shall be encoded as labels_ or as
``n-1`` binary `1-hot`_ columns for ``n`` different labels.
>>> df = pd.DataFrame(list('bbaccc'), columns=['labelvar'])
>>> df['onehotvar'] = list('cddee') + [np.nan]
>>> m = DataFrameModifier()
>>> m.cat_column('labelvar', 'label')
>>> m.cat_column('onehotvar', '1-hot', impute='d')
>>> m.apply_to(df)
>>> df
labelvar onehotvar1 onehotvar2
0 1.0 0.0 1.0
1 1.0 0.0 0.0
2 0.0 0.0 0.0
3 2.0 1.0 0.0
4 2.0 1.0 0.0
5 2.0 0.0 0.0
:param names: Column name or list of column names
:type names: str or list
:param str mode:
- ``"label"``: will append / replace the column's contents
by integers.
If ``inplace=False``, a column of the same name + suffix ``0``
will be appended, otherwise the column will be replaced.
Missing values are represented as ``NaN``. Category indices
follow alphabetical order of classes.
- ``"1-hot"``: Append as many columns as there are classes - 1,
named like ``{name}0``, ``{name}1``, ``...``, where ``{name}0``
will represent the most frequent label, the rest in descending
order. However the first column with index ``0`` is dropped
to prevent perfect multi collinearity of the encoded columns.
So if every column is ``0``, this will represent the most
frequent label (use :meth:`inspect` for details)
:param str impute:
- ``None`` (default): Eliminate rows where this column's value
is missing / NaN.
- ``value``: Replace missing value with ``value`` and then encode
the data.
:param bool inplace:
- ``True`` (default): For label encoded columns, the original
column is replaced. For 1-hot encoded columns, the original
column is dropped and only the indexed columns remain,
appended at the end of the DataFrame.
- ``False``: The original columns are kept. Any downstream
learner would get the raw data (i.e. strings) passed
into the model which will most likely raise errors.
**This setting should always be** ``True`` **except for
debugging reasons / raw data analysis**.
:param bool dropfirst:
- ``True`` (default): In case of 1-hot encoding determines that
the first column shall indeed be dropped as described above
- ``False``: The first column will be kept instead (which will
introduce multi-collinearity between the encoded columns.)
:return: None
.. _labels: https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.LabelEncoder.html
.. _`1-hot`: https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.OneHotEncoder.html
"""
if type(names) is str:
names = [names]
if type(mode) is str:
mode = [mode] * len(names)
if type(impute) not in (list, tuple):
impute = [impute] * len(names)
if type(inplace) is bool:
inplace = [inplace] * len(names)
if type(dropfirst) is bool:
dropfirst = [dropfirst] * len(names)
for i, m in enumerate(mode):
if m not in self.CAT_MODES:
raise TypeError(f"``mode`` must be either 'label' or '1-hot': {m}")
mode[i] = self.CAT_MODE_1HOT if m in self.CAT_MODES_1HOT else self.CAT_MODE_LABEL
for i, ip in enumerate(inplace):
if type(ip) is not bool:
raise TypeError(f"``inplace`` must be bool: {ip}")
for i, d in enumerate(dropfirst):
if type(d) is not bool:
raise TypeError(f"``dropfirst`` must be bool: {d}")
for name, m, im, ip, d in zip(names, mode, impute, inplace, dropfirst):
self._categorical_columns.append({'name': name, 'mode': m,
'impute': im, 'inplace': ip,
'dropfirst': d})
def cat_columns(self, *args, **kwargs):
"""Alias for :meth:`cat_column`
"""
self.cat_column(*args, **kwargs)
def get_cat_encoder(self, column):
"""Returns a tuple of the Encoder instance and categories / classes
for the passed column name(s)
:param str column: Column name or list of column names. If a list
is passed, a list of ``(encoder, classes)`` will be returned
:type column: str or list
:return: tuple(s) of ``(encoder, classes)`` where ``encoder`` is
either a OneHotEncoder_ or LabelEncoder_ instance and ``classes``
a `pd.Categorical`_ or ``encoder.classes_`` respectively.
.. _LabelEncoder: https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.LabelEncoder.html
.. _OneHotEncoder: https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.OneHotEncoder.html
.. _`pd.Categorical`: https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Categorical.html
"""
if isinstance(column, str):
return self._categorical_columns_encoder[column]
return [self._categorical_columns_encoder[col] for col in column]
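# Illustrative usage, assuming a modifier ``m`` that has already label-encoded a
# (hypothetical) column named "colour":
# >>> enc, classes = m.get_cat_encoder("colour")
# >>> list(classes)            # e.g. ['blue', 'red'] for a label-encoded column
# >>> enc.transform(["red"])   # -> array([1])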
def filters(self, filterdict):
"""Define one or more filters per column, i.e. to apply
hard thresholds.
:param dict filterdict: Dictionary of ``{column: function(df, x)}``
where ``column`` is the name of a DataFrame's column and
``function(df, x)`` a function that accepts the DataFrame
as first argument and the data values of ``column``
as second argument. It should return a boolean array.
In rows where this array evaluates to ``False`` the column
will be set to ``np.nan``. Examples for filter-functions:
- ``lambda df, x: x < 10.5``
- ``lambda df, x: np.logical_and(0 < x, x < 100)``
- ``lambda df, x: x < df['other_column']`` may modify ``x``
depending on another column's data
:return: None
"""
self._filters = filterdict
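# Illustrative filter definitions (column names are hypothetical):
# >>> m = DataFrameModifier()
# >>> m.filters({
# ...     "pressure": lambda df, x: np.logical_and(0 < x, x < 10),
# ...     "end_time": lambda df, x: x > df["start_time"],
# ... })
# Values failing a filter are set to np.nan when the modifier is applied,
# rather than dropping the whole row.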
def _apply_filters(self, df):
for col, func in self._filters.items():
if col not in df.columns:
continue
print("Apply filter on '{}'".format(col))
df.loc[np.logical_not(func(df, df[col])), col] = np.nan
def transform_by_category_mean(self, target_column,
categorical_column,
category_ref=None):
"""Normalize all values of ``target_column`` (*float*) depending
on the values of ``categorical_column``. For each category, the
mean is calculated and subtracted, effectively normalizing each
category to zero-mean. However if ``category_ref`` is provided,
each category will be normalized to the mean of this reference
category::
df[x] += "mean of x where categorical_column = category_ref"
- "mean of x where categorical_column = current row's category"
Doesn't affect rows where ``categorical_column`` is missing / NaN
"""
self._baseline_mean.append((target_column, categorical_column, category_ref))
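# Illustrative sketch (hypothetical columns): normalize a measurement per machine,
# using machine "A" as the reference category.
# >>> m = DataFrameModifier()
# >>> m.transform_by_category_mean("measurement", "machine", category_ref="A")
# After apply_to(), rows of machine "B" are shifted by
# (mean of "measurement" on A) - (mean of "measurement" on B).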
def _apply_transform_by_category_mean(self, df):
for x, ccol, cat in self._baseline_mean:
means = df.groupby(ccol).mean()
if cat:
mean_ref = means.loc[cat, x] # scalar
else:
mean_ref = 0
row_ix = df.loc[pd.notna(df[ccol]), ccol]
mean_this = means.loc[row_ix, x].values # 1d array
df.loc[pd.notna(df[ccol]), x] += mean_ref - mean_this
#return df
# =============================================================================
# def prune_by_vif(self, X):
# """
# pass matrix of all exogenous variables and prune columns that have
# a Variance Inflation Factor larger than `self._vif_thresh`.
#
# return (`pruned_matrix`, `indices`) with
# `pruned_matrix`: matrix where collinear columns are removed.
# `indices`: list of indices of the remaining columns
# (referring indices of the original matrix)
#
# https://stats.stackexchange.com/questions/155028/how-to-systematically-remove-collinear-variables-in-python
# """
# variables = list(range(X.shape[1])) # we need list to be able to delete elements
# if self.x_columns is not None:
# assert len(self.x_columns) == len(variables), \
# "len(self.x_columns) == {}, but must be same as X.shape[1] == {}".format(
# len(self.x_columns), len(variables)
# )
# column_names = copy.copy(self.x_columns) # avoid changing input inplace
# else:
# column_names = variables
#
# dropped_column = True
# while dropped_column:
# dropped_column = False
# vif = []
# for ix in range(len(variables)):
# try:
# vif.append(variance_inflation_factor(X[:, variables], ix))
# except LinAlgError:
# print("LinAlgError on column {}: {}".format(ix, column_names[ix]))
# vif.append(1)
#
# for _vif, ix in reversed(sorted(zip(vif, range(len(variables))))):
# # pick variable with highest vif that's not in self._dont_prune_columns
# if _vif > self._vif_thresh and column_names[ix] not in self._dont_prune_columns:
# print('dropping col {} "{}" with VIF={:.1f}'.format(
# variables[ix], column_names[ix], _vif))
# del variables[ix]
# del column_names[ix]
# dropped_column = True
# break
#
# print('{} Remaining variables:'.format(len(column_names)))
# #print(df.columns[variables])
# print(column_names)
# #self._pruning_variables_remaining = variables
# self._is_pruned = True
# return X[:, variables], variables
# =============================================================================
def _set_categorical(self, df):
"""Encoding of categorical variables as dtype ``np.float32``.
Encoder will be accessible via
``self._categorical_columns_encoder[col]``, where ``col``
may be either be the old or the new column name.
:param pandas.DataFrame df: Input DataFrame
:return: None
"""
y_columns_new = copy.copy(self.y_columns)
# first iteration may drop rows and may hence influence labels on ALL
# columns. Second iteration transforms the remainder (NaNs are removed)
for d in self._categorical_columns:
name, impute = d['name'], d['impute']
if name not in df.columns:
warnings.warn(f"Skipping column {name}: Not found in DataFrame")
continue
if impute is None:
df.drop(index=df.loc[pd.isna(df[name]), :].index, inplace=True)
else:
df.loc[pd.isna(df[name]), name] = impute
for d in self._categorical_columns:
name, mode, inplace = d['name'], d['mode'], d['inplace']
dropfirst = "first" if d['dropfirst'] else None
if name not in df.columns:
continue
if mode.lower() in self.CAT_MODES_1HOT:
# .value_counts sorts by frequency descending;
# -1 for NaN values, but since we removed NaNs this won't occur.
c = df[name].value_counts().index
cat = pd.Categorical(df[name], categories=c)
col_int = cat.codes
# drop="first" means that there will be no column for code 0,
# (most frequent label), i.e. all columns will be zero then
enc = preprocessing.OneHotEncoder(dtype=np.float32, sparse=False,
categories="auto", drop=dropfirst)
x = col_int.reshape(-1, 1)
out = enc.fit_transform(x)
# the column suffix is equivalent to the i-th label representation
names = []
for i in range(out.shape[1]):
a = enc.inverse_transform([[(1 if _ == i else 0) for _ in \
range(out.shape[1])]])[0,0]
names.append(f"{name}{a}")
df[f"{name}{a}"] = out[:,i]
#df = pd.concat([df, pd.DataFrame(out, index=df.index)], axis=1)
#df.columns = df.columns.values.tolist()[:-out.shape[1]] + names
if inplace:
df.drop(name, axis=1, inplace=True)
for n in names:
self._categorical_columns_map[n] = name
self._categorical_columns_encoder[name] = (enc, cat)
if name in self.y_columns:
# redefine y_columns if they got renamed during set_categorical
if inplace:
del y_columns_new[y_columns_new.index(name)]
y_columns_new.extend(names)
elif mode.lower() in self.CAT_MODES_LABEL:
enc = preprocessing.LabelEncoder()
df[name] = df[name].astype(np.str_)
enc.fit(df[name])
if inplace:
col_new = name
else:
col_new = f"{name}0"
df[col_new] = enc.transform(df[name]).astype(np.float32)
self._categorical_columns_map[col_new] = name
self._categorical_columns_encoder[name] = (enc, enc.classes_)
if name in self.y_columns and not inplace:
# redefine y_columns if they got renamed during set_categorical
y_columns_new.append(col_new)
self.y_columns = y_columns_new
def inspect(self, df, mode="text"):
"""Inspect the structure of the DataFrame. Typical usage is first
defining modifiers, applying them via :meth:`apply_to` and then
inspecting the result given the modifier. Example:
>>> df = pd.DataFrame([[1, 'text', 3.5]], columns=list('abc'))
>>> m = DataFrameModifier()
>>> m.cat_column('b', 'label')
>>> m.apply_to(df)
>>> print(m.inspect(df))
col name dtype categories
0 a int64
1 b float64 'text': 0
2 c float64
:param pandas.DataFrame df: Target DataFrame to be inspected
:param str mode: Whether to return *str* (default) or an equivalent
pd.DataFrame containing meta data on the structure
if ``mode != 'text'``
:return: Either str or pd.DataFrame depending on ``mode``
"""
#df = self.df
#m = self.modifier
m = self
cols_per_dtype = {}
structure = pd.DataFrame()
max_col_length = min(40, max([4] + [len(col) for col in df.columns]))
cat_cols = {_['name']: _ for _ in m._categorical_columns}
modes = {_['mode'] for _ in m._categorical_columns}
out = f"col {'name':{max_col_length}s} dtype "
out += (f"categories\n{'':{max_col_length+19}s}(1-hot ordered by freq.)" \
if m.CAT_MODE_1HOT in modes else "categories")
out += "\n"
for i, col in enumerate(df.columns):
if df[col].dtype not in cols_per_dtype:
cols_per_dtype[df[col].dtype] = []
cols_per_dtype[df[col].dtype].append((i, col))
for dtype, cols in cols_per_dtype.items():
for i, col in cols:
cat, cat_str = "", ""
if m and col in m._categorical_columns_map.keys():
base_col = m._categorical_columns_map[col]
cat_mode = cat_cols[base_col]['mode']
enc, categ = m.get_cat_encoder(base_col)
if cat_mode in m.CAT_MODES_LABEL:
cat = dict(zip(categ, enc.transform(categ)))
cat_str = str(cat).replace("{", "").replace("}", "")\
.replace(",", f"\n{'':{max_col_length+18}s}")
elif cat_mode in m.CAT_MODES_1HOT:
# Since column suffix is equivalent to the label code,
# we can just "invert" the number (NaN is all cols zero)
n = int(col[len(base_col):])
cat = categ.from_codes([n], categ.categories)[0]
cat_str = cat
if n == 1 and cat_cols[base_col]['dropfirst']:
# We virtually prepend the dropped n = 0 case
# of the most frequent label
col_ = f"{base_col}0"
col_str_ = str(col_).replace('\n', ' ').strip()
cat_ = categ.from_codes([0], categ.categories)[0]
cat_ = f"{cat_} ..."
structure = pd.concat([structure, pd.DataFrame(
[[col_, dtype, cat_]], index=["..."])])
out += (f"... {col_str_:{max_col_length}s} "
f"{df.dtypes[col]} {cat_}\n")
structure = pd.concat([structure, pd.DataFrame(
[[col, dtype, cat]], index=[i])])
col_str = str(col).replace('\n', ' ').strip()
out += f"{i:3d} {col_str:{max_col_length}s} {df.dtypes[col]} {cat_str}\n"
out = out[:-1] # remove trailing newline
structure.columns=['column', 'dtype', 'categorical']
if mode == "text":
return out
return structure
def apply_to(self, df, y_columns=[]):
"""
Applies modifications of self to DataFrame ``df`` inplace.
Explicitly the method will:
- Create new columns
- Exclude / include defined columns
- Drop rows depending on the settings of ``drop_nan_y``
and ``drop_nan_x``
- Apply filters
- Apply transformations
- Transform categorical columns
- Transform date, datetime and timedelta columns
- Move ``y_columns`` at the end of the DataFrame
No imputation (except for explicitly defined imputation for
categorical columns via :meth:`cat_column`), normalization
or pruning is performed up to here.
These options will be provided by the :class:``Learner`` class
:param pandas.DataFrame df: DataFrame that shall be modified
/ transformed
:param y_columns: Column(s) intended as target variable(s)
for a downstream learner
:type y_columns: str or list
:return: None
"""
if isinstance(y_columns, str):
y_columns = [y_columns]
self.y_columns = y_columns
for col in self.y_columns:
assert col in df.columns, 'column "{}" doesn\'t exist'.format(col)
if id(df) in self._applied_to_df:
warnings.warn("This modifier was already applied to this DataFrame")
# new columns
new_names = list(self._calc_columns.keys())
for name, fn in self._calc_columns.items():
#print("Adding new column '{}'".format(name))
df[name] = fn(df)
# exclude / include columns
if self._include_columns:
incl_cols = [col for col in df.columns if col in \
self._include_columns + self.y_columns + new_names]
incl_cols_not_found = [col for col in self._include_columns \
if col not in df.columns]
df.drop(columns=[_ for _ in df.columns if _ not in incl_cols],
inplace=True)
if incl_cols_not_found:
warnings.warn("The following columns are to be included, but weren't "
f"present in the DataFrame: {incl_cols_not_found}")
else:
excl_cols = [col for col in self._exclude_columns if col in df.columns]
excl_cols_not_found = [col for col in self._exclude_columns \
if col not in df.columns]
df.drop(excl_cols, axis=1, inplace=True)
if excl_cols_not_found:
warnings.warn("The following columns are to be excluded, but weren't "
f"present in the DataFrame: {excl_cols_not_found}")
# drop rows with missing data
if self._drop_nan_y and self.y_columns:
# only keep rows where ALL y_columns are not NaN
#df = df.loc[np.all(pd.notna(df[self.y_columns]), axis=1), :]
df.drop(df.loc[np.any(pd.isna(df[self.y_columns]), axis=1), :].index,
inplace=True)
if self._drop_nan_x:
# only keep rows where ALL x_columns are not NaN
x_columns = [col for col in df.columns if col not in self.y_columns]
#df = df.loc[np.all(pd.notna(df[x_columns]), axis=1), :]
df.drop(df.loc[np.any(pd.isna(df[x_columns]), axis=1), :].index,
        inplace=True)
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/dashboards.ipynb (unless otherwise specified).
__all__ = ['ObjectDetectionDatasetOverview', 'ObjectDetectionDatasetComparison',
'ObjectDetectionDatasetGeneratorScatter', 'ObjectDetectionDatasetGeneratorRange',
'ObjectDetectionResultOverview']
# Cell
from typing import Union, Optional, List
from abc import abstractmethod, ABC
from math import ceil, floor
import itertools
import matplotlib.pyplot as plt
from bokeh.plotting import show, output_notebook, gridplot, figure
from bokeh.models.widgets import DataTable, TableColumn, HTMLTemplateFormatter
from bokeh.models import ColumnDataSource, HoverTool, Title
from bokeh import events
import panel as pn
import panel.widgets as pnw
import numpy as np
import pandas as pd
from .core.dashboards import *
from .plotting import *
from .core.data import *
from .data import *
from .plotting.utils import toggle_legend_js
# Cell
class ObjectDetectionDatasetOverview(DatasetOverview):
"""Dataset overview for object detection datasets"""
DESCRIPTOR_DATA = "data"
DESCRIPTOR_STATS_DATASET = "stats_dataset"
DESCRIPTOR_STATS_IMAGES = "stats_image"
DESCRIPTOR_STATS_ANNOTATIONS = "stats_class"
# change these
IMAGE_IDENTIFIER_COL = "filepath"
ANNOTATON_LABEL_COL = "label"
OBJECTS_PER_IMAGE_COL = "num_annotations"
AREA_COL = "area"
def _generate_datset_stats_tab(self):
dataset_overview_table = table_from_dataframe(getattr(self.dataset, self.DESCRIPTOR_STATS_DATASET), width=self.width, height=self.height//7)
images_overview_table = table_from_dataframe(getattr(self.dataset, self.DESCRIPTOR_STATS_IMAGES), width=self.width, height=self.height//7)
classes_overview_table = table_from_dataframe(getattr(self.dataset, self.DESCRIPTOR_STATS_ANNOTATIONS), width=self.width, height=self.height//4)
class_occurances = self.dataset.data.groupby("label").count()["id"]
class_occurance_barplot = barplot(counts=class_occurances.values, values=np.array(class_occurances.index), bar_type="vertical", height=(self.height//5)*2)
return pn.Column("<b>Dataset stats</b>", dataset_overview_table, "<b>Image stats</b>", images_overview_table, "<b>Class stats</b>", classes_overview_table, pn.Row(class_occurance_barplot, align="center"))
def _generate_annotations_tab(self):
plot_size = floor(min(self.height, self.width)*0.45)
# mixing of classes
mixing_matrix_classes_in_images = utils.calculate_mixing_matrix(getattr(self.dataset, self.DESCRIPTOR_DATA), self.IMAGE_IDENTIFIER_COL, self.ANNOTATON_LABEL_COL)
self.class_mixing_matrix_plot = pn.Column("<b>Class mixing</b>", heatmap(mixing_matrix_classes_in_images, "row_name", "col_name", "values", width=plot_size, height=plot_size), height=self.height)
# number of object per image, stacked hist
self.classes_for_objects_per_image_stacked_hist = pn.Column(
"<b>Objects per Image</b>",
stacked_hist(getattr(self.dataset, self.DESCRIPTOR_DATA), self.OBJECTS_PER_IMAGE_COL, self.ANNOTATON_LABEL_COL, "Objects per Image", width=plot_size, height=plot_size)
)
# categorical overview
self.categorical_2d_histogram = categorical_2d_histogram_with_gui(
getattr(self.dataset, self.DESCRIPTOR_DATA),
category_cols=["label", "num_annotations", "width", "height"],
hist_cols=["num_annotations", "area", "area_normalized", "area_square_root", "area_square_root_normalized", "bbox_ratio", "bbox_xmin", "bbox_xmax", "bbox_ymin", "bbox_ymax", "width", "height"],
height=self.height//2, width=self.width//2
)
# ratio distribution
grid = pn.GridSpec(ncols=2,nrows=2, width=self.width, height=self.height, align="center")
grid[0,0] = self.class_mixing_matrix_plot
grid[1,0] = self.classes_for_objects_per_image_stacked_hist
grid[:,1] = pn.Column(self.categorical_2d_histogram, align="center")
return grid
def _generate_gallery_tab(self):
return pn.Column(Gallery(self.dataset, "data", "filepath", ["num_annotations", "width", "height", "label", "area", "bbox_ratio", "bbox_width", "bbox_height"], height=self.height).show(), align="center", sizing_mode="stretch_both")
def build_gui(self):
dataset_tab = super()._generate_dataset_tab()
dataset_stats_tab = self._generate_datset_stats_tab()
annotations_tab = self._generate_annotations_tab()
gallery_tab = self._generate_gallery_tab()
self.gui = pn.Tabs(("Dataset stats", dataset_stats_tab), ("Annotations", annotations_tab), ("Gallery", gallery_tab), ("Dataset", dataset_tab), align="start")
# Cell
class ObjectDetectionDatasetComparison(DatasetComparison):
"""Dataset comparison for object detection datasets."""
DESCRIPTOR_DATA = "data"
DESCRIPTOR_STATS_DATASET = "stats_dataset"
DESCRIPTOR_STATS_IMAGES = "stats_image"
DESCRIPTOR_STATS_ANNOTATIONS = "stats_class"
# change these
IMAGE_IDENTIFIER_COL = "filepath"
ANNOTATON_LABEL_COL = "label"
OBJECTS_PER_IMAGE_COL = "num_annotations"
AREA_COL = "area"
def _generate_dataset_tab(self):
overview_table = table_from_dataframe(self._get_descriptor_for_all_datasets(self.DESCRIPTOR_DATA), width=floor(self.width/2), height=self.height)
return pn.Row(*overview_table)
def _generate_datset_stats_tab(self):
dataset_overview_table = table_from_dataframe(self._get_descriptor_for_all_datasets(self.DESCRIPTOR_STATS_DATASET), width=floor(self.width/2), height=self.height//7)
images_overview_table = table_from_dataframe(self._get_descriptor_for_all_datasets(self.DESCRIPTOR_STATS_IMAGES), width=floor(self.width/2), height=self.height//7)
classes_overview_table = table_from_dataframe(self._get_descriptor_for_all_datasets(self.DESCRIPTOR_STATS_ANNOTATIONS), width=floor(self.width/2), height=self.height//4)
class_occurances_values = [dataset.data.groupby("label").count()["id"].values for dataset in self.datasets]
class_occurances_index = [np.array(dataset.data.groupby("label").count()["id"].index) for dataset in self.datasets]
class_occurance_barplot = barplot(counts=class_occurances_values, values=class_occurances_index, bar_type="vertical", height=(self.height//5)*2, width=floor(self.width/2))
duplication_data = {dataset.name if dataset.name is not None else "Dataset_"+str(index): [getattr(dataset, self.DESCRIPTOR_DATA).duplicated().sum()] for index, dataset in enumerate(self.datasets)}
duplication_data["All"] = [pd.concat(self._get_descriptor_for_all_datasets(self.DESCRIPTOR_DATA)).duplicated().sum()]
duplication_df = pd.DataFrame(duplication_data)
duplication_overview = table_from_dataframe(duplication_df)
return pn.Column(
"<b>Duplications</b>", pn.Row(duplication_overview),
"<b>Dataset stats</b>", pn.Row(*dataset_overview_table),
"<b>Image stats</b>", pn.Row(*images_overview_table),
"<b>Class stats</b>", pn.Row(*classes_overview_table),
pn.Row(*class_occurance_barplot, align="center")
)
def _generate_annotations_tab(self):
plot_size = min(floor(self.width/len(self.datasets)), floor(self.height/2))
link_plots_checkbox = pnw.Checkbox(name="Link plot axis", value=False)
@pn.depends(link_plots_checkbox.param.value)
def _mixing_plots(link_plots):
# mixing of classes
mixing_matrix_classes_in_images = [utils.calculate_mixing_matrix(dataset, self.IMAGE_IDENTIFIER_COL, self.ANNOTATON_LABEL_COL) for dataset in self._get_descriptor_for_all_datasets(self.DESCRIPTOR_DATA)]
class_mixing_matrix_plot = pn.Row("<b>Class mixing</b>", *heatmap(mixing_matrix_classes_in_images, "row_name", "col_name", "values", link_plots=link_plots, width=plot_size, height=plot_size))
# number of object per image, stacked hist
classes_for_objects_per_image_stacked_hist = pn.Row(
"<b>Objects per Image</b>",
*stacked_hist(self._get_descriptor_for_all_datasets(self.DESCRIPTOR_DATA), self.OBJECTS_PER_IMAGE_COL, self.ANNOTATON_LABEL_COL, "Objects per Image", link_plots=link_plots, width=plot_size, height=plot_size)
)
return pn.Column(link_plots_checkbox, class_mixing_matrix_plot, classes_for_objects_per_image_stacked_hist)
# categorical overview
self.categorical_2d_histogram = categorical_2d_histogram_with_gui(
self._get_descriptor_for_all_datasets(self.DESCRIPTOR_DATA),
category_cols=["label", "num_annotations", "width", "height"],
hist_cols=["num_annotations", "area", "area_normalized", "area_square_root", "area_square_root_normalized", "bbox_ratio", "bbox_xmin", "bbox_xmax", "bbox_ymin", "bbox_ymax", "bbox_width", "bbox_height", "width", "height"],
height=floor(plot_size*1.5), width=floor(plot_size*1.5)
)
return pn.Column(_mixing_plots, self.categorical_2d_histogram, align="center")
def _generate_gallery_tab(self):
return pn.Row(*[Gallery(dataset, "data", "filepath", ["num_annotations", "width", "height", "label", "area", "bbox_ratio", "bbox_width", "bbox_height"], width=floor(self.width/len(self.datasets))).show() for dataset in self.datasets], align="start", sizing_mode="stretch_both")
def build_gui(self):
dataset_tab = self._generate_dataset_tab()
dataset_stats_tab = self._generate_datset_stats_tab()
annotations_tab = self._generate_annotations_tab()
gallery_tab = self._generate_gallery_tab()
self.gui = pn.Tabs(("Dataset stats overview", dataset_stats_tab), ("Annotations overview", annotations_tab), ("Gallery", gallery_tab), ("Dataset overview", dataset_tab), align="start")
# Cell
class ObjectDetectionDatasetGeneratorScatter(DatasetGeneratorScatter):
"""Dataset generator for object detection"""
DESCRIPTOR_STATS = "stats_dataset"
DATASET_OVERVIEW = ObjectDetectionDatasetOverview
DATASET_FILTER_COLUMNS = ["width", "height", "label", "area_normalized", "bbox_ratio", "bbox_width", "bbox_height", "num_annotations"]
# Cell
class ObjectDetectionDatasetGeneratorRange(DatasetGenerator):
"""Dataset generator for object detection"""
DESCRIPTOR_STATS = "stats_dataset"
DATASET_OVERVIEW = ObjectDetectionDatasetOverview
DATASET_FILTER_COLUMNS = ["width", "height", "label", "area_normalized", "bbox_ratio", "bbox_width", "bbox_height", "num_annotations"]
# Cell
class ObjectDetectionResultOverview(Dashboard):
"""Overview dashboard """
def __init__(self, dataset, plotting_backend="matplotlib", height=700, width=1000):
self.dataset= dataset
self.plotting_backend = plotting_backend
super().__init__(width=width, height=height)
def build_gui(self):
self.loss_tab = self.build_loss_tab()
self.ap_tab = self.build_precision_recall_tab()
self.gui = pn.Tabs(("Loss", self.loss_tab), ("Precision-Recall", self.ap_tab))
def show(self):
return self.gui
def show_loss_tab(self):
return self.loss_tab
def show_ap_tab(self):
return self.ap_tab
def build_loss_tab(self):
# loss hists
if self.plotting_backend == "bokeh":
bins_input = pnw.IntInput(name="Bins", start=1, end=100, value=10)
@pn.depends(bins_input.param.value)
def loss_hists(bins):
unique_losses = self.dataset.base_data[["filepath", "loss_classifier", "loss_box_reg", "loss_objectness", "loss_rpn_box_reg", "loss_total"]].drop_duplicates()
hist_line = plots_as_matrix(
histogram(
[unique_losses[loss] for loss in ["loss_total", "loss_classifier", "loss_box_reg", "loss_objectness", "loss_rpn_box_reg"]],
title=["loss_total", "loss_classifier", "loss_box_reg", "loss_objectness", "loss_rpn_box_reg"],
bins=bins, linked_axis=False), 5, 1, width=self.width, height=200
)
return hist_line
loss_hists_col = pn.Column(bins_input, loss_hists)
if self.plotting_backend == "matplotlib":
fig_loss_hists, ax_loss_hists = plt.subplots(1, 5, figsize=(16*5,9))
unique_losses = self.dataset.base_data[["filepath", "loss_classifier", "loss_box_reg", "loss_objectness", "loss_rpn_box_reg", "loss_total"]].drop_duplicates()
for single_ax, key in zip(ax_loss_hists, ["loss_classifier", "loss_box_reg", "loss_objectness", "loss_rpn_box_reg", "loss_total"]):
single_ax.hist(unique_losses[key].values, bins=20)
single_ax.set_title(" ".join(key.split("_")).title(), fontsize=40)
for tick in single_ax.xaxis.get_major_ticks():
tick.label.set_fontsize(34)
tick.label.set_rotation(45)
for tick in single_ax.yaxis.get_major_ticks():
tick.label.set_fontsize(34)
plt.close()
loss_hists_col = pn.pane.Matplotlib(fig_loss_hists, width=self.width)
axis_cols = ['score', 'area_normalized', 'area', 'bbox_ratio', 'bbox_width', 'bbox_height', 'num_annotations', 'loss_classifier', 'loss_box_reg', 'loss_objectness', 'loss_rpn_box_reg', 'loss_total', 'width', 'height']
scatter_overview = scatter_plot_with_gui(
self.dataset.base_data[self.dataset.base_data["is_prediction"] == True],
x_cols=axis_cols[1:] + [axis_cols[0]],
y_cols=axis_cols,
color_cols=["label", "num_annotations", "filename"]
)
cat_2d_hist = categorical_2d_histogram_with_gui(
self.dataset.base_data[self.dataset.base_data["is_prediction"] == True],
category_cols=["label", "num_annotations", "filename"],
hist_cols=['loss_total', 'loss_classifier', 'loss_box_reg', 'loss_objectness', 'loss_rpn_box_reg', 'score', 'area_normalized', 'area', 'bbox_ratio', 'bbox_width', 'bbox_height', 'num_annotations', 'width', 'height', 'label']
)
sub_tabs = pn.Tabs(
("Histograms", pn.Row(pn.Spacer(sizing_mode="stretch_width"), scatter_overview, pn.Spacer(sizing_mode="stretch_width"), cat_2d_hist, pn.Spacer(sizing_mode="stretch_width"), align="center")),
("Gallery", Gallery(self.dataset, "base_data", "filepath", sort_cols=["loss_total", "loss_classifier", "loss_box_reg", "loss_objectness", "loss_rpn_box_reg"], height=self.height).show())
)
return pn.Column(loss_hists_col, sub_tabs)
def build_ap_overview(self, metric_data):
map_data = {key: [metric_data[key]["map"], int(len(metric_data[key].keys())-1)] for key in metric_data.keys()}
map_table = table_from_dataframe(pd.DataFrame(map_data, index=["mAP", "Classes"]).round(4))
ap_data = {}
for metric_key, metric_value in metric_data.items():
if metric_key != "map":
ap_data[metric_key] = {"class": [], "ap": []}
for class_name, class_data in metric_value.items():
if class_name != "map":
ap_data[metric_key]["class"].append(class_name)
ap_data[metric_key]["ap"].append(class_data["ap"])
ap_plots = []
for ap_key, ap_value in ap_data.items():
if len(ap_value["ap"]) > 0:
ap = np.array(ap_value["ap"])[np.array(ap_value["ap"]).argsort()]
class_names = np.array(ap_value["class"])[np.array(ap_value["ap"]).argsort()]
ap_plot = barplot(ap, class_names, bar_type="horizontal")
ap_plot.add_tools(HoverTool(tooltips = [("AP", "@y @right")]))
ap_plot.title = Title(text="mAP - " + str(metric_data[ap_key]["map"].round(4)), align="center")
ap_plots.append(pn.Column("<b>"+ap_key.replace("_", " ").title().replace("Ap", "AP")+"</b>", ap_plot))
return pn.Column(pn.Row(map_table, align="center"), pn.Row(*ap_plots, align="center"))
@staticmethod
def precision_recall_plot_bokeh(data, iou):
plot_data = pd.DataFrame({key: data[key] for key in ["recall", "precision", "scores", "tp", "fp", "fn"]})
source = ColumnDataSource(plot_data)
p = figure(x_axis_type=None, height=350, width=400, title="AP@"+str(iou)+" - "+str(round(data["ap"],4)), y_axis_label="precision", tools="")
p.line("recall", "precision", source=source, legend_label="Actual", color="black", line_width=2)
p.step(data["ap11_recalls"], data["ap11_precisions"], legend_label="AP11", color="green", line_width=2)
p.step(data["monotonic_recalls"], data["monotonic_precisions"], legend_label="Monotonic", color="firebrick", line_width=2)
p.add_tools(HoverTool(tooltips=[("Score", "@scores"), ("TP", "@tp"), ("FP", "@fp"), ("FN", "@fn")], mode="vline"))
p.js_on_event(events.DoubleTap, toggle_legend_js(p))
p.legend.click_policy="hide"
p_score = figure(x_range=p.x_range, height=150, width=400, x_axis_label="recall", y_axis_label="score", tools="")
p_score.scatter(data["recall"], data["scores"])
return pn.Row(gridplot([[p],[p_score]]))
def plot_precision_recall_curves_for_class_bokeh(self, data, class_key):
plot_list = []
for iou, plot_data in data.items():
if iou != "ap":
plot_list.append(self.precision_recall_plot_bokeh(plot_data, iou))
return plots_as_matrix(plot_list, 5, 2, width=400*5, height=500*2)
def plot_additional_stats_bokeh(self, class_data, class_name):
# histograms
hist = histogram(list(class_data.values()))
return pn.pane.Bokeh(hist)
@staticmethod
def precision_recall_plot_matplotlib(fig, data, iou, bottom, top, left, right):
gs = fig.add_gridspec(nrows=4, ncols=1, left=left, right=right, bottom=bottom, top=top, hspace=0)
ax1 = fig.add_subplot(gs[:3, :])
ax1.set_title("IOU: " + str(iou))
ax1.plot(data["recall"], data["precision"], label="Actual", color="black", lw=2)
ax1.plot(data["ap11_recalls"], data["ap11_precisions"], label="AP11", color="green", lw=2)
ax1.plot(data["monotonic_recalls"], data["monotonic_precisions"], label="Monotonic", color="firebrick", lw=2)
ax1.set_xticks([])
ax1.set_ylabel("Precision")
ax1.legend()
ax2 = fig.add_subplot(gs[-1, :])
ax2.plot(data["recall"], data["scores"], ".")
ax2.set_xlabel("Recall")
ax2.set_ylabel("Score")
def plot_precision_recall_curves_for_class_matplotlib(self, data, class_key):
fig = plt.figure(constrained_layout=False, figsize=(16,9))
row_coords = [(0.55, 0.95), (0.05, 0.45)]
col_coords = [(0.05, 0.2), (0.25, 0.4), (0.45, 0.6), (0.65, 0.8), (0.85, 1)]
coord_combinations = list(itertools.product(row_coords, col_coords))
ious = sorted([iou for iou in data.keys() if iou != "ap"])
for index, iou in enumerate(ious):
if iou != "ap":
row_coord = coord_combinations[index][0]
col_coord = coord_combinations[index][1]
self.precision_recall_plot_matplotlib(fig, data[iou], iou, row_coord[0], row_coord[1], col_coord[0], col_coord[1])
plt.close()
return pn.pane.Matplotlib(fig, width=self.width)
def plot_additional_stats_matplotlib(self, class_data, class_name):
# histograms
class_data[0.5]["additional_stats"]
hist_fig, hist_ax = plt.subplots(1, len(class_data[0.5]["additional_stats"]), figsize=(9*len(class_data[0.5]["additional_stats"]), 9))
for ax, (stat_name, stat_data) in zip(hist_ax, class_data[0.5]["additional_stats"].items()):
ax.hist(stat_data, bins=20)
ax.set_xlabel(" ".join(stat_name.split("_")).title(), fontsize=32)
ax.set_ylabel("Counts", fontsize=32)
ax.ticklabel_format(style='sci', axis='x', scilimits=(0,0))
ax.xaxis.offsetText.set_fontsize(34)
for x_tick, y_tick in zip(ax.xaxis.get_major_ticks(), ax.yaxis.get_major_ticks()):
x_tick.label.set_fontsize(34)
y_tick.label.set_fontsize(34)
for x_tick, y_tick in zip(ax.xaxis.get_minor_ticks(), ax.yaxis.get_minor_ticks()):
x_tick.label.set_fontsize(34)
y_tick.label.set_fontsize(34)
plt.tight_layout()
plt.close()
return pn.pane.Matplotlib(hist_fig, width=self.width)
def build_precison_recall_overview(self, data):
if len(data) == 1:
return pn.Column("<h1> No information available</h1>")
class_select = pnw.Select(options=[key for key in data.keys() if key != "map"])
@pn.depends(class_select.param.value)
def _plot(class_name):
heading = pn.Row("<h1>AP - "+str(data[class_name]["ap"].round(4))+"</h1>", align="center")
table_data = {"AP": [round(data[class_name][iou_key]["ap"],4) for iou_key in data[class_name].keys() if iou_key != "ap"]}
table_df = pd.DataFrame(table_data)
# Exp - 9
# DNN applied to Kaggle Cats vs Dogs dataset, images are read using ImageDataGenerator,
# and I manipulated the data set so that Cross Validation is used in training.
# Image augmentation is applied as well.
# The final NN architecture I concluded from Exp 7
# [TODO][Add link to data set here]
import os
from shutil import copyfile
from shutil import rmtree
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
from keras.callbacks import ReduceLROnPlateau
from sklearn.model_selection import KFold
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.preprocessing.image import ImageDataGenerator
# Callbacks
learning_rate_reduction = ReduceLROnPlateau(monitor='val_acc',
patience=2,
verbose=1,
factor=0.5,
min_lr=0.00001)
callbacks = [learning_rate_reduction]
# All files are here including train.zip and test1.zip
base_dir = './cats-v-dogs'
# This is where I am extracting train.zip, will copy from here to train/cats and
# train/dogs and to validation/cats and validation/dogs
tmp_dir = os.path.join(base_dir, 'tmp/train')
# This is training folder.
# We will copy 90% of dogs images from input tmp/train folder to train/dogs and
# 90% of cats to train/Cats. Cross validation is part of the development process
# of the model and sub-set of data samples are assigned validation set dynamically
# through Cross-Validation
train_dir = os.path.join(base_dir, 'train')
# This is the test folder. We will copy from input tmp/train folder 10% of the dogs
# to test/dogs and 10% of cats to test/Cats
test_dir = os.path.join(base_dir, 'test')
# This is kaggle test folder, we extract test1.zip here. This is the 'Production'
# Dataset you can consider where you don't know to which class each image belongs.
kaggle_test_dir = os.path.join(base_dir, 'test1')
# Directory with our training cat/dog pictures
train_cats_dir = os.path.join(train_dir, 'cats')
train_dogs_dir = os.path.join(train_dir, 'dogs')
test_cats_dir = os.path.join(test_dir, 'cats')
test_dogs_dir = os.path.join(test_dir, 'dogs')
# # Create folders if they are not.
print('Creating folders ....')
if not os.path.exists(train_dir):
os.mkdir(train_dir)
else:
rmtree(train_dir)
os.mkdir(train_dir)
if not os.path.exists(train_cats_dir):
os.mkdir(train_cats_dir)
if not os.path.exists(train_dogs_dir):
os.mkdir(train_dogs_dir)
if not os.path.exists(test_dir):
os.mkdir(test_dir)
else:
rmtree(test_dir)
os.mkdir(test_dir)
if not os.path.exists(test_cats_dir):
os.mkdir(test_cats_dir)
if not os.path.exists(test_dogs_dir):
os.mkdir(test_dogs_dir)
list_of_fnames = os.listdir(tmp_dir)
list_of_cats_fnames = [i for i in list_of_fnames if 'CAT' in i.upper()]
print('Found {0} CATS images in input folder tmp/train'.format(len(list_of_cats_fnames)))
list_of_dogs_fnames = [i for i in list_of_fnames if 'DOG' in i.upper()]
print('Found {0} DOGS images in input folder tmp/train'.format(len(list_of_dogs_fnames)))
np.random.shuffle(list_of_cats_fnames)
np.random.shuffle(list_of_dogs_fnames)
TOTAL_CATS = len(list_of_cats_fnames)
TOTAL_DOGS = len(list_of_dogs_fnames)
K_FOLDS = 5
TRAINING_TEST_SPLIT_AT = 0.9
BATCH_SIZE = 5
TARGET_SIZE = (128, 128)
NO_OF_EPOCHS = 5
EXPERIMENT_SIZE = 1000  # Size of the sample set per category, cats or dogs.
# This controls how many samples we experiment with in the model.
# It helps to build the model incrementally: experiment on a smaller
# set size, adjust parameters and the complexity of the network, then,
# to seek better performance, increase the complexity of the network,
# train again until we overfit, add more data, and so on until we
# make use of all available data.
print('\nDistributing images to \n {0} \n {1} \n {2} \n {3} \n'
'such that 90% of total number of images goes to training and \n'
'10% goes to test, training is later distributed dynamically at each '
'epoch 80-20 for training and validation'.format(
train_cats_dir, train_dogs_dir,
test_cats_dir, test_dogs_dir))
c = 0
for i in list_of_cats_fnames:
if c < (round(TRAINING_TEST_SPLIT_AT * EXPERIMENT_SIZE)):
copyfile(os.path.join(tmp_dir, i), os.path.join(train_cats_dir, i))
else:
copyfile(os.path.join(tmp_dir, i), os.path.join(test_cats_dir, i))
c += 1
if c >= EXPERIMENT_SIZE:
break
c = 0
for i in list_of_dogs_fnames:
if c < (round(TRAINING_TEST_SPLIT_AT * EXPERIMENT_SIZE)):
copyfile(os.path.join(tmp_dir, i), os.path.join(train_dogs_dir, i))
else:
copyfile(os.path.join(tmp_dir, i), os.path.join(test_dogs_dir, i))
c += 1
if c >= EXPERIMENT_SIZE:
break
print('Total training cat images :', len(os.listdir(train_cats_dir)))
print('Total training dog images :', len(os.listdir(train_dogs_dir)))
print('Total test cat images :', len(os.listdir(test_cats_dir)))
print('Total test dog images :', len(os.listdir(test_dogs_dir)))
print('Loading images through generators ...')
# # Here we create ImageDataGenerator and we normalize while loading
train_datagen = ImageDataGenerator(rescale=1.0 / 255)
test_datagen = ImageDataGenerator(rescale=1.0 / 255)
#
# # # We then load data through the generator
train_generator = train_datagen.flow_from_directory(
directory=train_dir,
target_size=TARGET_SIZE, # Resize the image while loading
batch_size=BATCH_SIZE, #
class_mode='binary',
shuffle=False)  # 1-dimensional binary labels; the generator assigns 0 to cats and 1 to dogs,
# as can be seen from train_generator.class_indices
TOTAL_TRAINING = len(train_generator.filenames)
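# Note (illustrative): with this folder layout the generator maps classes
# alphabetically, e.g.
#   train_generator.class_indices  ->  {'cats': 0, 'dogs': 1}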
#
test_generator = test_datagen.flow_from_directory(
directory=test_dir,
target_size=TARGET_SIZE,
batch_size=1, # one sample at a time for evaluation
class_mode='binary'
)
#
TOTAL_TEST = len(test_generator.filenames)
print('Constructing and compiling model ...')
# TODO use NN architecture concluded from Exp - 7
model = tf.keras.models.Sequential([
tf.keras.layers.Conv2D(8, (3, 3), activation='relu', input_shape=(128, 128, 3)),
tf.keras.layers.MaxPooling2D(2, 2),
# tf.keras.layers.Dropout(0.2),
# tf.keras.layers.Conv2D(32, (3, 3), activation='relu'),
# tf.keras.layers.MaxPooling2D(2, 2),
# tf.keras.layers.Dropout(0.2),
# tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
# tf.keras.layers.MaxPooling2D(2, 2),
# tf.keras.layers.Dropout(0.2),
# tf.keras.layers.Conv2D(128, (3, 3), activation='relu'),
# tf.keras.layers.MaxPooling2D(2, 2),
# tf.keras.layers.Dropout(0.2),
tf.keras.layers.Flatten(),
# tf.keras.layers.Dense(1024, activation='relu'),
# tf.keras.layers.Dropout(0.2),
tf.keras.layers.Dense(16, activation='relu'),
# tf.keras.layers.Dropout(0.2),
tf.keras.layers.Dense(1, activation='sigmoid')
])
model.compile(optimizer=RMSprop(lr=0.001),
loss='binary_crossentropy', # not sparse_crossentropy or categorical_corssentropy since
# we are doing two class which can ben handled as
# a binary classification
metrics=['accuracy'])
# Load existing model for incremental learning if it exists
# if os.path.exists("model.h5"):
# print('loading previous model......')
# model.load_weights("model.h5")
# here we train the model
print('Training ....')
# TODO fill in a dataframe with filenames and classifications to either (0 'Cats', 1 'Dogs)
# TODO split the datafame randomely at each epoch iteration to validation and training
# TODO supply validation and training data frame to model.fit method and set epochs = 1
kf = KFold(n_splits=K_FOLDS, random_state=None, shuffle=True)
kf.get_n_splits(train_generator.filenames)
X = np.array(train_generator.filenames)
labels = dict((k, v) for k, v in train_generator.class_indices.items())
Y = np.array([labels[os.path.dirname(train_generator.filenames[i])]
              for i in range(len(train_generator.filenames))])
# Labels will be redefined again at the end of the scritp to use keys in place of values
# for the submission
# df["category"] = df["category"].replace({0: 'cat', 1: 'dog'})
for i in range(NO_OF_EPOCHS):
    for fold, (train_index, test_index) in enumerate(kf.split(X)):
        trainData = X[train_index]
        testData = X[test_index]
        trainLabels = Y[train_index]
        testLabels = Y[test_index]
        print("=========================================")
        print("====== K Fold Validation step => %d/%d =======" % (fold + 1, K_FOLDS))
        print("=========================================")
        # Per the TODOs above: build per-fold train/validation dataframes and feed
        # them via flow_from_dataframe. class_mode='binary' expects string labels.
        train_df = pd.DataFrame({
            'filename': trainData,
            'category': trainLabels.astype(str)
        })
        val_df = pd.DataFrame({
            'filename': testData,
            'category': testLabels.astype(str)
        })
        fold_train_generator = train_datagen.flow_from_dataframe(
            train_df, directory=train_dir, x_col='filename', y_col='category',
            target_size=TARGET_SIZE, batch_size=BATCH_SIZE, class_mode='binary')
        fold_val_generator = test_datagen.flow_from_dataframe(
            val_df, directory=train_dir, x_col='filename', y_col='category',
            target_size=TARGET_SIZE, batch_size=BATCH_SIZE, class_mode='binary')
        history = model.fit(
            fold_train_generator,
            epochs=1,
            validation_data=fold_val_generator,
            steps_per_epoch=len(trainData) // BATCH_SIZE,
            validation_steps=len(testData) // BATCH_SIZE,
            verbose=2)  # Found that this is the clearest, no annoying progress bars
#
# # -----------------------------------------------------------
# # Retrieve a list of list results on training and test data
# # sets for each training epoch
#
# # -----------------------------------------------------------
        # For healthy training, loss should decrease while accuracy increases;
        # if loss increases while accuracy also increases, that points to overfitting
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
# loss = history.history['loss']
# val_loss = history.history['val_loss']
#
epochs = range(len(acc)) # Get number of epochs
#
# # # ------------------------------------------------
# # # Plot training and validation accuracy per epoch
# # # ------------------------------------------------
plt.plot(epochs, acc, color='b', label="Training accuracy")
plt.plot(epochs, val_acc, color='r', label="Validation accuracy")
plt.title('Training and validation accuracy')
plt.legend(loc='best', shadow=True)
# plt.figure()
# #
# # # ------------------------------------------------
# # # Plot training and validation loss per epoch
# # # ------------------------------------------------
# # plt.plot(epochs, loss, color='b', label="Training loss")
# # plt.plot(epochs, val_loss, color='r', label="Validation loss")
# # plt.title('Training and validation loss')
#
# # plt.legend(loc='best', shadow=True)
plt.show()
# #
# # # Save the model
model.save_weights("model.h5")
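        # Persist the trained weights so a later run can resume from "model.h5" via the
        # commented-out load_weights call above.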
#
# Test on a sample of never seen images
print('Testing ....')
# TODO change this to avoid looping
testing_results = model.evaluate(test_generator)
for i in range(len(model.metrics_names)):
if 'accuracy' in model.metrics_names[i]:
print('Accuracy is {0}'.format(testing_results[i]))
# # Prediction for Kaggle submission,
# This is similar to when Production data is coming to the trained model to work on.
print('Production ....')
kaggle_test_datagen = ImageDataGenerator(rescale=1.0 / 255)
kaggle_test_generator = kaggle_test_datagen.flow_from_directory(
directory=base_dir,
target_size=(128, 128),
class_mode='binary',
batch_size=1,
classes=['test1'],
    shuffle=False)  # Very important to keep this False so that the order of the filenames
                    # read by the generator matches the order of predictions returned by
                    # model.predict, letting us associate predictions with file names
NO_SAMPLES = len(kaggle_test_generator.filenames)
predictions = model.predict(kaggle_test_generator, steps=NO_SAMPLES)
# Preparing Kaggle submission file
predictions = np.where(predictions >= 0.5, 1, 0) # rounding probabilities of the output
filenames = kaggle_test_generator.filenames #
Ids = [os.path.basename(f).split('.')[0] for f in filenames]  # extract ids from filenames (OS-independent)
labels = dict((v, k) for k, v in train_generator.class_indices.items()) # labels and their indices
predictions = [labels[k[0]] for k in predictions]
submission_df = | pd.DataFrame({"id": Ids, "label": predictions}) | pandas.DataFrame |
# Copyright 2019 <NAME> GmbH
# Copyright 2020-2021 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for ROS 2 data model."""
from caret_analyze.record.record_factory import RecordFactory, RecordsFactory
import pandas as pd
from tracetools_analysis.data_model import (DataModel,
DataModelIntermediateStorage)
class Ros2DataModel(DataModel):
"""
Container to model pre-processed ROS 2 data for analysis.
This aims to represent the data in a ROS 2-aware way.
"""
def __init__(self) -> None:
"""Create a Ros2DataModel."""
super().__init__()
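        # Trace events are appended to lightweight list/dict intermediate storage below and
        # only become pandas DataFrames once _finalize() is called.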
# Objects (one-time events, usually when something is created)
self._contexts: DataModelIntermediateStorage = []
self._nodes: DataModelIntermediateStorage = []
self._publishers: DataModelIntermediateStorage = []
self._subscriptions: DataModelIntermediateStorage = []
self._subscription_objects: DataModelIntermediateStorage = []
self._services: DataModelIntermediateStorage = []
self._clients: DataModelIntermediateStorage = []
self._timers: DataModelIntermediateStorage = []
self._timer_node_links: DataModelIntermediateStorage = []
self._callback_objects: DataModelIntermediateStorage = []
self._callback_symbols: DataModelIntermediateStorage = []
self._lifecycle_state_machines: DataModelIntermediateStorage = []
self._executors: DataModelIntermediateStorage = []
self._executors_static: DataModelIntermediateStorage = []
self._callback_groups: DataModelIntermediateStorage = []
self._callback_groups_static: DataModelIntermediateStorage = []
self._callback_group_timer: DataModelIntermediateStorage = []
self._callback_group_subscription: DataModelIntermediateStorage = []
self._callback_group_service: DataModelIntermediateStorage = []
self._callback_group_client: DataModelIntermediateStorage = []
self._rmw_impl: DataModelIntermediateStorage = []
self._tilde_subscriptions: DataModelIntermediateStorage = []
self._tilde_publishers: DataModelIntermediateStorage = []
self._tilde_subscribe_added: DataModelIntermediateStorage = []
# Events (multiple instances, may not have a meaningful index)
# string argument
self._lifecycle_transitions: DataModelIntermediateStorage = []
# Events (multiple instances, may not have a meaningful index)
self.callback_start_instances = RecordsFactory.create_instance(
None, ['callback_start_timestamp', 'callback_object', 'is_intra_process']
)
self.callback_end_instances = RecordsFactory.create_instance(
None, ['callback_end_timestamp', 'callback_object']
)
self.dds_write_instances = RecordsFactory.create_instance(
None, ['tid', 'dds_write_timestamp', 'message']
)
self.dds_bind_addr_to_stamp = RecordsFactory.create_instance(
None, ['tid', 'dds_bind_addr_to_stamp_timestamp', 'addr', 'source_timestamp']
)
self.dds_bind_addr_to_addr = RecordsFactory.create_instance(
None, ['dds_bind_addr_to_addr_timestamp', 'addr_from', 'addr_to']
)
self.on_data_available_instances = RecordsFactory.create_instance(
None, ['on_data_available_timestamp', 'source_timestamp']
)
self.rclcpp_intra_publish_instances = RecordsFactory.create_instance(
None, ['tid', 'rclcpp_intra_publish_timestamp', 'publisher_handle',
'message', 'message_timestamp']
)
self.rclcpp_publish_instances = RecordsFactory.create_instance(
None, [
'tid', 'rclcpp_publish_timestamp', 'publisher_handle',
'message', 'message_timestamp'
]
)
self.rcl_publish_instances = RecordsFactory.create_instance(
None, ['tid', 'rcl_publish_timestamp', 'publisher_handle', 'message']
)
self.dispatch_subscription_callback_instances = RecordsFactory.create_instance(
None, ['dispatch_subscription_callback_timestamp', 'callback_object', 'message',
'source_timestamp', 'message_timestamp'])
self.dispatch_intra_process_subscription_callback_instances = \
RecordsFactory.create_instance(
None,
['dispatch_intra_process_subscription_callback_timestamp', 'callback_object',
'message', 'message_timestamp']
)
self.message_construct_instances = RecordsFactory.create_instance(
None, ['message_construct_timestamp', 'original_message', 'constructed_message']
)
self.tilde_subscribe = RecordsFactory.create_instance(
None, [
'tilde_subscribe_timestamp',
'subscription',
'tilde_message_id']
)
self.tilde_publish = RecordsFactory.create_instance(
None, [
'tilde_publish_timestamp',
'publisher',
'subscription_id',
'tilde_message_id']
)
self.sim_time = RecordsFactory.create_instance(
None, [
'system_time',
'sim_time']
)
self.timer_event = RecordsFactory.create_instance(
None, [
'time_event_stamp']
)
def add_context(self, context_handle, timestamp, pid, version) -> None:
record = {
'context_handle': context_handle,
'timestamp': timestamp,
'pid': pid,
'version': version, # Comment out to align with Dict[str: int64_t]
}
self._contexts.append(record)
def add_node(self, node_handle, timestamp, tid, rmw_handle, name, namespace) -> None:
record = {
'node_handle': node_handle,
'timestamp': timestamp,
'tid': tid,
'rmw_handle': rmw_handle,
'namespace': namespace,
'name': name,
}
self._nodes.append(record)
def add_publisher(self, handle, timestamp, node_handle, rmw_handle, topic_name, depth) -> None:
record = {
'publisher_handle': handle,
'timestamp': timestamp,
'node_handle': node_handle,
'rmw_handle': rmw_handle,
'topic_name': topic_name,
'depth': depth,
}
self._publishers.append(record)
def add_rcl_subscription(
self, handle, timestamp, node_handle, rmw_handle, topic_name, depth
) -> None:
record = {
'subscription_handle': handle,
'timestamp': timestamp,
'node_handle': node_handle,
'rmw_handle': rmw_handle,
'topic_name': topic_name,
'depth': depth,
}
self._subscriptions.append(record)
def add_rclcpp_subscription(
self, subscription_pointer, timestamp, subscription_handle
) -> None:
record = {
'subscription': subscription_pointer,
'timestamp': timestamp,
'subscription_handle': subscription_handle,
}
self._subscription_objects.append(record)
def add_service(self, handle, timestamp, node_handle, rmw_handle, service_name) -> None:
record = {
'service_handle': handle,
'timestamp': timestamp,
'node_handle': node_handle,
'rmw_handle': rmw_handle,
'service_name': service_name,
}
self._services.append(record)
def add_client(self, handle, timestamp, node_handle, rmw_handle, service_name) -> None:
record = {
'client_handle': handle,
'timestamp': timestamp,
'node_handle': node_handle,
'rmw_handle': rmw_handle,
'service_name': service_name,
}
self._clients.append(record)
def add_timer(self, handle, timestamp, period, tid) -> None:
record = {
'timer_handle': handle,
'timestamp': timestamp,
'period': period,
'tid': tid,
}
self._timers.append(record)
def add_tilde_subscribe_added(
self, subscription_id, node_name, topic_name, timestamp
) -> None:
record = {
'subscription_id': subscription_id,
'node_name': node_name,
'topic_name': topic_name,
'timestamp': timestamp
}
self._tilde_subscribe_added.append(record)
def add_timer_node_link(self, handle, timestamp, node_handle) -> None:
record = {
'timer_handle': handle,
'timestamp': timestamp,
'node_handle': node_handle,
}
self._timer_node_links.append(record)
def add_callback_object(self, reference, timestamp, callback_object) -> None:
record = {
'reference': reference,
'timestamp': timestamp,
'callback_object': callback_object,
}
self._callback_objects.append(record)
def add_callback_symbol(self, callback_object, timestamp, symbol) -> None:
record = {
'callback_object': callback_object,
'timestamp': timestamp,
'symbol': symbol,
}
self._callback_symbols.append(record)
def add_lifecycle_state_machine(self, handle, node_handle) -> None:
record = {
'state_machine_handle': handle,
'node_handle': node_handle,
}
self._lifecycle_state_machines.append(record)
def add_lifecycle_state_transition(
self, state_machine_handle, start_label, goal_label, timestamp
) -> None:
record = {
'state_machine_handle': state_machine_handle,
'start_label': start_label,
'goal_label': goal_label,
'timestamp': timestamp,
}
self._lifecycle_transitions.append(record)
def add_tilde_subscription(
self, subscription, node_name, topic_name, timestamp
) -> None:
record = {
'subscription': subscription,
'node_name': node_name,
'topic_name': topic_name,
'timestamp': timestamp,
}
self._tilde_subscriptions.append(record)
def add_tilde_publisher(
self, publisher, node_name, topic_name, timestamp
) -> None:
record = {
'publisher': publisher,
'node_name': node_name,
'topic_name': topic_name,
'timestamp': timestamp,
}
self._tilde_publishers.append(record)
def add_callback_start_instance(
self, timestamp: int, callback: int, is_intra_process: bool
) -> None:
record = RecordFactory.create_instance(
{
'callback_start_timestamp': timestamp,
'callback_object': callback,
'is_intra_process': is_intra_process,
}
)
self.callback_start_instances.append(record)
def add_callback_end_instance(self, timestamp: int, callback: int) -> None:
record = RecordFactory.create_instance(
{'callback_end_timestamp': timestamp, 'callback_object': callback}
)
self.callback_end_instances.append(record)
def add_rclcpp_intra_publish_instance(
self,
tid: int,
timestamp: int,
publisher_handle: int,
message: int,
message_timestamp: int,
) -> None:
record = RecordFactory.create_instance(
{
'tid': tid,
'rclcpp_intra_publish_timestamp': timestamp,
'publisher_handle': publisher_handle,
'message': message,
'message_timestamp': message_timestamp,
}
)
self.rclcpp_intra_publish_instances.append(record)
def add_rclcpp_publish_instance(
self,
tid: int,
timestamp: int,
publisher_handle: int,
message: int,
message_timestamp: int,
) -> None:
record = RecordFactory.create_instance(
{
'tid': tid,
'rclcpp_publish_timestamp': timestamp,
'publisher_handle': publisher_handle,
'message': message,
'message_timestamp': message_timestamp,
}
)
self.rclcpp_publish_instances.append(record)
def add_rcl_publish_instance(
self,
tid: int,
timestamp: int,
publisher_handle: int,
message: int,
) -> None:
record = RecordFactory.create_instance(
{
'tid': tid,
'rcl_publish_timestamp': timestamp,
'publisher_handle': publisher_handle,
'message': message,
}
)
self.rcl_publish_instances.append(record)
def add_dds_write_instance(
self,
tid: int,
timestamp: int,
message: int,
) -> None:
record = RecordFactory.create_instance(
{
'tid': tid,
'dds_write_timestamp': timestamp,
'message': message,
}
)
self.dds_write_instances.append(record)
def add_dds_bind_addr_to_addr(
self,
timestamp: int,
addr_from: int,
addr_to: int,
) -> None:
record = RecordFactory.create_instance(
{
'dds_bind_addr_to_addr_timestamp': timestamp,
'addr_from': addr_from,
'addr_to': addr_to,
}
)
self.dds_bind_addr_to_addr.append(record)
def add_dds_bind_addr_to_stamp(
self,
tid: int,
timestamp: int,
addr: int,
source_timestamp: int,
) -> None:
record = RecordFactory.create_instance(
{
'tid': tid,
'dds_bind_addr_to_stamp_timestamp': timestamp,
'addr': addr,
'source_timestamp': source_timestamp,
}
)
self.dds_bind_addr_to_stamp.append(record)
def add_on_data_available_instance(
self,
timestamp: int,
source_timestamp: int,
) -> None:
record = RecordFactory.create_instance(
{
'on_data_available_timestamp': timestamp,
'source_timestamp': source_timestamp,
}
)
self.on_data_available_instances.append(record)
def add_message_construct_instance(
self, timestamp: int, original_message: int, constructed_message: int
) -> None:
record = RecordFactory.create_instance(
{
'message_construct_timestamp': timestamp,
'original_message': original_message,
'constructed_message': constructed_message,
}
)
self.message_construct_instances.append(record)
def add_dispatch_subscription_callback_instance(
self,
timestamp: int,
callback_object: int,
message: int,
source_timestamp: int,
message_timestamp: int,
) -> None:
record = RecordFactory.create_instance(
{
'dispatch_subscription_callback_timestamp': timestamp,
'callback_object': callback_object,
'message': message,
'source_timestamp': source_timestamp,
'message_timestamp': message_timestamp,
}
)
self.dispatch_subscription_callback_instances.append(record)
def add_sim_time(
self,
timestamp: int,
sim_time: int
) -> None:
record = RecordFactory.create_instance(
{
'system_time': timestamp,
'sim_time': sim_time
}
)
self.sim_time.append(record)
def add_rmw_implementation(self, rmw_impl: str):
self._rmw_impl.append({'rmw_impl': rmw_impl})
def add_dispatch_intra_process_subscription_callback_instance(
self,
timestamp: int,
callback_object: int,
message: int,
message_timestamp: int,
) -> None:
record = RecordFactory.create_instance(
{
'dispatch_intra_process_subscription_callback_timestamp': timestamp,
'callback_object': callback_object,
'message': message,
'message_timestamp': message_timestamp
}
)
self.dispatch_intra_process_subscription_callback_instances.append(
record)
def add_tilde_subscribe(
self,
timestamp: int,
subscription: int,
tilde_message_id: int,
) -> None:
record = RecordFactory.create_instance(
{
'tilde_subscribe_timestamp': timestamp,
'subscription': subscription,
'tilde_message_id': tilde_message_id
}
)
self.tilde_subscribe.append(record)
def add_tilde_publish(
self,
timestamp: int,
publisher: int,
subscription_id: int,
tilde_message_id: int,
) -> None:
record = RecordFactory.create_instance(
{
'tilde_publish_timestamp': timestamp,
'publisher': publisher,
'subscription_id': subscription_id,
'tilde_message_id': tilde_message_id,
}
)
self.tilde_publish.append(record)
def add_executor(
self,
executor_addr: int,
timestamp: int,
executor_type_name: str
) -> None:
record = {
'timestamp': timestamp,
'executor_addr': executor_addr,
'executor_type_name': executor_type_name,
}
self._executors.append(record)
def add_executor_static(
self,
executor_addr: int,
entities_collector_addr: int,
timestamp: int,
executor_type_name: str
) -> None:
record = {
'timestamp': timestamp,
'executor_addr': executor_addr,
'entities_collector_addr': entities_collector_addr,
'executor_type_name': executor_type_name,
}
self._executors_static.append(record)
def add_callback_group(
self,
executor_addr: int,
timestamp: int,
callback_group_addr: int,
group_type_name: str
) -> None:
record = {
'timestamp': timestamp,
'executor_addr': executor_addr,
'callback_group_addr': callback_group_addr,
'group_type_name': group_type_name
}
self._callback_groups.append(record)
def add_callback_group_static_executor(
self,
entities_collector_addr: int,
timestamp: int,
callback_group_addr: int,
group_type_name: str
) -> None:
record = {
'timestamp': timestamp,
'entities_collector_addr': entities_collector_addr,
'callback_group_addr': callback_group_addr,
'group_type_name': group_type_name
}
self._callback_groups_static.append(record)
def callback_group_add_timer(
self,
callback_group_addr: int,
timestamp: int,
timer_handle: int
) -> None:
record = {
'timestamp': timestamp,
'callback_group_addr': callback_group_addr,
'timer_handle': timer_handle,
}
self._callback_group_timer.append(record)
def callback_group_add_subscription(
self,
callback_group_addr: int,
timestamp: int,
subscription_handle: int
) -> None:
record = {
'timestamp': timestamp,
'callback_group_addr': callback_group_addr,
'subscription_handle': subscription_handle,
}
self._callback_group_subscription.append(record)
def callback_group_add_service(
self,
callback_group_addr: int,
timestamp: int,
service_handle: int
) -> None:
record = {
'timestamp': timestamp,
'callback_group_addr': callback_group_addr,
'service_handle': service_handle,
}
self._callback_group_service.append(record)
def callback_group_add_client(
self,
callback_group_addr: int,
timestamp: int,
client_handle: int
) -> None:
record = {
'timestamp': timestamp,
'callback_group_addr': callback_group_addr,
'client_handle': client_handle,
}
self._callback_group_client.append(record)
def _finalize(self) -> None:
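        """Convert the intermediate storage lists into pandas DataFrames, indexed by their handles where available."""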
self.contexts = pd.DataFrame.from_dict(self._contexts)
if self._contexts:
self.contexts.set_index('context_handle', inplace=True, drop=True)
self.nodes = pd.DataFrame.from_dict(self._nodes)
if self._nodes:
self.nodes.set_index('node_handle', inplace=True, drop=True)
self.publishers = pd.DataFrame.from_dict(self._publishers)
if self._publishers:
self.publishers.set_index(
'publisher_handle', inplace=True, drop=True)
self.subscriptions = | pd.DataFrame.from_dict(self._subscriptions) | pandas.DataFrame.from_dict |
"""Test for utils.py"""
from unittest.mock import Mock
import numpy as np
import pytest
from scipy import sparse
import torch
from torch.nn.utils.rnn import PackedSequence
from torch.nn.utils.rnn import pack_padded_sequence
from scripts.study_case.ID_12.skorch.tests.conftest import pandas_installed
class TestToTensor:
@pytest.fixture
def to_tensor(self):
from scripts.study_case.ID_12.skorch.utils import to_tensor
return to_tensor
@pytest.mark.skipif(not torch.cuda.is_available(), reason="no cuda device")
def test_device_setting_cuda(self, to_tensor):
x = np.ones((2, 3, 4))
t = to_tensor(x, device='cpu')
assert t.device.type == 'cpu'
t = to_tensor(x, device='cuda')
assert t.device.type.startswith('cuda')
t = to_tensor(t, device='cuda')
assert t.device.type.startswith('cuda')
t = to_tensor(t, device='cpu')
assert t.device.type == 'cpu'
    def tensors_equal(self, x, y):
        """Test that tensors in diverse containers are equal."""
if isinstance(x, PackedSequence):
return self.tensors_equal(x[0], y[0]) and self.tensors_equal(x[1], y[1])
if isinstance(x, dict):
return (
(x.keys() == y.keys()) and
all(self.tensors_equal(x[k], y[k]) for k in x)
)
if isinstance(x, (list, tuple)):
return all(self.tensors_equal(xi, yi) for xi, yi in zip(x, y))
if x.is_sparse is not y.is_sparse:
return False
if x.is_sparse:
x, y = x.to_dense(), y.to_dense()
return (x == y).all()
# pylint: disable=no-method-argument
def parameters():
"""Yields data, expected value, and device for tensor conversion
test.
Stops earlier when no cuda device is available.
"""
device = 'cpu'
x = torch.zeros((5, 3)).float()
y = torch.as_tensor([2, 2, 1])
z = np.arange(15).reshape(5, 3)
for X, expected in [
(x, x),
(y, y),
([x, y], [x, y]),
((x, y), (x, y)),
(z, torch.as_tensor(z)),
(
{'a': x, 'b': y, 'c': z},
{'a': x, 'b': y, 'c': torch.as_tensor(z)}
),
(torch.as_tensor(55), torch.as_tensor(55)),
(pack_padded_sequence(x, y), pack_padded_sequence(x, y)),
]:
yield X, expected, device
if not torch.cuda.is_available():
return
device = 'cuda'
x = x.to('cuda')
y = y.to('cuda')
for X, expected in [
(x, x),
(y, y),
([x, y], [x, y]),
((x, y), (x, y)),
(z, torch.as_tensor(z).to('cuda')),
(
{'a': x, 'b': y, 'c': z},
{'a': x, 'b': y, 'c': torch.as_tensor(z).to('cuda')}
),
(torch.as_tensor(55), torch.as_tensor(55).to('cuda')),
(
pack_padded_sequence(x, y),
pack_padded_sequence(x, y).to('cuda')
),
]:
yield X, expected, device
@pytest.mark.parametrize('X, expected, device', parameters())
def test_tensor_conversion_cuda(self, to_tensor, X, expected, device):
result = to_tensor(X, device)
assert self.tensors_equal(result, expected)
assert self.tensors_equal(expected, result)
@pytest.mark.parametrize('device', ['cpu', 'cuda'])
def test_sparse_tensor(self, to_tensor, device):
if device == 'cuda' and not torch.cuda.is_available():
pytest.skip()
inp = sparse.csr_matrix(np.zeros((5, 3)).astype(np.float32))
expected = torch.sparse_coo_tensor(size=(5, 3)).to(device)
result = to_tensor(inp, device=device, accept_sparse=True)
assert self.tensors_equal(result, expected)
@pytest.mark.parametrize('device', ['cpu', 'cuda'])
def test_sparse_tensor_not_accepted_raises(self, to_tensor, device):
if device == 'cuda' and not torch.cuda.is_available():
pytest.skip()
inp = sparse.csr_matrix(np.zeros((5, 3)).astype(np.float32))
with pytest.raises(TypeError) as exc:
to_tensor(inp, device=device)
msg = ("Sparse matrices are not supported. Set "
"accept_sparse=True to allow sparse matrices.")
assert exc.value.args[0] == msg
class TestDuplicateItems:
@pytest.fixture
def duplicate_items(self):
from scripts.study_case.ID_12.skorch.utils import duplicate_items
return duplicate_items
@pytest.mark.parametrize('collections', [
([],),
([], []),
([], [], []),
([1, 2]),
([1, 2], [3]),
([1, 2], [3, '1']),
([1], [2], [3], [4]),
({'1': 1}, [2]),
({'1': 1}, {'2': 1}, ('3', '4')),
])
def test_no_duplicates(self, duplicate_items, collections):
assert duplicate_items(*collections) == set()
@pytest.mark.parametrize('collections, expected', [
([1, 1], {1}),
(['1', '1'], {'1'}),
([[1], [1]], {1}),
([[1, 2, 1], [1]], {1}),
([[1, 1], [2, 2]], {1, 2}),
([[1], {1: '2', 2: '2'}], {1}),
([[1, 2], [3, 4], [2], [3]], {2, 3}),
([{'1': 1}, {'1': 1}, ('3', '4')], {'1'}),
])
def test_duplicates(self, duplicate_items, collections, expected):
assert duplicate_items(*collections) == expected
class TestParamsFor:
@pytest.fixture
def params_for(self):
from scripts.study_case.ID_12.skorch.utils import params_for
return params_for
@pytest.mark.parametrize('prefix, kwargs, expected', [
('p1', {'p1__a': 1, 'p1__b': 2}, {'a': 1, 'b': 2}),
('p2', {'p1__a': 1, 'p1__b': 2}, {}),
('p1', {'p1__a': 1, 'p1__b': 2, 'p2__a': 3}, {'a': 1, 'b': 2}),
('p2', {'p1__a': 1, 'p1__b': 2, 'p2__a': 3}, {'a': 3}),
])
def test_params_for(self, params_for, prefix, kwargs, expected):
assert params_for(prefix, kwargs) == expected
class TestDataFromDataset:
@pytest.fixture
def data_from_dataset(self):
from scripts.study_case.ID_12.skorch.utils import data_from_dataset
return data_from_dataset
@pytest.fixture
def data(self):
X = np.arange(8).reshape(4, 2)
y = np.array([1, 3, 0, 2])
return X, y
@pytest.fixture
def skorch_ds(self, data):
from scripts.study_case.ID_12.skorch.dataset import Dataset
return Dataset(*data)
@pytest.fixture
def subset(self, skorch_ds):
from torch.utils.data.dataset import Subset
return Subset(skorch_ds, [1, 3])
@pytest.fixture
def subset_subset(self, subset):
from torch.utils.data.dataset import Subset
return Subset(subset, [0])
# pylint: disable=missing-docstring
@pytest.fixture
def other_ds(self, data):
class MyDataset:
"""Non-compliant dataset"""
def __init__(self, data):
self.data = data
def __getitem__(self, idx):
return self.data[0][idx], self.data[1][idx]
def __len__(self):
return len(self.data[0])
return MyDataset(data)
def test_with_skorch_ds(self, data_from_dataset, data, skorch_ds):
X, y = data_from_dataset(skorch_ds)
assert (X == data[0]).all()
assert (y == data[1]).all()
def test_with_subset(self, data_from_dataset, data, subset):
X, y = data_from_dataset(subset)
assert (X == data[0][[1, 3]]).all()
assert (y == data[1][[1, 3]]).all()
def test_with_subset_subset(self, data_from_dataset, data, subset_subset):
X, y = data_from_dataset(subset_subset)
assert (X == data[0][1]).all()
assert (y == data[1][1]).all()
def test_with_other_ds(self, data_from_dataset, other_ds):
with pytest.raises(AttributeError):
data_from_dataset(other_ds)
def test_with_dict_data(self, data_from_dataset, data, subset):
subset.dataset.X = {'X': subset.dataset.X}
X, y = data_from_dataset(subset)
assert (X['X'] == data[0][[1, 3]]).all()
assert (y == data[1][[1, 3]]).all()
def test_subset_with_y_none(self, data_from_dataset, data, subset):
subset.dataset.y = None
X, y = data_from_dataset(subset)
assert (X == data[0][[1, 3]]).all()
assert y is None
class TestMultiIndexing:
@pytest.fixture
def multi_indexing(self):
from scripts.study_case.ID_12.skorch.dataset import multi_indexing
return multi_indexing
@pytest.mark.parametrize('data, i, expected', [
(
np.arange(12).reshape(4, 3),
slice(None),
np.arange(12).reshape(4, 3),
),
(
np.arange(12).reshape(4, 3),
np.s_[2],
np.array([6, 7, 8]),
),
(
np.arange(12).reshape(4, 3),
np.s_[-2:],
np.array([[6, 7, 8], [9, 10, 11]]),
),
])
def test_ndarray(self, multi_indexing, data, i, expected):
result = multi_indexing(data, i)
assert np.allclose(result, expected)
@pytest.mark.parametrize('data, i, expected', [
(
torch.arange(0, 12).view(4, 3),
slice(None),
np.arange(12).reshape(4, 3),
),
(
torch.arange(0, 12).view(4, 3),
np.s_[2],
np.array([6, 7, 8]),
),
(
torch.arange(0, 12).view(4, 3),
np.int64(2),
np.array([6, 7, 8]),
),
(
torch.arange(0, 12).view(4, 3),
np.s_[-2:],
np.array([[6, 7, 8], [9, 10, 11]]),
),
])
def test_torch_tensor(self, multi_indexing, data, i, expected):
result = multi_indexing(data, i).long().numpy()
assert np.allclose(result, expected)
@pytest.mark.parametrize('data, i, expected', [
([1, 2, 3, 4], slice(None), [1, 2, 3, 4]),
([1, 2, 3, 4], slice(None, 2), [1, 2]),
([1, 2, 3, 4], 2, 3),
([1, 2, 3, 4], -2, 3),
])
def test_list(self, multi_indexing, data, i, expected):
result = multi_indexing(data, i)
assert np.allclose(result, expected)
@pytest.mark.parametrize('data, i, expected', [
({'a': [0, 1, 2], 'b': [3, 4, 5]}, 0, {'a': 0, 'b': 3}),
(
{'a': [0, 1, 2], 'b': [3, 4, 5]},
np.s_[:2],
{'a': [0, 1], 'b': [3, 4]},
)
])
def test_dict_of_lists(self, multi_indexing, data, i, expected):
result = multi_indexing(data, i)
assert result == expected
@pytest.mark.parametrize('data, i, expected', [
(
{'a': np.arange(3), 'b': np.arange(3, 6)},
0,
{'a': 0, 'b': 3}
),
(
{'a': np.arange(3), 'b': np.arange(3, 6)},
np.s_[:2],
{'a': np.arange(2), 'b': np.arange(3, 5)}
),
])
def test_dict_of_arrays(self, multi_indexing, data, i, expected):
result = multi_indexing(data, i)
assert result.keys() == expected.keys()
for k in result:
assert np.allclose(result[k], expected[k])
@pytest.mark.parametrize('data, i, expected', [
(
{'a': torch.arange(0, 3), 'b': torch.arange(3, 6)},
0,
{'a': 0, 'b': 3}
),
(
{'a': torch.arange(0, 3), 'b': torch.arange(3, 6)},
np.s_[:2],
{'a': np.arange(2), 'b': np.arange(3, 5)}
),
])
def test_dict_of_torch_tensors(self, multi_indexing, data, i, expected):
result = multi_indexing(data, i)
assert result.keys() == expected.keys()
for k in result:
try:
val = result[k].long().numpy()
except AttributeError:
val = result[k]
assert np.allclose(val, expected[k])
def test_mixed_data(self, multi_indexing):
data = [
[1, 2, 3],
np.arange(3),
torch.arange(3, 6),
{'a': [4, 5, 6], 'b': [7, 8, 9]},
]
result = multi_indexing(data, 0)
expected = [1, 0, 3, {'a': 4, 'b': 7}]
assert result == expected
def test_mixed_data_slice(self, multi_indexing):
data = [
[1, 2, 3],
np.arange(3),
torch.arange(3, 6),
{'a': [4, 5, 6], 'b': [7, 8, 9]},
]
result = multi_indexing(data, np.s_[:2])
assert result[0] == [1, 2]
assert np.allclose(result[1], np.arange(2))
assert np.allclose(result[2].long().numpy(), np.arange(3, 5))
assert result[3] == {'a': [4, 5], 'b': [7, 8]}
@pytest.fixture
def pd(self):
if not pandas_installed:
pytest.skip()
import pandas as pd
return pd
def test_pandas_dataframe(self, multi_indexing, pd):
df = pd.DataFrame({'a': [0, 1, 2], 'b': [3, 4, 5]}, index=[2, 1, 0])
result = multi_indexing(df, 0)
# Note: taking one row of a DataFrame returns a Series
expected = pd.Series(data=[0, 3], index=['a', 'b'], name=2)
assert result.equals(expected)
def test_pandas_dataframe_slice(self, multi_indexing, pd):
import pandas as pd
df = pd.DataFrame({'a': [0, 1, 2], 'b': [3, 4, 5]}, index=[2, 1, 0])
result = multi_indexing(df, np.s_[:2])
expected = pd.DataFrame({'a': [0, 1], 'b': [3, 4]}, index=[2, 1])
assert result.equals(expected)
def test_pandas_series(self, multi_indexing, pd):
series = pd.Series(data=[0, 1, 2], index=[2, 1, 0])
result = multi_indexing(series, 0)
assert result == 0
def test_pandas_series_slice(self, multi_indexing, pd):
series = pd.Series(data=[0, 1, 2], index=[2, 1, 0])
result = multi_indexing(series, np.s_[:2])
expected = pd.Series(data=[0, 1], index=[2, 1])
assert result.equals(expected)
def test_list_of_dataframe_and_series(self, multi_indexing, pd):
data = [
pd.DataFrame({'a': [0, 1, 2], 'b': [3, 4, 5]}, index=[2, 1, 0]),
| pd.Series(data=[0, 1, 2], index=[2, 1, 0]) | pandas.Series |
'''
pyjade
A program to export, curate, and transform data from the MySQL database used by the Jane Addams Digital Edition.
'''
import os
import re
import sys
import json
import string
import datetime
import mysql.connector
from diskcache import Cache
import pandas as pd
import numpy as np
from bs4 import BeautifulSoup
from tqdm import tqdm
from safeprint import print
'''
Options
'''
try: # Options file setup credit <NAME>
with open(os.path.join('options.json')) as env_file:
ENV = json.loads(env_file.read())
except:
print('"Options.json" not found; please add "options.json" to the current directory.')
'''
SQL Connection
'''
DB = mysql.connector.connect(
host=ENV['SQL']['HOST'],
user=ENV['SQL']['USER'],
passwd=ENV['SQL']['PASSWORD'],
database=ENV['SQL']['DATABASE']
)
CUR = DB.cursor(buffered=True)
'''
Setup
'''
BEGIN = datetime.datetime.now()
TS = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
ITEM_ELEMENTS = ENV['ELEMENT_DICTIONARY']['DCTERMS_IN_USE']
ITEM_ELEMENTS.update(ENV['ELEMENT_DICTIONARY']['DESC_JADE_ELEMENTS'])
TYPES = ENV['ELEMENT_DICTIONARY']['TYPES']
OUT_DIR = 'outputs/'
if not os.path.exists(OUT_DIR):
os.makedirs(OUT_DIR)
DATASET_OPTIONS = ENV['DATASET_OPTIONS']
CRUMBS = DATASET_OPTIONS['EXPORT_SEPARATE_SQL_CRUMBS']
PROP_SET_LIST = DATASET_OPTIONS['PROPERTIES_TO_INCLUDE_FOR_EACH_TYPE']
INCLUDE_PROPS = DATASET_OPTIONS['PROPERTIES_TO_INCLUDE_FOR_EACH_TYPE']
class Dataset():
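    '''
    Pull, curate, quantify, and export the JADE items, relations, and locations drawn from the Omeka MySQL tables.
    '''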
def __init__(self):
'''
Start building the dataset objects by pulling IDs and types from omek_items
'''
statement = '''
SELECT omek_items.id as item_id, omek_item_types.`name` as 'jade_type', collection_id as 'jade_collection' FROM omek_items
JOIN omek_item_types on omek_items.item_type_id = omek_item_types.id
WHERE public = 1
ORDER BY item_id;
'''
self.omek_items = pd.read_sql(statement,DB)
self.omek_items = self.omek_items.set_index('item_id',drop=False)
self.objects = self.omek_items.copy()
self.objects['item_id'] = self.objects['item_id'].apply(
lambda x: self.convert_to_jade_id(x))
self.objects.rename(columns={'item_id': 'jade_id'},inplace=True)
self.objects = self.objects.set_index('jade_id',drop=False)
self.objects = self.objects[self.objects['jade_type'].isin(
['Text','Event','Person','Organization','Publication']
)]
        # Noise is an alternate dataset to record property values that don't fit the regular usage
        self.noise = self.objects.copy()
        self.noise.drop('jade_type', axis=1)  # NOTE: no-op as written (result is not assigned back)
        self.noise.drop('jade_collection', axis=1)  # NOTE: no-op as written (result is not assigned back)
def ingest(self,limit=None):
'''
Get the item element texts
'''
statement = f'''
SELECT et.id AS id, et.record_id AS record_id,
et.element_id AS element_id, et.`text` AS el_text,
items.item_type_id AS item_type
FROM omek_element_texts as et
JOIN omek_items AS items ON et.record_id = items.id
WHERE record_type = "Item"
ORDER BY id;
'''
if limit != None:
statement = statement.split(';')[0] + f' LIMIT {str(limit)};'
self.element_texts = pd.read_sql(statement,DB)
# Load environment variables
ELEMENT_IDS = list(ITEM_ELEMENTS.keys())
# Set data structure:
data = {}
noise = {}
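        # compile_json (a helper defined later in this script) appends a value into
        # data[jade_id][element_label], creating the nested dict and list entries as needed.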
# Iterate through the element_texts
iter = tqdm(self.element_texts.iterrows())
iter.set_description("Ingesting item attributes")
for tup in iter:
row = tup[1]
element_id = str(row.loc['element_id'])
if row.loc['record_id'] in self.omek_items.index.values:
jade_type = self.omek_items.loc[row.loc['record_id'],'jade_type']
jade_id = self.convert_to_jade_id(row.loc['record_id'])
# Filter element texts through environment variables
if element_id in ELEMENT_IDS:
if jade_type in TYPES.values():
element_label = ITEM_ELEMENTS[element_id]
# Filters property values through the sets designated in the options
if element_label in INCLUDE_PROPS[jade_type]:
compile_json(data,jade_id,element_label,row.loc['el_text'])
else:
compile_json(noise,jade_id,element_label,row.loc['el_text'])
# if CRUMBS:
# print('Excluded',element_label,'in type',jade_type)
# Add accumulated data to DataFrame
new_df = pd.DataFrame.from_dict(data,orient='index')
new_noise_df = pd.DataFrame.from_dict(noise,orient='index')
self.objects = pd.concat([self.objects,new_df],axis=1)
self.noise = pd.concat([self.noise,new_noise_df],axis=1)
# Add URLs
base_url = "https://digital.janeaddams.ramapo.edu/items/show/"
self.objects.insert(loc=1,column='jade_url',value=[
base_url+id.split('_')[-1] for id in self.objects.index.values
])
self.add_collections(limit)
self.add_tags(limit)
# Remove records with no title fields found
self.objects = self.objects.dropna(subset=['dcterms_title'])
def convert_to_jade_id(self,item_id):
'''
Prepend the type string to the SQL primary key so that locations and items are unique in the same set of relations
'''
        if not isinstance(item_id, str):
if item_id in self.omek_items.index.values:
the_type = self.omek_items.at[item_id,"jade_type"]
if the_type in list(TYPES.values()):
return the_type.lower()+"_"+str(item_id)
else:
return "unspecified_"+str(item_id)
else:
return "unpublished_"+str(item_id)
else:
return item_id
def add_tags(self,limit):
'''
Pull tags from the database
'''
statement = f'''
SELECT * FROM omek_records_tags
JOIN omek_tags on omek_records_tags.tag_id = omek_tags.id;
'''
self.tag_df = pd.read_sql(statement,DB)
self.objects = self.objects[:limit].apply(
lambda x : self.add_tag(x),axis=1)
def add_tag(self, row_ser):
'''
Add the tag to the list for each object
'''
new_subj_field = []
id = row_ser.loc['jade_id']
try:
tag_names = self.tag_df.loc[self.tag_df['record_id'] == int(id.split("_")[-1])]
if not tag_names.empty:
for name in tag_names['name'].to_list():
if name not in new_subj_field:
new_subj_field.append(name)
row_ser['dcterms_subject'] = new_subj_field
return row_ser
except:
return row_ser
def add_collections(self,limit):
'''
Pull collections from the database
'''
statement = '''
SELECT omek_collections.id as collection_id, `text` as collection_name FROM omek_collections
JOIN omek_element_texts AS texts ON omek_collections.id = texts.record_id
WHERE record_type = "Collection"
AND element_id = 50
AND public = 1;
'''
self.collection_df = pd.read_sql(statement,DB)
self.collection_df = self.collection_df.set_index('collection_id')
self.objects = self.objects[:limit].apply(
lambda x : self.add_collection(x),
axis=1
)
def add_collection(self,row_ser):
'''
Add the collection to the list for each object
'''
new_collection_field = []
ids = row_ser.loc['jade_collection']
if not isinstance(ids, list):
ids = [ids]
try:
for coll_id in ids:
matches = self.collection_df.at[coll_id,'collection_name']
if isinstance(matches,np.ndarray):
match_list = matches.tolist()
elif isinstance(matches,str):
match_list = [matches]
else:
print("Unrecognized type of collection",type(matches))
for name in match_list:
if name not in new_collection_field:
new_collection_field.append(name)
row_ser['jade_collection'] = new_collection_field
return row_ser
except:
return row_ser
def add_relations(self,limit=None):
'''
Ingest relation data from SQL
'''
# Read from SQL tables omek_item_relations_relations and omek_item_relations_properties
statement = f'''
SELECT relations.id as id, relations.subject_item_id AS subjId, properties.id as relId, properties.label AS relLabel, relations.object_item_id AS objId
FROM omek_item_relations_relations AS relations
JOIN omek_item_relations_properties AS properties ON relations.property_id = properties.id;
'''
if limit != None:
statement = statement.split(';')[0] + f' LIMIT {str(limit)};'
self.relations = pd.read_sql(statement,DB,index_col='id')
# Style relation labels with camel case
self.relations['relLabel'] = self.relations['relLabel'].apply(
lambda x: camel(x))
# Set up data structure
data = {}
noise = {}
# Add the type prefix to the subject and object IDs
self.relations['subjId'] = self.relations['subjId'].apply(
lambda x: self.convert_to_jade_id(x))
self.relations['objId'] = self.relations['objId'].apply(
lambda x: self.convert_to_jade_id(x))
# Iterate through the relation set
iter = tqdm(self.relations.iterrows())
iter.set_description("Adding relations")
for tup in iter:
row = tup[1]
subjId = row['subjId']
relLabel = row['relLabel']
objId = row['objId']
if (
subjId in self.objects.index.values
) and (
objId in self.objects.index.values
):
# print(subjId,objId)
compile_json(data,subjId,relLabel,objId)
else:
compile_json(noise,subjId,relLabel,objId)
# Add locations to the relations
# This is a thorny call bramble that should probably be untangled in a future iteration of the script
locSet = LocationSet()
locSet.ingest(self,limit=limit)
data, noise = self.add_locations(locSet,data,noise)
# Add the compiled relation data into the main DataFrame and the noise bin
new_df = pd.DataFrame(data={"jade_relation":list(data.values())},index=list(data.keys()))
self.objects = pd.concat([self.objects,new_df],sort=False,axis=1)
new_noise_df = pd.DataFrame(data={"jade_relation":list(noise.values())},index=list(noise.keys()))
self.noise = pd.concat([self.noise,new_noise_df],sort=False,axis=1)
def add_locations(self,locSet,data,noise):
'''
Add locations from class object already constructed
'''
# Add the type prefix to the location and item IDs
locSet.locations['loc_id'] = locSet.locations['loc_id'].astype(str)
locSet.locations['loc_id'] = locSet.locations['loc_id'].apply(
lambda x : "location_" + str(x))
locSet.locations.rename(columns={'loc_id': 'jade_id'},inplace=True)
# Merge locations table into objects table
self.objects = pd.concat([self.objects,locSet.locations],axis=0)
self.objects = self.objects.set_index('jade_id',drop=False)
self.objects.index.name = None
dataset_ids = self.objects.index.values
self.location_duplicates = locSet.location_duplicates
# Iterate through the location set
iter = tqdm(locSet.locations.iterrows())
iter.set_description("Adding locations")
for tup in iter:
row = tup[1]
# Iterate through the collection of items for each location
for rel in list(row.loc['loc_relation'].items()):
loc_id = row.loc['jade_id']
desc_list = rel[1]
item_id = rel[0]
for desc in desc_list:
# Build up the data structure for the later DataFrame
if item_id in dataset_ids:
compile_json(data,item_id,desc,loc_id)
else:
compile_json(noise,item_id,desc,loc_id)
# Remove relations from locations table as they are now represented in item rows
self.objects = self.objects.drop("loc_relation",axis=1)
# Add location types
self.objects = self.objects.apply(
lambda ser : self.add_location_types(ser),
axis=1
)
self.noise = self.noise.apply(
lambda ser : self.add_location_types(ser),
axis=1
)
self.objects = self.objects.dropna(subset=['jade_id'])
return data, noise
def add_location_types(self,row):
'''
Look for null type values and adds location if location in jade_id prefix
'''
try:
if pd.isnull(row.loc['jade_type']):
if type(row.loc['jade_id']) == type(""):
if row.loc['jade_id'].split("_")[0] == "location":
row.loc['jade_type'] = "Location"
else:
print("Type null but not location:",row)
else:
print('Dropped type not included:',row['jade_url'])
return row
except:
print("Unknown problem during adding location type for:",row)
def quantify(self):
'''
Run counting functions on properties and relations to create descriptive statistics about the data
'''
self.quant = {}
# Items
self.quant["item_property_count"] = self.objects.count()
# Item properties
self.quantify_properties()
# Item properties by type
self.quantify_properties_by_type()
# Relations (including location relations)
self.quantify_relations()
# Data nesting
self.quant['nesting'] = {}
self.check_nesting(self.objects)
def quantify_properties(self):
'''
Run counts of properties
'''
# Iterate through properties identified for faceting
props = list(DATASET_OPTIONS['SUBSET_PROPERTIES_AND_QUANTITIES'].items())
iter = tqdm(props)
iter.set_description("Quantifying subsets by facet")
for prop, lim in iter:
if prop in self.objects.columns.values:
# Special cases
if prop in ['dcterms_date']:
# Date
dc_dates_ser = self.objects[prop]
dc_dates_ser = dc_dates_ser.apply(unwrap_list)
dc_dates_ser = dc_dates_ser.dropna()
for id in dc_dates_ser.index.values:
try:
date_val = dc_dates_ser[id]
if not isinstance(date_val, list):
date_list = [date_val]
else:
date_list = date_val
for date_string in date_list:
if not isinstance(date_string, str):
date_string = str(date_string)
yearlike = date_string.split('-')[0]
if (
len(yearlike) == 4
) and (
int(yearlike[0]) == 1
) and (
yearlike[3] in '0123456789'
):
year = yearlike
dc_dates_ser[id] = str(year)
else:
                                        print('Dropped unrecognized date value:', id, dc_dates_ser[id])
                                        dc_dates_ser = dc_dates_ser.drop(id)
                        except:
                            print('Dropped unrecognized date value:', id, dc_dates_ser[id])
                            dc_dates_ser = dc_dates_ser.drop(id)
if len(dc_dates_ser) > 1:
self.add_to_quant(
dc_dates_ser,
sort_on_property_name=False)
# All others / standard structure
else:
ser = self.objects[prop]
ser = ser.dropna()
if len(ser) > 1:
self.add_to_quant(ser)
def add_to_quant(
self,
series, # A named Series object whose index is the item or location IDs
# and whose values are non-empty strings or lists of strings
sort_on_property_name = False # Default False sorts by largest count. Optional True sorts alphabetically by property name
):
'''
Index the DataFrame's IDs by value of passed property (column name)
'''
property = series.name
# Create an index of jade_ids by property value for the series (column) passed
for id in series.index.values:
cell = series[id]
if isinstance(cell, np.ndarray):
cell = cell.tolist()
if not isinstance(cell, list):
cell = [cell]
for val in cell:
compile_json(
self.quant,
property,
val.strip() if isinstance(val, str) else val,
id)
# Create a dictionary of property values and instance counts
for val in list(self.quant[property].keys()):
compile_json(self.quant,
property+"_count",
val,
len(self.quant[property][val]))
# Sort the dictionary and add it to the dataset object
if not sort_on_property_name:
self.quant[property+"_count"] = dict(
sort_by_item_counts(self.quant[property+"_count"]))
self.quant[property+"_count"] = pd.Series(
self.quant[property+"_count"],
index=list(self.quant[property+"_count"].keys()),
name=property+"_count")
if sort_on_property_name:
self.quant[property+"_count"] = self.quant[property+"_count"].sort_index()
# Go ahead and unwrap the single-integer lists created by compile_json
self.quant[property+"_count"] = self.quant[property+"_count"].apply(unwrap_list)
def quantify_properties_by_type(self):
'''
Create a table of property counts by object type
'''
# Get a copy of the main DataFrame and send each row through the counter
self.quant['prop_table'] = {}
df = self.objects.copy()
df = df.apply(
lambda ser : self.compile_types_by_prop(ser),
axis=1
)
# Make the resulting dict a DataFrame, sort it, and abbreviate column headers
self.quant['prop_table'] = pd.DataFrame.from_dict(
self.quant['prop_table'],
orient='index')
self.quant['prop_table'] = self.quant['prop_table'][[
'Person',
'Text',
'Event',
'Organization',
'Publication',
'Location',
'All Types'
]]
self.quant['prop_table'] = self.quant['prop_table'].sort_index()
self.quant['prop_table'].rename(columns={'Organization':'Org.', 'Publication':'Pub.', 'Location':'Loc.'},inplace=True)
def compile_types_by_prop(self,ser):
'''
Count the properties in the passed series by object type
'''
jade_type = ser.loc['jade_type']
jade_type = unwrap_list(jade_type)
if jade_type in list(INCLUDE_PROPS.keys()):
for prop in ser.index.values:
if prop in INCLUDE_PROPS[jade_type]:
cell = ser.loc[prop]
if not isinstance(cell, list):
cell = [cell]
if not pd.isnull(cell).any():
if prop not in self.quant['prop_table']:
self.quant['prop_table'][prop] = {}
if "All Properties" not in self.quant['prop_table']:
self.quant['prop_table']['All Properties'] = {}
if jade_type not in self.quant['prop_table'][prop]:
self.quant['prop_table'][prop][jade_type] = 1
else:
self.quant['prop_table'][prop][jade_type] += 1
if "All Types" not in self.quant['prop_table'][prop]:
self.quant['prop_table'][prop]["All Types"] = 1
else:
self.quant['prop_table'][prop]["All Types"] += 1
if jade_type not in self.quant['prop_table']['All Properties']:
self.quant['prop_table']['All Properties'][jade_type] = 1
else:
self.quant['prop_table']['All Properties'][jade_type] += 1
return ser
def quantify_relations(self):
'''
Make a list of unique relation triples and a table of the most common subject–object pairs
'''
# Iterate through relations in the Dataset
uniq_rels = {}
count_df_index = []
count_df_columns = []
iter = tqdm(self.objects.index.values)
iter.set_description("Counting unique relations")
for subjId in iter:
row = self.objects.loc[subjId]
row_rels_dict = row.loc['jade_relation']
if not pd.isnull(row_rels_dict):
for relLabel, objIdList in row_rels_dict.items():
for objId in objIdList:
# Find the types of each subject and object
subjType = subjId.split('_')[0].capitalize()
objType = objId.split('_')[0].capitalize()
# Count the unique combinations of subject, relation, and object
rel = " ".join([subjType,relLabel,objType])
if rel not in uniq_rels:
uniq_rels[rel] = 1
else:
uniq_rels[rel] += 1
# Make the dimensions for a dataframe
if subjType not in count_df_index:
count_df_index.append(subjType)
if objType not in count_df_columns:
count_df_columns.append(objType)
# Sort and output simple list
self.quant["unique_relation_list"] = pd.DataFrame.from_dict(
dict(sort_by_item_counts(uniq_rels)),orient='index')
# Make the dataframe
count_df = pd.DataFrame(data=0,index=count_df_index,columns=count_df_columns)
for rel in list(uniq_rels.keys()):
count = uniq_rels[rel]
try:
subjType, relLabel, objType = rel.split(' ')
count_df.at[subjType,objType] += count
except:
print("Error counting relation:",rel)
self.quant["unique_relation_table"] = count_df
def check_nesting(self,df):
'''
Check whether each column in the passed df has repeating values in any of the rows
'''
for prop in df.columns.values:
column_ser = df[prop]
column_ser = column_ser.dropna()
self.is_nested(column_ser)
def is_nested(self,ser):
'''
Is the passed row repeating/nested?
'''
nested = False
for id, val in ser.iteritems():
if (
type(val) == type([])
) or (
type(val) == type({})
):
if len(val) > 1:
nested = True
self.quant['nesting'][ser.name] = nested
def unwrap_nonrepeating_columns(self):
'''
If a column hasn't been marked as nested, take its values out of the list wrappers
'''
for prop in self.objects.columns.values:
if not self.quant['nesting'][prop]:
self.objects[prop] = self.objects[prop].apply(unwrap_list)
def segment_by_type(self,df):
'''
Break up the passed dataframe by object type and return up to six separate frames that only have the properties belonging to their types
'''
type_segments = {}
for type_name in list(PROP_SET_LIST.keys()):
prospective_props = PROP_SET_LIST[type_name]
props_for_this_type = []
for prop in prospective_props:
if prop in df.columns.values:
props_for_this_type.append(prop)
segment_df = df[props_for_this_type]
segment_df = segment_df.loc[lambda text_df: text_df['jade_type'] == type_name, :]
type_segments[type_name] = segment_df
return type_segments
def export_stats(self):
'''
Export results from quantify to an XLSX file
'''
filepath = f'{OUT_DIR}{TS}-batch/'
if not os.path.exists(filepath):
os.makedirs(filepath)
with open(
filepath+"jade_data_stats.md",
'w',
encoding='utf-8'
) as md_writer:
with pd.ExcelWriter(
filepath+"jade_data_stats.xlsx",
encoding='utf-8'
) as excel_writer:
for k in list(self.quant.keys()):
if k.split("_")[-1] in ["count", "list", "table"]:
md_writer.write(f"\n\n## {k}\n"+self.quant[k].to_markdown())
if isinstance(self.quant[k], pd.Series):
df = self.quant[k].apply(lambda x : colons_and_semicolons(x))
df = df.apply(lambda x: zap_illegal_characters(x))
else:
df = self.quant[k].applymap(lambda x : colons_and_semicolons(x))
df = df.applymap(lambda x: zap_illegal_characters(x))
df.to_excel(excel_writer,sheet_name=k)
def export_single_sheet(self):
'''
Export one big sheet that has all the objects and all the properties and relations (contains a lot of blank cells)
'''
filepath = f'{OUT_DIR}{TS}-batch/'
if not os.path.exists(filepath):
os.makedirs(filepath)
with pd.ExcelWriter(
filepath+"jade_data_single_sheet.xlsx",
encoding='utf-8'
) as excel_writer:
df = self.objects.applymap(lambda x : colons_and_semicolons(x))
df = df.applymap(lambda x: zap_illegal_characters(x))
df.to_excel(excel_writer,index=False,sheet_name='jade_data')
def export_complete_dataset(self):
'''
Export a complete, curated dataset, segmented by object type in the XLSX and CSV formats
'''
self.type_segments = self.segment_by_type(self.objects)
filepath = f'{OUT_DIR}{TS}-batch/complete_data/'
self.run_outputs(self.type_segments,filepath)
# filepath = f'{OUT_DIR}{TS}-batch/complete_data/Locations'
# self.run_outputs(self.locations,filepath)
def export_subsets(self):
'''
Manage creation of subsets by property value, using quant information
'''
props = list(DATASET_OPTIONS['SUBSET_PROPERTIES_AND_QUANTITIES'].items())
iter = tqdm(props)
iter.set_description("Exporting subsets by facet")
for prop, lim in iter:
if prop in self.quant:
self.create_subset(
prop,
self.quant[prop],
self.quant[prop+'_count'],
lim)
def create_subset(self,prop,attr_dict,ranked_attr_counts,lim):
'''
Create a subset for the passed property, using indexes in quant
'''
ranked_attr_list = list(ranked_attr_counts.keys())
for val in ranked_attr_list[:lim]:
filtered_jade_ids = attr_dict[val]
count = str(ranked_attr_counts[val])
# Items
df = self.objects[self.objects.index.isin(filtered_jade_ids)]
segmented_subset_dfs = self.segment_by_type(df)
safe_val_string = safen_string(val)
filepath = f'{OUT_DIR}{TS}-batch/filtered_subsets/{prop}/{safe_val_string} {count}/'
self.run_outputs(segmented_subset_dfs,filepath,filename=f'{prop} {safe_val_string} {count}')
def export_crumbs(self):
'''
Export a spreadsheet with noise from the RDBMS that did not conform to regular property usage. Does not yet contain relation noise. May have a bug with location noise, including too many locations. Also has a bug with respect to jade_id and jade_collection, leaving all of the regular values for those properties in.
'''
filepath = f'{OUT_DIR}{TS}-batch/'
if not os.path.exists(filepath):
os.makedirs(filepath)
with pd.ExcelWriter(
filepath+"sql_crumbs.xlsx",
encoding='utf-8'
) as excel_writer:
df = self.noise.applymap(lambda x : colons_and_semicolons(x))
df = df.applymap(lambda x: zap_illegal_characters(x))
df.to_excel(excel_writer,index=False,sheet_name='item_noise')
df = self.location_duplicates.applymap(lambda x : colons_and_semicolons(x))
df = df.applymap(lambda x: zap_illegal_characters(x))
df.to_excel(excel_writer,index=False,sheet_name='location_noise')
def run_outputs(self,type_segment_dfs,filepath,filename='default'):
'''
Manages the outputs specified for the dfs passed
'''
if not os.path.exists(filepath):
os.makedirs(filepath)
tsdfs = type_segment_dfs
if DATASET_OPTIONS['EXPORT_XLSX']:
self.save_xlsx(tsdfs,filepath,filename)
if DATASET_OPTIONS['EXPORT_CSV']:
self.save_csv(tsdfs,filepath,filename)
if DATASET_OPTIONS['EXPORT_JSON']:
self.save_json(tsdfs,filepath,filename)
text_df = tsdfs['Text']
if (
DATASET_OPTIONS['EXPORT_TXT']
) or (
DATASET_OPTIONS['EXPORT_HTML']
):
if len(text_df) > 0:
self.save_txt_and_html(text_df,filepath,filename)
def save_xlsx(self,tsdfs,filepath,filename):
'''
Run an XLSX export, putting multiple tables in a single workbook
'''
with pd.ExcelWriter(
f"{filepath}{'jade_data' if filename == 'default' else filename}.xlsx",
encoding='utf-8'
) as excel_writer:
for name, df in list(tsdfs.items()):
df = df.applymap(lambda x : colons_and_semicolons(x))
df = df.applymap(lambda x: zap_illegal_characters(x))
if len(df) > 0:
df.to_excel(excel_writer,index=False,sheet_name=name)
def save_csv(self,tsdfs,filepath,filename):
'''
Run a CSV export, using a subdirectory for multiples
'''
filepath+=f"{'jade_data' if filename == 'default' else filename}_csv"
if not os.path.exists(filepath):
os.makedirs(filepath)
for name, df in list(tsdfs.items()):
if len(df) > 0:
df.to_csv(f'{filepath}/jade_{name}.csv',index=False)
def save_json(self,tsdfs,filepath,filename):
'''
Run a JSON export, putting all the objects at the same level (no type segments) or wrapping them, depending on options
'''
json_output = {}
if DATASET_OPTIONS['WRAP_JSON_RECORDS_IN_TYPE_BRANCHES']:
for name, df in list(tsdfs.items()):
json_output[name] = json.loads(df.to_json(orient='index'))
if not DATASET_OPTIONS['WRAP_JSON_RECORDS_IN_TYPE_BRANCHES']:
for name, df in list(tsdfs.items()):
json_output.update(json.loads(df.to_json(orient='index')))
with open(filepath+f"{'jade_data' if filename == 'default' else filename}.json",'w') as fileref:
fileref.write(json.dumps(json_output))
def save_txt_and_html(self,df,filepath,filename):
'''
Run export of texts, using subdirectories by format
'''
if DATASET_OPTIONS['EXPORT_TXT']:
txt_filepath = filepath+f"{'jade_texts' if filename == 'default' else filename}_txt/"
if not os.path.exists(txt_filepath):
os.makedirs(txt_filepath)
if DATASET_OPTIONS['EXPORT_HTML']:
html_filepath = filepath+f"{'jade_texts' if filename == 'default' else filename}_html/"
if not os.path.exists(html_filepath):
os.makedirs(html_filepath)
# Iterate through the text column
text_ser = df["jade_text"]
text_ser = text_ser.dropna()
text_ser = text_ser.apply(unwrap_list)
for jade_id, val in text_ser.iteritems():
# Manage whether values are wrapped in lists
if not isinstance(val, list):
val_list = [val]
for val in val_list:
if not pd.isnull(val):
# Check whether value is html
is_html = False
if "<" in val:
if ">" in val:
is_html = True
# Run HTML and TXT exports
if is_html:
soup = BeautifulSoup(val,'html.parser')
if DATASET_OPTIONS['EXPORT_HTML']:
with open(html_filepath+jade_id+'.html','w',encoding='utf-8') as html_ref:
html_ref.write(soup.prettify())
if DATASET_OPTIONS['EXPORT_TXT']:
with open(txt_filepath+jade_id+'.txt','w',encoding='utf-8') as txt_ref:
txt_ref.write(text_with_newlines(soup))
else:
if DATASET_OPTIONS['EXPORT_TXT']:
with open(txt_filepath+jade_id+'.txt','w',encoding='utf-8') as txt_ref:
txt_ref.write(val)
class LocationSet():
'''
A class to hold locations in the few seconds before they get subsumed into the dataset object
'''
# A dummy init function
def __init__(self):
pass
# Ingest location data from SQL
def ingest(self,dataset,limit=None):
# Read from SQL table omek_locations
statement = f'''
SELECT * FROM omek_locations;
'''
if limit != None:
statement = statement.split(';')[0] + f' LIMIT {str(limit)};'
self.omek_locations = pd.read_sql(statement,DB)
# Set up data structure for later DataFrame
data = {}
noise = {}
ids = []
retrieved = []
# Convert item IDs
self.omek_locations['item_id'] = self.omek_locations['item_id'].apply(
lambda x: dataset.convert_to_jade_id(x))
# Read data retrieved from SQL
iter = tqdm(self.omek_locations.iterrows())
iter.set_description("Ingesting locations")
for tup in iter:
row = tup[1]
loc_id = row.loc['id']
if (
loc_id not in retrieved
) and (
row.loc['item_id'] in dataset.objects.index.values
):
cluster_address_versions = {}
# Check for duplicates
addr_fp = fingerprint(row.loc["address"])
cluster_statement = f'''
SELECT * FROM omek_locations
WHERE latitude = {row.loc['latitude']}
AND longitude = {row.loc['longitude']};
'''
                cluster = pd.read_sql(cluster_statement,DB)
# -*- coding: utf-8 -*-
from dash import Dash
from dash.dependencies import Input, Output, ALL, State, MATCH, ALLSMALLER, ClientsideFunction
from Dashapps.Dash_fun import apply_layout_with_auth,apply_layout_without_auth, load_object, save_object
import dash_core_components as dcc
import dash_html_components as html
import dash
import dash_table
from dash_table.Format import Format, Group, Prefix, Scheme, Symbol
import dash_bootstrap_components as dbc
import plotly.graph_objs as go
import numpy as np
import pandas as pd
from Dashapps.Dash_base import warning_card, colors, cite_card, description_card
import datetime as dt
from ..compute_util.stockinterface import isTickerValid, getCorrelationMatrix, getPortfolioCorrelation,getCorrelationMatrix_List, getPortfolioCorrelation_List, getTickerDataframesList, getTickerDataframe
from flask import request
import locale
# from ./compute_util/stockinterface import isTickerValid
url_base = '/dash/app2/'
data_sources = [
"https://pypi.org/project/yfinance/"
]
data_licenses = [
"https://pypi.org/search/?c=License+%3A%3A+OSI+Approved+%3A%3A+Apache+Software+License"
]
cite_text = '"Forecasts may tell you a great deal about the forecaster; they tell you nothing about the future."'
cite_author = "<NAME>"
cite_link = "https://en.wikipedia.org/wiki/Warren_Buffett"
description_text = '''This tool can compute the correlation of your assets. It uses the ticker of yahoo and computes the correlation for all combinations of your assets. This is called the correlation matrix. The coorelation is computed on the daily, as well as on the monthly performance. We compute it for the maximum timeframe of the data. But additionally, you can also enter a custom timeframe. You have to be careful with currency. We don't check the currency, so **don't mix currencies**.'''
def get_dummy_df():
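    """Placeholder 2x2 frame used to initialise the result tables before any computation has run."""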
d = {'-': [0, 0], '- ': [0, 0]}
df = pd.DataFrame(data=d)
return df
df_corr = get_dummy_df()
def ticker_card():
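    """Portfolio input card: the dynamically added ticker rows plus the 'Add Ticker' button."""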
return html.Div(
children=[
html.H3(children='Portfolio'),
dbc.Alert(
[
"You can get Tickers from www.finance.yahoo.com ",
# html.A(" www.finance.yahoo.com", href="https://finance.yahoo.com", className="alert-link", target='_blank'),
],
color="primary",
),
html.Div(children=[], id='container_ticker'),
dbc.Button('Add Ticker', color="secondary", id='add_ticker_button', n_clicks=1, className="mr-1"),
],
style={
'backgroundColor': colors['background'],
}
)
# The Layout
layout = html.Div(style={'font-family':'"Poppins", sans-serif', 'backgroundColor': colors['background']}, children=[
html.H1(
children='Correlation Matrix',
style={
'textAlign': 'center',
'color': colors['text'],
'backgroundColor': colors['background']
}
),
html.Div(children=description_card(description_text), style={
'textAlign': 'center',
'color': colors['text'],
'backgroundColor': colors['background']
}),
html.Div(children=cite_card(cite_text,cite_author,cite_link), style={
'textAlign': 'center',
'color': colors['text'],
'backgroundColor': colors['background']
}),
html.Br(),
ticker_card(),
html.Br(),
html.P("Custom Timeframe (Optional):",style={"font-style": "italic" }),
dcc.DatePickerRange(
id='my-date-picker-range',
min_date_allowed=dt.datetime(1971,1,1),
max_date_allowed= dt.datetime.now(),
initial_visible_month=dt.datetime.now(),
start_date=dt.datetime(2019,1,1),
end_date=dt.datetime.now()
),
html.Br(),
html.Br(),
dbc.Button("Compute (Takes a few minutes!)", id="compute-button", color="primary", block=True),
html.Span(id="compute-output", style={"vertical-align": "middle","font-style": "italic" }),
html.Br(),
html.Br(),
html.H3(children='Result'),
html.P(children='Daily - Maximum Timeframe',style={"font-style": "italic" }),
dash_table.DataTable(
style_data_conditional=[
{
'if': {'row_index': 'odd'},
'backgroundColor': 'rgb(248, 248, 248)'
},
{
'if': {'column_id': 'Ticker'},
'backgroundColor': 'rgb(230, 230, 230)'
}
],
style_header={
'backgroundColor': 'rgb(230, 230, 230)',
# 'fontWeight': 'bold'
},
style_cell={
'font-family':'"Poppins", sans-serif',
},
id='compute-table-daily',
columns=[{"name": i, "id": i} for i in df_corr.columns],
data=df_corr.to_dict('records')
),
html.Br(),
html.P(children='Monthly - Maximum Timeframe',style={"font-style": "italic" }),
dash_table.DataTable(
style_data_conditional=[
{
'if': {'row_index': 'odd'},
'backgroundColor': 'rgb(248, 248, 248)'
},
{
'if': {'column_id': 'Ticker'},
'backgroundColor': 'rgb(230, 230, 230)'
}
],
style_header={
'backgroundColor': 'rgb(230, 230, 230)',
# 'fontWeight': 'bold'
},
style_cell={
'font-family':'"Poppins", sans-serif'
},
id='compute-table-monthly',
columns=[{"name": i, "id": i} for i in df_corr.columns],
data=df_corr.to_dict('records'),
),
html.Br(),
html.P(children='Daily - Custom Timeframe',style={"font-style": "italic" }),
dash_table.DataTable(
style_data_conditional=[
{
'if': {'row_index': 'odd'},
'backgroundColor': 'rgb(248, 248, 248)'
},
{
'if': {'column_id': 'Ticker'},
'backgroundColor': 'rgb(230, 230, 230)'
}
],
style_header={
'backgroundColor': 'rgb(230, 230, 230)',
# 'fontWeight': 'bold'
},
style_cell={
'font-family':'"Poppins", sans-serif'
},
id='compute-table-daily_c',
columns=[{"name": i, "id": i} for i in df_corr.columns],
data=df_corr.to_dict('records'),
),
html.Br(),
html.P(children='Monthly - Custom Timeframe',style={"font-style": "italic" }),
dash_table.DataTable(
style_data_conditional=[
{
'if': {'row_index': 'odd'},
'backgroundColor': 'rgb(248, 248, 248)'
},
{
'if': {'column_id': 'Ticker'},
'backgroundColor': 'rgb(230, 230, 230)'
}
],
style_header={
'backgroundColor': 'rgb(230, 230, 230)',
# 'fontWeight': 'bold'
},
style_cell={
'font-family':'"Poppins", sans-serif'
},
id='compute-table-monthly_c',
columns=[{"name": i, "id": i} for i in df_corr.columns],
data=df_corr.to_dict('records'),
),
html.Br(),
html.P(children='Portfolio Correlations',style={"font-style": "italic" }),
dash_table.DataTable(
style_data_conditional=[
{
'if': {'row_index': 'odd'},
'backgroundColor': 'rgb(248, 248, 248)'
},
{
'if': {'column_id': 'Correlation'},
'backgroundColor': 'rgb(230, 230, 230)',
'color' : 'blue',
}
],
style_header={
'backgroundColor': 'rgb(230, 230, 230)',
# 'fontWeight': 'bold'
},
style_cell={
'font-family':'"Poppins", sans-serif'
},
id='portfolio-table',
columns=[{"name": i, "id": i} for i in df_corr.columns],
data=df_corr.to_dict('records'),
),
html.Br(),
html.Div(children=warning_card(data_sources,data_licenses), style={
'textAlign': 'left',
'color': colors['text'],
'backgroundColor': colors['background']
})
])
def isValid_tickers(ticker_values):
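    """Return [index, False] for the first empty or unresolvable ticker, else [0, True]."""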
for i in range(len(ticker_values)):
if ticker_values[i]=="": return [i,False]
for i in range(len(ticker_values)):
if not isTickerValid(ticker_values[i]): return [i,False]
return [0,True]
def isValid_percents(percent_values):
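    """Return 0 if the percentage weights sum to 0, 1 if they sum to 100, 2 otherwise."""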
sum=0
for percent in percent_values:
percent = float(percent)
sum+=percent
if (sum==0): return 0
if (sum==100): return 1
return 2
def getAllPortfolioCorrelation_List(dfList,tickers,percents,start,end):
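    """Correlate the weighted portfolio (given as pre-downloaded dataframes) against the
    MSCI World, S&P 500 and Stoxx Europe 600 benchmarks, daily and monthly, for both the
    maximum and the custom timeframe, and return the results as a DataFrame."""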
benchTickers = []
benchTickers.append('IWDA.AS')
benchTickers.append('SPY')
benchTickers.append('EXSA.MI')
benchName = []
benchName.append('MSCI World')
benchName.append('S&P 500')
benchName.append('Stoxx Europe 600')
benchDataframes = []
benchDataframes.append(getTickerDataframe(benchTickers[0]))
benchDataframes.append(getTickerDataframe(benchTickers[1]))
benchDataframes.append(getTickerDataframe(benchTickers[2]))
pfResults = []
for i in range(len(benchTickers)):
pfCorr=[]
pfCorr.append(benchName[i])
pfCorr.append('Daily')
pfCorrRes = getPortfolioCorrelation_List(dfList,percents,benchDataframes[i])
for a in pfCorrRes:
pfCorr.append(a)
pfCorrMonthly = []
pfCorrMonthly.append(benchName[i])
pfCorrMonthly.append('Monthly')
pfCorrMonthlyRes = getPortfolioCorrelation_List(dfList,percents,benchDataframes[i], daily=False)
for a in pfCorrMonthlyRes:
pfCorrMonthly.append(a)
pfCorrCustom = []
pfCorrCustom.append(benchName[i])
pfCorrCustom.append('Daily')
pfCorrCustomRes = getPortfolioCorrelation_List(dfList,percents,benchDataframes[i], start,end)
for a in pfCorrCustomRes:
pfCorrCustom.append(a)
pfCorrCustomMonthly = []
pfCorrCustomMonthly.append(benchName[i])
pfCorrCustomMonthly.append('Monthly')
pfCorrCustomMonthlyRes = getPortfolioCorrelation_List(dfList,percents,benchDataframes[i],start,end, daily=False)
for a in pfCorrCustomMonthlyRes:
pfCorrCustomMonthly.append(a)
pfResults.append(pfCorr)
pfResults.append(pfCorrMonthly)
pfResults.append(pfCorrCustom)
pfResults.append(pfCorrCustomMonthly)
# print (pfResults)
result = pd.DataFrame.from_records(pfResults)
result.columns = ['Benchmark:', 'Intervall', 'Correlation', 'from', 'to']
return result
def getAllPortfolioCorrelation(tickers,percents,start,end):
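    """Ticker-based variant of getAllPortfolioCorrelation_List: benchmarks are passed as
    tickers and downloaded on the fly rather than as pre-fetched dataframes."""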
benchTickers = []
benchTickers.append('IWDA.AS')
benchTickers.append('SPY')
benchTickers.append('EXSA.MI')
benchName = []
benchName.append('MSCI World')
benchName.append('S&P 500')
benchName.append('Stoxx Europe 600')
pfResults = []
for i in range(len(benchTickers)):
pfCorr=[]
pfCorr.append(benchName[i])
pfCorr.append('Daily')
pfCorrRes = getPortfolioCorrelation(tickers,percents,benchTickers[i])
for a in pfCorrRes:
pfCorr.append(a)
pfCorrMonthly = []
pfCorrMonthly.append(benchName[i])
pfCorrMonthly.append('Monthly')
pfCorrMonthlyRes = getPortfolioCorrelation(tickers,percents,benchTickers[i], daily=False)
for a in pfCorrMonthlyRes:
pfCorrMonthly.append(a)
pfCorrCustom = []
pfCorrCustom.append(benchName[i])
pfCorrCustom.append('Daily')
pfCorrCustomRes = getPortfolioCorrelation(tickers,percents,benchTickers[i], start,end)
for a in pfCorrCustomRes:
pfCorrCustom.append(a)
pfCorrCustomMonthly = []
pfCorrCustomMonthly.append(benchName[i])
pfCorrCustomMonthly.append('Monthly')
pfCorrCustomMonthlyRes = getPortfolioCorrelation(tickers,percents,benchTickers[i],start,end, daily=False)
for a in pfCorrCustomMonthlyRes:
pfCorrCustomMonthly.append(a)
pfResults.append(pfCorr)
pfResults.append(pfCorrMonthly)
pfResults.append(pfCorrCustom)
pfResults.append(pfCorrCustomMonthly)
# print (pfResults)
result = pd.DataFrame.from_records(pfResults)
result.columns = ['Benchmark:', 'Intervall', 'Correlation', 'from', 'to']
return result
def Add_Dash(server):
app = Dash(server=server, url_base_pathname=url_base, external_stylesheets = [dbc.themes.BOOTSTRAP], external_scripts = ["https://cdn.plot.ly/plotly-locale-de-latest.js"], meta_tags=[{"name": "viewport", "content": "width=device-width, initial-scale=1"}])
app.config.suppress_callback_exceptions = True
apply_layout_without_auth(app, layout)
@app.callback(
[Output("compute-output", "children"),
Output(component_id='compute-table-daily', component_property='data'),
Output(component_id='compute-table-daily', component_property='columns'),
Output(component_id='compute-table-monthly', component_property='data'),
Output(component_id='compute-table-monthly', component_property='columns'),
Output(component_id='compute-table-daily_c', component_property='data'),
Output(component_id='compute-table-daily_c', component_property='columns'),
Output(component_id='compute-table-monthly_c', component_property='data'),
Output(component_id='compute-table-monthly_c', component_property='columns'),
Output(component_id='portfolio-table', component_property='data'),
Output(component_id='portfolio-table', component_property='columns')],
[Input(component_id={'type': 'dynamic-ticker', 'index': ALL}, component_property='value'),
Input(component_id={'type': 'dynamic-percent', 'index': ALL}, component_property='value'),
Input('compute-button', 'n_clicks'),
Input('my-date-picker-range', 'start_date'),
Input('my-date-picker-range', 'end_date')]
)
def compute(ticker_values, percent_values,n_clicks,start_date, end_date):
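        """Compute-button callback: validate tickers and weights, build daily/monthly correlation
        matrices for the maximum and custom timeframes plus the benchmark portfolio correlations,
        and return the data/columns for the five result tables."""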
request_locale = request.accept_languages.best_match(['en_US','de_DE'])
if (request_locale=='en_US'):
dash_locale = 'en'
sep_locale = "."
else:
dash_locale = 'de'
sep_locale = ","
changed_id = [p['prop_id'] for p in dash.callback_context.triggered][0]
msg = ""
df_corr_result_max = get_dummy_df()
columns_corr_result_max = [{'name': col, 'id': col, 'type': 'numeric', 'format' : dict(specifier='.2f', locale=dict(decimal=sep_locale)) } for col in df_corr_result_max.columns]
if 'compute-button' in changed_id:
if len(ticker_values)<2: return 'You need at least 2 tickers.', df_corr_result_max.to_dict(orient='records'),columns_corr_result_max, df_corr_result_max.to_dict(orient='records'),columns_corr_result_max, df_corr_result_max.to_dict(orient='records'),columns_corr_result_max, df_corr_result_max.to_dict(orient='records'),columns_corr_result_max, df_corr_result_max.to_dict(orient='records'),columns_corr_result_max
result_percents = isValid_percents(percent_values)
if (result_percents==2): return 'Sum of percent needs to be 0 or 100', df_corr_result_max.to_dict(orient='records'), columns_corr_result_max, df_corr_result_max.to_dict(orient='records'),columns_corr_result_max, df_corr_result_max.to_dict(orient='records'),columns_corr_result_max, df_corr_result_max.to_dict(orient='records'),columns_corr_result_max, df_corr_result_max.to_dict(orient='records'),columns_corr_result_max
result_tickers = isValid_tickers(ticker_values)
if not result_tickers[1]:
return 'Ticker at position {} cannot be found on yahoo'.format(result_tickers[0]+1), df_corr_result_max.to_dict(orient='records'),columns_corr_result_max, df_corr_result_max.to_dict(orient='records'),columns_corr_result_max, df_corr_result_max.to_dict(orient='records'),columns_corr_result_max, df_corr_result_max.to_dict(orient='records'),columns_corr_result_max, df_corr_result_max.to_dict(orient='records'),columns_corr_result_max
print ('Start Correlation Computation')
dfList = getTickerDataframesList(ticker_values)
corr_result_max = getCorrelationMatrix_List(dfList, daily=True)
corr_result_max_monthly = getCorrelationMatrix_List(dfList, daily=False)
# print(start_date)
# print(end_date)
corr_result_max_c = getCorrelationMatrix_List(dfList, filterStart=start_date, filterEnd=end_date, daily=True)
corr_result_max_monthly_c = getCorrelationMatrix_List(dfList, filterStart=start_date, filterEnd=end_date, daily=False)
# print(corr_result_max)
df_corr_result_max = pd.DataFrame(data=corr_result_max[0], index=ticker_values, columns=ticker_values)
df_corr_result_max.insert(loc=0, column='Ticker', value=ticker_values)
            df_corr_result_max_monthly = pd.DataFrame(data=corr_result_max_monthly[0], index=ticker_values, columns=ticker_values)
import time
from ParamTuning.Optimizer import Optimizer
from Utils.Data import Data
import pandas as pd
from Utils.Data.Data import get_dataset_xgb_batch
from Utils.Data.Features.Generated.EnsemblingFeature.LGBMEnsemblingFeature import LGBMEnsemblingFeature
from sklearn.model_selection import train_test_split
import time
import Blending.like_params as like_params
import Blending.reply_params as reply_params
import Blending.retweet_params as retweet_params
import Blending.comment_params as comment_params
from Utils.Data.Features.Generated.EnsemblingFeature.XGBEnsembling import XGBEnsembling
import argparse
def main():
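    """Select the LGBM/XGB parameter sets for the requested label and assemble the
    validation + test frames that the blending features are computed on."""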
# Instantiate the parser
parser = argparse.ArgumentParser()
parser.add_argument('label', type=str,
help='required argument: label')
args = parser.parse_args()
LABEL = args.label
assert LABEL in ["like", "reply", "retweet", "comment"], "LABEL not valid."
print(f"label is {LABEL}")
features = ["raw_feature_creator_follower_count",
"raw_feature_creator_following_count",
"raw_feature_engager_follower_count",
"raw_feature_engager_following_count",
"raw_feature_creator_is_verified",
"raw_feature_engager_is_verified",
"raw_feature_engagement_creator_follows_engager",
"tweet_feature_number_of_photo",
"tweet_feature_number_of_video",
"tweet_feature_number_of_gif",
"tweet_feature_number_of_media",
"tweet_feature_is_retweet",
"tweet_feature_is_quote",
"tweet_feature_is_top_level",
"tweet_feature_number_of_hashtags",
"tweet_feature_creation_timestamp_hour",
"tweet_feature_creation_timestamp_week_day",
# "tweet_feature_number_of_mentions",
"tweet_feature_token_length",
"tweet_feature_token_length_unique",
"tweet_feature_text_topic_word_count_adult_content",
"tweet_feature_text_topic_word_count_kpop",
"tweet_feature_text_topic_word_count_covid",
"tweet_feature_text_topic_word_count_sport",
"number_of_engagements_with_language_like",
"number_of_engagements_with_language_retweet",
"number_of_engagements_with_language_reply",
"number_of_engagements_with_language_comment",
"number_of_engagements_with_language_negative",
"number_of_engagements_with_language_positive",
"number_of_engagements_ratio_like",
"number_of_engagements_ratio_retweet",
"number_of_engagements_ratio_reply",
"number_of_engagements_ratio_comment",
"number_of_engagements_ratio_negative",
"number_of_engagements_ratio_positive",
"number_of_engagements_between_creator_and_engager_like",
"number_of_engagements_between_creator_and_engager_retweet",
"number_of_engagements_between_creator_and_engager_reply",
"number_of_engagements_between_creator_and_engager_comment",
"number_of_engagements_between_creator_and_engager_negative",
"number_of_engagements_between_creator_and_engager_positive",
"creator_feature_number_of_like_engagements_received",
"creator_feature_number_of_retweet_engagements_received",
"creator_feature_number_of_reply_engagements_received",
"creator_feature_number_of_comment_engagements_received",
"creator_feature_number_of_negative_engagements_received",
"creator_feature_number_of_positive_engagements_received",
"creator_feature_number_of_like_engagements_given",
"creator_feature_number_of_retweet_engagements_given",
"creator_feature_number_of_reply_engagements_given",
"creator_feature_number_of_comment_engagements_given",
"creator_feature_number_of_negative_engagements_given",
"creator_feature_number_of_positive_engagements_given",
"engager_feature_number_of_like_engagements_received",
"engager_feature_number_of_retweet_engagements_received",
"engager_feature_number_of_reply_engagements_received",
"engager_feature_number_of_comment_engagements_received",
"engager_feature_number_of_negative_engagements_received",
"engager_feature_number_of_positive_engagements_received",
"number_of_engagements_like",
"number_of_engagements_retweet",
"number_of_engagements_reply",
"number_of_engagements_comment",
"number_of_engagements_negative",
"number_of_engagements_positive",
"engager_feature_number_of_previous_like_engagement",
"engager_feature_number_of_previous_reply_engagement",
"engager_feature_number_of_previous_retweet_engagement",
"engager_feature_number_of_previous_comment_engagement",
"engager_feature_number_of_previous_positive_engagement",
"engager_feature_number_of_previous_negative_engagement",
"engager_feature_number_of_previous_engagement",
"engager_feature_number_of_previous_like_engagement_ratio_1",
"engager_feature_number_of_previous_reply_engagement_ratio_1",
"engager_feature_number_of_previous_retweet_engagement_ratio_1",
"engager_feature_number_of_previous_comment_engagement_ratio_1",
"engager_feature_number_of_previous_positive_engagement_ratio_1",
"engager_feature_number_of_previous_negative_engagement_ratio_1",
"engager_feature_number_of_previous_like_engagement_ratio",
"engager_feature_number_of_previous_reply_engagement_ratio",
"engager_feature_number_of_previous_retweet_engagement_ratio",
"engager_feature_number_of_previous_comment_engagement_ratio",
"engager_feature_number_of_previous_positive_engagement_ratio",
"engager_feature_number_of_previous_negative_engagement_ratio",
"engager_feature_number_of_previous_like_engagement_between_creator_and_engager_by_creator",
"engager_feature_number_of_previous_reply_engagement_between_creator_and_engager_by_creator",
"engager_feature_number_of_previous_retweet_engagement_between_creator_and_engager_by_creator",
"engager_feature_number_of_previous_comment_engagement_between_creator_and_engager_by_creator",
"engager_feature_number_of_previous_negative_engagement_between_creator_and_engager_by_creator",
"engager_feature_number_of_previous_positive_engagement_between_creator_and_engager_by_creator",
"engager_feature_number_of_previous_like_engagement_between_creator_and_engager_by_engager",
"engager_feature_number_of_previous_reply_engagement_between_creator_and_engager_by_engager",
"engager_feature_number_of_previous_retweet_engagement_between_creator_and_engager_by_engager",
"engager_feature_number_of_previous_comment_engagement_between_creator_and_engager_by_engager",
"engager_feature_number_of_previous_negative_engagement_between_creator_and_engager_by_engager",
"engager_feature_number_of_previous_positive_engagement_between_creator_and_engager_by_engager",
# "tweet_feature_number_of_previous_like_engagements",
# "tweet_feature_number_of_previous_reply_engagements",
# "tweet_feature_number_of_previous_retweet_engagements",
# "tweet_feature_number_of_previous_comment_engagements",
# "tweet_feature_number_of_previous_positive_engagements",
# "tweet_feature_number_of_previous_negative_engagements",
"creator_feature_number_of_previous_like_engagements_given",
"creator_feature_number_of_previous_reply_engagements_given",
"creator_feature_number_of_previous_retweet_engagements_given",
"creator_feature_number_of_previous_comment_engagements_given",
"creator_feature_number_of_previous_positive_engagements_given",
"creator_feature_number_of_previous_negative_engagements_given",
"creator_feature_number_of_previous_like_engagements_received",
"creator_feature_number_of_previous_reply_engagements_received",
"creator_feature_number_of_previous_retweet_engagements_received",
"creator_feature_number_of_previous_comment_engagements_received",
"creator_feature_number_of_previous_positive_engagements_received",
"creator_feature_number_of_previous_negative_engagements_received",
"engager_feature_number_of_previous_like_engagement_with_language",
"engager_feature_number_of_previous_reply_engagement_with_language",
"engager_feature_number_of_previous_retweet_engagement_with_language",
"engager_feature_number_of_previous_comment_engagement_with_language",
"engager_feature_number_of_previous_positive_engagement_with_language",
"engager_feature_number_of_previous_negative_engagement_with_language",
"engager_feature_knows_hashtag_positive",
"engager_feature_knows_hashtag_negative",
"engager_feature_knows_hashtag_like",
"engager_feature_knows_hashtag_reply",
"engager_feature_knows_hashtag_rt",
"engager_feature_knows_hashtag_comment",
"creator_and_engager_have_same_main_language",
"is_tweet_in_creator_main_language",
"is_tweet_in_engager_main_language",
# "statistical_probability_main_language_of_engager_engage_tweet_language_1",
# "statistical_probability_main_language_of_engager_engage_tweet_language_2",
"creator_and_engager_have_same_main_grouped_language",
"is_tweet_in_creator_main_grouped_language",
"is_tweet_in_engager_main_grouped_language",
# # "hashtag_similarity_fold_ensembling_positive",
# # "link_similarity_fold_ensembling_positive",
# # "domain_similarity_fold_ensembling_positive"
"tweet_feature_creation_timestamp_hour_shifted",
"tweet_feature_creation_timestamp_day_phase",
"tweet_feature_creation_timestamp_day_phase_shifted"
]
label = [
f"tweet_feature_engagement_is_{LABEL}"
]
train_dataset = "cherry_train"
val_dataset = "cherry_val"
test_dataset = "new_test"
if LABEL in ["like"]:
lgbm_params = like_params.lgbm_get_params()
xgb_params = like_params.xgb_get_params()
elif LABEL in ["reply"]:
lgbm_params = reply_params.lgbm_get_params()
xgb_params = reply_params.xgb_get_params()
elif LABEL in ["retweet"]:
lgbm_params = retweet_params.lgbm_get_params()
xgb_params = retweet_params.xgb_get_params()
elif LABEL in ["comment"]:
lgbm_params = comment_params.lgbm_get_params()
xgb_params = comment_params.xgb_get_params()
else:
assert False, "What?"
categorical_features_set = set([])
# Load train data
# loading_data_start_time = time.time()
# df_train, df_train_label = Data.get_dataset_xgb(train_dataset, features, label)
# print(f"Loading train data time: {loading_data_start_time - time.time()} seconds")
# Load val data
df_val, df_val_label = Data.get_dataset_xgb(val_dataset, features, label)
# Load test data
df_test = Data.get_dataset(features, test_dataset)
new_index = pd.Series(df_test.index).map(lambda x: x + len(df_val))
df_test.set_index(new_index, inplace=True)
# df to be predicted by the lgbm blending feature
    df_to_predict = pd.concat([df_val, df_test])
import os
import numpy as np
import pandas as pd
import framework.constants as cs
from io import StringIO
from framework.representations.embedding import Embedding
from framework.util import scaleInRange
from framework.util import drop_duplicates
heads_vad = ['Word','Valence','Arousal','Dominance']
heads_be5 = ['Word','Joy','Anger','Sadness','Fear','Disgust']
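# Each loader below returns a pandas DataFrame indexed by 'Word' with the relevant
# affective ratings (valence/arousal/dominance and/or the five basic emotions) as columns.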
#### ENGLISH
def load_anew10():
anew = pd.read_csv(cs.anew10, sep = '\t')
anew = anew[['Word','ValMn','AroMn','DomMn']]
anew.columns = ['Word', 'Valence', 'Arousal',
'Dominance']
anew.set_index('Word', inplace=True)
return anew
def load_anew99():
anew=pd.read_csv(cs.anew99, sep='\t')
anew.columns=heads_vad
anew.set_index('Word', inplace=True)
anew=drop_duplicates(anew)
return anew
def load_stevenson07():
stevenson07=pd.read_excel(cs.stevenson07)
stevenson07=stevenson07[['word','mean_hap','mean_ang','mean_sad',
'mean_fear','mean_dis']]
stevenson07.columns=['Word', 'Joy','Anger','Sadness','Fear','Disgust']
stevenson07.set_index('Word', inplace=True)
return stevenson07
def load_warriner13():
warriner13 = pd.read_csv(cs.warriner13, sep=',')
warriner13=warriner13[['Word','V.Mean.Sum', 'A.Mean.Sum', 'D.Mean.Sum']]
warriner13.columns=heads_vad
warriner13.set_index('Word',inplace=True)
#print(warriner13.head())
#print(warriner13.shape)
return warriner13
# #### SPANISH
def load_redondo07():
redondo07=pd.read_excel(cs.redondo07)
redondo07=redondo07[['S-Word','Val-Mn-All','Aro-Mn-All','Dom-Mn-All']]
redondo07.columns = heads_vad
redondo07.set_index('Word', inplace=True)
#print(redondo07.head())
#print(redondo07.shape)
return redondo07
def load_ferre16():
ferre16=pd.read_excel(cs.ferre16)
ferre16=ferre16[['Spanish_Word','Hap_Mean','Ang_Mean','Sad_Mean',
'Fear_Mean','Disg_Mean']]
ferre16.columns=heads_be5
ferre16.set_index('Word', inplace=True)
#print(ferre16.head())
#print(ferre16.shape)
return ferre16
# #### POLISH
def load_riegel15():
riegel15=pd.read_excel(cs.riegel15)
riegel15=riegel15[['NAWL_word','val_M_all','aro_M_all']]
riegel15.columns=['Word','Valence','Arousal']
riegel15['Valence']=scaleInRange(riegel15['Valence'],
oldmin=-3,
oldmax=3,
newmin=1,
newmax=9)
riegel15['Arousal']=scaleInRange(riegel15['Arousal'],
oldmin=1,
oldmax=5,
newmin=1,
newmax=9)
riegel15.set_index('Word', inplace=True)
return riegel15
def load_wierzba15():
wierzba15 = pd.read_excel(cs.wierzba15)
wierzba15=wierzba15[['NAWL_word', 'hap_M_all', 'ang_M_all', 'sad_M_all',
'fea_M_all', 'dis_M_all']]
wierzba15.columns=heads_be5
wierzba15.set_index('Word', inplace=True)
## rescaling basic emotions
## Scaling
for cat in ['Joy', 'Anger', 'Sadness', 'Fear', 'Disgust']:
wierzba15[cat] = [scaleInRange(x=x, oldmin=1.,
oldmax=7., newmin=1., newmax=5.)
for x in wierzba15[cat]]
# print(wierzba15.head())
# print(wierzba15.shape)
return wierzba15
def load_imbir16():
imbir16 = pd.read_excel(cs.imbir16)
imbir16 = imbir16[['polish word', 'Valence_M', 'arousal_M', 'dominance_M']]
imbir16.columns=heads_vad
imbir16.set_index('Word', inplace=True)
# print(imbir16.head())
# print(imbir16.shape)
return imbir16
# ### GERMAN
def load_schmidtke14(lower_case=False):
schmidtke14=pd.read_excel(cs.schmidtke14)
# schmidtke14=schmidtke14[['Word','Valence','Arousal','Dominance']]
schmidtke14=schmidtke14[['G-word', 'VAL_Mean', 'ARO_Mean_(ANEW)', 'DOM_Mean']]
schmidtke14.columns=['Word', 'Valence', 'Arousal', 'Dominance']
# schmidtke14['Word']=schmidtke14['Word'].str.lower()
schmidtke14.set_index('Word', inplace=True)
if lower_case:
schmidtke14.index=schmidtke14.index.str.lower()
#schmidtke14=schmidtke14[~schmidtke14.index.duplicated(keep='first')]
schmidtke14=drop_duplicates(schmidtke14)
schmidtke14.Valence = [scaleInRange(x = x, oldmin = -3.,
oldmax = 3., newmin = 1., newmax=9.)
for x in schmidtke14.Valence]
# ### setting word column to lower case for compatiblity with briesemeister11
# # print(schmidtke14.head())
# # print(schmidtke14.shape)
return schmidtke14
def load_briesemeister11():
briesemeister11=pd.read_excel(cs.briesemeister11)
briesemeister11=briesemeister11[['WORD_LOWER', 'HAP_MEAN', 'ANG_MEAN',
'SAD_MEAN', 'FEA_MEAN', 'DIS_MEAN']]
briesemeister11.columns=heads_be5
briesemeister11.set_index('Word', inplace=True)
briesemeister11=drop_duplicates(briesemeister11)
# print(briesemeister11.head())
# print(briesemeister11.shape)
return briesemeister11
def load_hinojosa16():
hinojosa16a=pd.read_excel(cs.hinojosa16a)
hinojosa16a=hinojosa16a[['Word','Val_Mn', 'Ar_Mn', 'Hap_Mn', 'Ang_Mn','Sad_Mn',
'Fear_Mn', 'Disg_Mn']]
hinojosa16a.columns=['Word', 'Valence', 'Arousal',
'Joy','Anger','Sadness','Fear','Disgust']
hinojosa16a.set_index('Word', inplace=True)
hinojosa16b=pd.read_excel(cs.hinojosa16b)
hinojosa16b=hinojosa16b[['Word', 'Dom_Mn']]
hinojosa16b.columns=['Word','Dominance']
hinojosa16b.set_index('Word', inplace=True)
hinojosa=hinojosa16a.join(hinojosa16b, how='inner')
hinojosa=hinojosa[['Valence', 'Arousal', 'Dominance',
'Joy', 'Anger', 'Sadness', 'Fear', 'Disgust']]
return hinojosa
def load_stadthagen16():
stadthagen16=pd.read_csv(cs.stadthagen16, encoding='cp1252')
stadthagen16=stadthagen16[['Word', 'ValenceMean', 'ArousalMean']]
stadthagen16.columns=['Word', 'Valence', 'Arousal']
stadthagen16.set_index('Word', inplace=True)
return stadthagen16
def load_stadthagen17():
'''
Full lexicon including BE5 and VA
'''
df=pd.read_csv(cs.stadthagen17, encoding='cp1252')
df=df[['Word', 'Valence_Mean', 'Arousal_Mean', 'Happiness_Mean',
'Anger_Mean', 'Sadness_Mean', 'Fear_Mean', 'Disgust_Mean']]
df.columns=['Word','Valence','Arousal', 'Joy','Anger', 'Sadness', 'Fear',
'Disgust']
df.set_index('Word', inplace=True)
return df
def load_kanske10():
with open(cs.kanske10, encoding='cp1252') as f:
kanske10=f.readlines()
# Filtering out the relevant portion of the provided file
kanske10=kanske10[7:1008]
# Creating data frame from string:
#https://stackoverflow.com/questions/22604564/how-to-create-a-pandas-dataframe-from-string
kanske10=pd.read_csv(StringIO(''.join(kanske10)), sep='\t')
kanske10=kanske10[['word', 'valence_mean','arousal_mean']]
kanske10.columns=['Word', 'Valence', 'Arousal']
kanske10['Word']=kanske10['Word'].str.lower()
kanske10.set_index('Word', inplace=True)
return kanske10
def load_vo09():
df=pd.read_csv(cs.vo09, sep=';')
df=df[['WORD_LOWER', 'EMO_MEAN','AROUSAL_MEAN']]
df.columns=['Word', 'Valence', 'Arousal']
df.set_index('Word', inplace=True)
# usecols='WORD_LOWER', 'EMO_MEAN','AROUSAL_MEAN', '')
df['Valence']=scaleInRange( x=df['Valence'],
oldmin=-3,
oldmax=3,
newmin=1,
newmax=9)
df['Arousal']=scaleInRange( x=df['Arousal'],
oldmin=1,
oldmax=5,
newmin=1,
newmax=9)
return df
def load_guasch15():
guasch15=pd.read_excel(cs.guasch15)
guasch15=guasch15[['Word','VAL_M', 'ARO_M']]
guasch15.columns=['Word', 'Valence', 'Arousal']
guasch15.set_index('Word', inplace=True)
return guasch15
def load_moors13():
# with open(cs.moors13) as f:
# moors13=f.readlines()
# moors13=moors13[1:]
# moors13=pd.read_excel(StringIO(''.join(moors13)))
moors13=pd.read_excel(cs.moors13, header=1)
moors13=moors13[['Words', 'M V', 'M A', 'M P']]
moors13.columns=heads_vad
moors13.set_index('Word', inplace=True)
# print(moors13)
return moors13
def load_montefinese14():
montefinese14=pd.read_excel(cs.montefinese14, header=1)
montefinese14=montefinese14[['Ita_Word', 'M_Val', 'M_Aro', 'M_Dom']]
montefinese14.columns=heads_vad
montefinese14.set_index('Word', inplace=True)
return montefinese14
def load_soares12():
    soares12=pd.read_excel(cs.soares12, sheet_name=1)
soares12=soares12[['EP-Word', 'Val-M', 'Arou-M', 'Dom-M']]
soares12.columns=heads_vad
soares12.set_index('Word', inplace=True)
return soares12
def load_sianipar16():
sianipar16=pd.read_excel(cs.sianipar16)
sianipar16=sianipar16[['Words (Indonesian)', 'ALL_Valence_Mean', 'ALL_Arousal_Mean', 'ALL_Dominance_Mean']]
sianipar16.columns=heads_vad
sianipar16.set_index('Word', inplace=True)
#sianipar16=sianipar16[~sianipar16.index.duplicated(keep='first')]
sianipar16=drop_duplicates(sianipar16)
return sianipar16
def load_yu16():
'''
<NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., … <NAME>.
(2016). Building Chinese Affective Resources in Valence-Arousal Dimensions.
In Proceedings of NAACL-2016.
'''
    yu16 = pd.read_csv(cs.yu16)
import pandas as pd
import os
"""
Prepare files
"""
textos = os.listdir('/home/duban/Workspace/Analisis-Scrapping-Convocatorias-Clacso/data/2. Textos Geneales')
bdd = pd.read_csv('/home/duban/Workspace/Analisis-Scrapping-Convocatorias-Clacso/data/Latam3.csv')
bdd = bdd.loc[bdd['Tipo convocatoria'] == 'Investigación-Innovación']
"""
Spanish tokens
"""
gramas_esp = pd.read_csv('/home/duban/Workspace/Analisis-Scrapping-Convocatorias-Clacso/data/Gramas_final.csv')
# Convert the ODS field into a list per row
class_ods_esp = []
for list_osd in gramas_esp['ODS']:
list_osd = list_osd.lower().replace(':', ';').split(';')
list_osd2 = []
for o in list_osd:
if o == 'rabajo decente y crecimiento económico' or o == 'trabajo decente y crecimiento económico' or o == 'trabajo decente y crecimiento económic':
o = 'trabajo decente y crecimiento económico'
if o == 'igualdad de género' or o == 'gualdad de género' or o == 'igualdad de genero':
o = 'igualdad de género'
if o == 'industria, innovación e infraestructuras' or o == 'industria, innovación e infraestructura':
o = 'industria, innovación e infraestructuras'
if o == 'paz, justicia e instituciones solidas' or o == 'paz, justicia e instituciones sólidas' or o == 'paz, justicia e instituciones sólida':
o = 'paz, justicia e instituciones sólidas'
if 'producción y consumo' in o:
o = 'producción y consumo responsable'
if o == 'ciudades y comunidades sostenibles' or o == 'ciudades y comunidades sostenible' or o == 'ciudades y comunidades sostenible':
o = 'ciudades y comunidades sostenibles'
if o == 'alianzas para lograr los objetivos' or o == 'alianza para lograr los objetivos':
o = 'alianza para lograr los objetivos'
if o == 'reducción de desigualdade' or o == 'reducción de las desigualdades' or o == 'reducción de desigualdades':
o = 'reducción de desigualdades'
if o == 'vida de ecosistemas terrestres' or o == 'vida de ecosistemas terrestre':
o = 'vida de ecosistemas terrestres'
o = o.strip()
list_osd2.append(o)
class_ods_esp.append(list_osd2)
gramas_esp['ODS'] = class_ods_esp
"""
Portuguese tokens
"""
gramas_por = pd.read_csv('/home/duban/Workspace/Analisis-Scrapping-Convocatorias-Clacso/data/Gramas_protugues.csv')
# Convert the ODS field into a list per row
class_ods_por = []
for list_osd in gramas_por['ODS']:
list_osd = list_osd.lower().split(';')
list_osd2 = []
for o in list_osd:
if o == 'rabajo decente y crecimiento económico' or o == 'trabajo decente y crecimiento económico' or o == 'trabajo decente y crecimiento económic':
o = 'trabajo decente y crecimiento económico'
if o == 'igualdad de género' or o == 'gualdad de género' or o == 'igualdad de genero':
o = 'igualdad de género'
if o == 'industria, innovación e infraestructuras' or o == 'industria, innovación e infraestructura':
o = 'industria, innovación e infraestructuras'
if o == 'paz, justicia e instituciones solidas' or o == 'paz, justicia e instituciones sólidas' or o == 'paz, justicia e instituciones sólida':
o = 'paz, justicia e instituciones sólidas'
if 'producción y consumo' in o:
o = 'producción y consumo responsable'
if o == 'ciudades y comunidades sostenibles' or o == 'ciudades y comunidades sostenible' or o == 'ciudades y comunidades sostenible':
o = 'ciudades y comunidades sostenibles'
if o == 'alianzas para lograr los objetivos' or o == 'alianza para lograr los objetivos':
o = 'alianza para lograr los objetivos'
if o == 'reducción de desigualdade' or o == 'reducción de las desigualdades' or o == 'reducción de desigualdades':
o = 'reducción de desigualdades'
if o == 'vida de ecosistemas terrestres' or o == 'vida de ecosistemas terrestre':
o = 'vida de ecosistemas terrestres'
o = o.strip()
list_osd2.append(o.lower())
class_ods_por.append(list_osd2)
gramas_por['ODS'] = class_ods_por
"""
Remove accents
"""
def normalize(s):
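    """Replace accented vowels with their unaccented counterparts."""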
replacements = (
("á", "a"),
("é", "e"),
("í", "i"),
("ó", "o"),
("ú", "u"),
("Á", "A"),
("É", "E"),
("Í", "I"),
("Ó", "O"),
("Ú", "U")
)
for a, b in replacements:
s = s.replace(a, b)
return s
"""
Build the token matrix over the texts
"""
txt_inv = bdd['ID Proyecto'].tolist()
entidad = bdd['País'].tolist()
entidad.index('Brasil')
gramas_esp = gramas_esp[gramas_esp['ODS'].notnull()].reset_index(drop=True)  # reset index so the positional lookups below stay valid
path_base = '/home/duban/Workspace/Analisis-Scrapping-Convocatorias-Clacso/data/2. Textos Geneales'
# matriz = pd.read_csv('/home/duban/Workspace/Analisis-Scrapping-Convocatorias-Clacso/data/Matriz_Clasificación_ODS.csv')
matriz = pd.DataFrame()
n = 0
for i in txt_inv:
n+=1
print(str(n * 100 / len(txt_inv)))
print(n)
txt = open(path_base + '/' + i , 'r')
txt = str(normalize(txt.read())).replace('\n', ' ').split('.')
    ## Go word by word
    """
    Define text and counter variables for each ODS
    """
pobreza = ''
pobreza_num= 0
hambre = ''
hambre_num = 0
salud = ''
salud_num = 0
educacion = ''
educacion_num = 0
genero = ''
genero_num = 0
agua = ''
agua_num = 0
energia = ''
energia_num = 0
trabajo = ''
trabajo_num = 0
industria = ''
industria_num = 0
desigualdades = ''
desigualdades_num = 0
sostenibles = ''
sostenibles_num = 0
producción_consumo = ''
producción_consumo_num = 0
clima = ''
clima_num = 0
submarina = ''
submarina_num = 0
terrestres = ''
terrestres_num = 0
paz = ''
paz_num = 0
alianza = ''
alianza_num = 0
if entidad[txt_inv.index(i)] != 'Brasil':
for t in range(len(txt)):
i_split = txt[t].split()
for grama in i_split:
grama = str(grama).lower()
if grama in gramas_esp['Gramas'].tolist() and grama.isalpha() and grama.isdigit() == False:
for id_token in range(len(gramas_esp)):
if grama == gramas_esp['Gramas'][id_token]:
if 'educación de calidad' in gramas_esp['ODS'][id_token]:
educacion = educacion + txt[t]+ '\n'
educacion_num +=1
if 'fin de la pobreza' in gramas_esp['ODS'][id_token]:
pobreza = pobreza + txt[t]+'\n'
pobreza_num +=1
if 'salud y bienestar' in gramas_esp['ODS'][id_token]:
salud = salud + txt[t]+'\n'
salud_num +=1
if 'igualdad de género' in gramas_esp['ODS'][id_token]:
genero = genero + txt[t]+'\n'
genero_num +=1
if 'agua limpia y saneamiento' in gramas_esp['ODS'][id_token]:
agua = agua + txt[t]+'\n'
agua_num +=1
if 'energía asequible y no contaminante' in gramas_esp['ODS'][id_token]:
energia = energia + txt[t]+'\n'
energia_num +=1
if 'trabajo decente y crecimiento económico' in gramas_esp['ODS'][id_token]:
trabajo = trabajo + txt[t]+'\n'
trabajo_num +=1
if 'industria, innovación e infraestructuras' in gramas_esp['ODS'][id_token]:
industria = industria + txt[t]+'\n'
industria_num+=1
if 'reducción de desigualdades' in gramas_esp['ODS'][id_token]:
desigualdades = desigualdades + txt[t]+'\n'
desigualdades_num +=1
if 'ciudades y comunidades sostenibles' in gramas_esp['ODS'][id_token]:
sostenibles = sostenibles + txt[t]+'\n'
sostenibles_num +=1
if 'producción y consumo responsable' in gramas_esp['ODS'][id_token]:
producción_consumo = producción_consumo + txt[t]+'\n'
producción_consumo_num +=1
if 'acción por el clima' in gramas_esp['ODS'][id_token]:
clima = clima + txt[t]+'\n'
clima_num +=1
if 'vida submarina' in gramas_esp['ODS'][id_token]:
submarina = submarina + txt[t]+'\n'
submarina_num +=1
if 'vida de ecosistemas terrestres' in gramas_esp['ODS'][id_token]:
terrestres = terrestres + txt[t]+'\n'
terrestres_num +=1
if 'paz, justicia e instituciones sólidas' in gramas_esp['ODS'][id_token]:
paz = paz + txt[t]+'\n'
paz_num +=1
if 'alianza para lograr los objetivos' in gramas_esp['ODS'][id_token]:
alianza = alianza + txt[t]+'\n'
alianza_num+=1
if 'hambre cero' in gramas_esp['ODS'][id_token]:
hambre = hambre + txt[t]+'\n'
hambre_num+=1
else:
continue
    registro = pd.DataFrame()
import pandas as pd
from matplotlib import pyplot as plt
from fbprophet import Prophet
from .models import TransactionHistory
def post_data_clean(temp_df, periodicity='D'):
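    """Format a Prophet forecast for display: render ds according to the periodicity
    (H/M/D), drop the additive/multiplicative term columns, round to 2 decimals, move
    the final column (yhat) next to ds, and return [labels, frame]."""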
if periodicity == 'H':
temp_df['ds'] = pd.to_datetime(temp_df['ds']).dt.strftime('%-I %p %A %d')
elif periodicity == 'M':
temp_df['ds'] = pd.to_datetime(temp_df['ds']).dt.strftime('%b %Y')
else:
temp_df['ds'] = pd.to_datetime(temp_df['ds']).dt.strftime('%d %b')
temp_df = temp_df.drop([
'additive_terms', 'additive_terms_lower', 'additive_terms_upper',
'multiplicative_terms', 'multiplicative_terms_lower', 'multiplicative_terms_upper'
], axis=1)
labels = temp_df.ds.values
temp_df = temp_df.round(2)
indexList = list(temp_df.columns)
ele = indexList.pop(-1)
indexList.insert(1, ele)
temp_df = temp_df[indexList]
return [labels, temp_df]
class TimeSeriesModel:
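    """Wraps the transaction history of one raw material at one store and prepares it
    for forecasting over `future_period` periods."""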
def __init__(self, raw_material_id, store_id, future_period):
self.df = ''
self.prediction_size = future_period
self.error_log = {}
self.get_data(raw_material_id, store_id)
self.pre_data_clean()
def get_data(self, raw_material_id, store_id):
self.df = TransactionHistory.objects.filter(rawMaterial_id_id=raw_material_id, storeId_id=store_id) \
.to_dataframe()
def pre_data_clean(self, with_time=False):
raw_df = self.df
if with_time:
            raw_df['dateTime'] = pd.to_datetime(raw_df['dateTime'])
from keras.layers import Bidirectional, Input, LSTM, Dense, Activation, Conv1D, Flatten, Embedding, MaxPooling1D, Dropout
#from keras.layers.embeddings import Embedding
from keras.preprocessing.sequence import pad_sequences
from keras import optimizers
from keras.models import Sequential, Model
import pandas as pd
import numpy as np
from keras.preprocessing.text import Tokenizer, text_to_word_sequence
from sklearn.utils import shuffle
import pickle
from sklearn.model_selection import train_test_split
import re
from sklearn.utils import shuffle
import keras
import joblib
import tokenizer_util as tu
import os.path
from keras.callbacks import Callback
from keras import backend as K
TRAIN_FILE_PATH = 'train.csv'#'/data/train.csv'
TEST_FILE = 'test.csv'#'/data/test.csv'
TIME_STEPS = 300
BATCH_SIZE = 256
LEARNING_RATE = 0.01
DECAY = 0.25
EPOCH_SIZE = 10
TOKENIZER_FILE = 'tokenizer'
EMBEDDING_FILE = 'embedding'
TENSORFLOW_LOGDIR = 'logs'#'/output/tensorboard_logs'
MODEL_SAVE_PATH = 'models/best_model_new.h5' #'/output/best_model.h5'
OUTPUT_FILENAME = 'first_submission.csv'
def main():
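    """Load and clean the toxic-comment training data, tokenize it with the persisted
    tokenizer and embedding matrix, train the LSTM with callbacks, evaluate on the
    held-out split and predict the test set."""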
df = pd.read_csv(TRAIN_FILE_PATH)
pred_cols = ['toxic','severe_toxic','obscene','threat','insult','identity_hate']
df['total_classes'] = df['toxic']+df['severe_toxic']+df['obscene']+df['threat']+df['insult']+df['identity_hate']
df = tu.clean_up(df)
comment_list = df['comment_text'].tolist()
max_len = TIME_STEPS
comment_list.append("unk")
n_classes = 1
tokenizer = joblib.load(TOKENIZER_FILE)
final_emb_matrix = joblib.load(EMBEDDING_FILE)
class_count = []
for col in pred_cols:
class_count.append((col,len(df[df[col]==1])))
print (class_count)
train, test = train_test_split(df, test_size=0.10, random_state=42)
train.head()
XTrain = tokenizer.texts_to_sequences(train.astype(str)['comment_text'].tolist())
XVal = tokenizer.texts_to_sequences(test.astype(str)['comment_text'].tolist())
YTrain = np.array(train[['toxic','severe_toxic','obscene','threat','insult','identity_hate']])
YVal = np.array(test[['toxic','severe_toxic','obscene','threat','insult','identity_hate']])
train.tail()
model = get_model_soft_sharing_lstm_singleoutput(final_emb_matrix, TIME_STEPS, learning_rate=LEARNING_RATE, n_classes=6, decay=DECAY)
if os.path.isfile(MODEL_SAVE_PATH):
print("Loading weights from existing path: {0}".format(MODEL_SAVE_PATH))
model.load_weights(MODEL_SAVE_PATH)
callbacks_list = define_callbacks()
model.fit(pad_sequences(XTrain, TIME_STEPS),YTrain ,batch_size=BATCH_SIZE, epochs=EPOCH_SIZE, verbose=1, validation_data=(pad_sequences(XVal, TIME_STEPS), YVal), callbacks=callbacks_list)
model.load_weights(MODEL_SAVE_PATH)
model.evaluate(pad_sequences(XVal, TIME_STEPS), YVal, batch_size=BATCH_SIZE)
test_df = pd.read_csv(TEST_FILE)
test_df = tu.clean_up(test_df)
test_comments = test_df['comment_text'].astype(str).tolist()
XTest = tokenizer.texts_to_sequences(test_comments)
print (test_df.columns)
test_df.head()
predictions = model.predict(pad_sequences(XTest, TIME_STEPS))
    predicted_df = pd.DataFrame(columns=['id','toxic','severe_toxic','obscene','threat','insult','identity_hate'])
from flask import Flask, render_template, request, redirect, url_for, session
import pandas as pd
import pymysql
import os
import io
#from werkzeug.utils import secure_filename
from pulp import *
import numpy as np
import pymysql
import pymysql.cursors
from pandas.io import sql
#from sqlalchemy import create_engine
import pandas as pd
import numpy as np
#import io
import statsmodels.formula.api as smf
import statsmodels.api as sm
import scipy.optimize as optimize
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
#from flask import Flask, render_template, request, redirect, url_for, session, g
from sklearn.linear_model import LogisticRegression
from math import sin, cos, sqrt, atan2, radians
from statsmodels.tsa.arima_model import ARIMA
#from sqlalchemy import create_engine
from collections import defaultdict
from sklearn import linear_model
import statsmodels.api as sm
import scipy.stats as st
import pandas as pd
import numpy as np
from pulp import *
import pymysql
import math
app = Flask(__name__)
app.secret_key = os.urandom(24)
localaddress="D:\\home\\site\\wwwroot"
localpath=localaddress
os.chdir(localaddress)
@app.route('/')
def index():
return redirect(url_for('home'))
@app.route('/home')
def home():
return render_template('home.html')
@app.route('/demandplanning')
def demandplanning():
return render_template("Demand_Planning.html")
@app.route("/elasticopt",methods = ['GET','POST'])
def elasticopt():
if request.method== 'POST':
start_date =request.form['from']
end_date=request.form['to']
prdct_name=request.form['typedf']
# connection = pymysql.connect(host='localhost',
# user='user',
# password='',
# db='test',
# charset='utf8mb4',
# cursorclass=pymysql.cursors.DictCursor)
#
# x=connection.cursor()
# x.execute("select * from `transcdata`")
# connection.commit()
# datass=pd.DataFrame(x.fetchall())
datass = pd.read_csv("C:\\Users\\1026819\\Downloads\\optimizdata.csv")
# datas = datass[(datass['Week']>=start_date) & (datass['Week']<=end_date )]
datas=datass
df = datas[datas['Product'] == prdct_name]
df=datass
changeData=pd.concat([df['Product_Price'],df['Product_Qty']],axis=1)
changep=[]
changed=[]
for i in range(0,len(changeData)-1):
changep.append(changeData['Product_Price'].iloc[i]-changeData['Product_Price'].iloc[i+1])
            changed.append(changeData['Product_Qty'].iloc[i]-changeData['Product_Qty'].iloc[i+1])
cpd=pd.concat([pd.DataFrame(changep),pd.DataFrame(changed)],axis=1)
cpd.columns=['Product_Price','Product_Qty']
sortedpricedata=df.sort_values(['Product_Price'], ascending=[True])
spq=pd.concat([sortedpricedata['Product_Price'],sortedpricedata['Product_Qty']],axis=1).reset_index(drop=True)
        x = spq['Product_Price']
        y = spq['Product_Qty']
        num_bins = 5
        # Bin edges for price and demand; np.histogram replaces the commented-out
        # plt.hist calls so the bins are populated without drawing a figure
        _, pint = np.histogram(x, num_bins)
        _, dint = np.histogram(y, num_bins)
arr= np.zeros(shape=(len(pint),len(dint)))
count=0
for i in range(0, len(pint)):
lbp=pint[i]
if i==len(pint)-1:
ubp=pint[i]+1
else:
ubp=pint[i+1]
for j in range(0, len(dint)):
lbd=dint[j]
if j==len(dint)-1:
ubd=dint[j]+1
else:
ubd=dint[j+1]
print(lbd,ubd)
for k in range(0, len(spq)):
if (spq['Product_Price'].iloc[k]>=lbp\
and spq['Product_Price'].iloc[k]<ubp):
if(spq['Product_Qty'].iloc[k]>=lbd\
and spq['Product_Qty'].iloc[k]<ubd):
count+=1
arr[i][j]+=1
price_range=np.zeros(shape=(len(pint),2))
for j in range(0,len(pint)):
lbp=pint[j]
price_range[j][0]=lbp
if j==len(pint)-1:
ubp=pint[j]+1
price_range[j][1]=ubp
else:
ubp=pint[j+1]
price_range[j][1]=ubp
demand_range=np.zeros(shape=(len(dint),2))
for j in range(0,len(dint)):
lbd=dint[j]
demand_range[j][0]=lbd
if j==len(dint)-1:
ubd=dint[j]+1
demand_range[j][1]=ubd
else:
ubd=dint[j+1]
demand_range[j][1]=ubd
pr=pd.DataFrame(price_range)
pr.columns=['Price','Demand']
dr=pd.DataFrame(demand_range)
dr.columns=['Price','Demand']
priceranges=pr.Price.astype(str).str.cat(pr.Demand.astype(str), sep='-')
demandranges=dr.Price.astype(str).str.cat(dr.Demand.astype(str), sep='-')
price=pd.DataFrame(arr)
price.columns=demandranges
price.index=priceranges
pp=price.reset_index()
global data
data=pd.concat([df['Week'],df['Product_Qty'],df['Product_Price'],df['Comp_Prod_Price'],df['Promo1'],df['Promo2'],df['overallsale']],axis=1)
return render_template('dataview.html',cpd=cpd.values,pp=pp.to_html(index=False),data=data.to_html(index=False),graphdata=data.values,ss=1)
return render_template('dataview.html')
@app.route('/priceelasticity',methods = ['GET','POST'])
def priceelasticity():
return render_template('Optimisation_heatmap_revenue.html')
@app.route("/elasticity",methods = ['GET','POST'])
def elasticity():
if request.method== 'POST':
Price=0
Average_Price=0
Promotions=0
Promotionss=0
if request.form.get('Price'):
Price=1
if request.form.get('Average_Price'):
Average_Price=1
if request.form.get('Promotion_1'):
Promotions=1
if request.form.get('Promotion_2'):
Promotionss=1
Modeldata=pd.DataFrame()
Modeldata['Product_Qty']=data.Product_Qty
lst=[]
for row in data.index:
lst.append(row+1)
Modeldata['Week']=np.log(lst)
if Price == 1:
Modeldata['Product_Price']=data['Product_Price']
if Price == 0:
Modeldata['Product_Price']=0
if Average_Price==1:
Modeldata['Comp_Prod_Price']=data['Comp_Prod_Price']
if Average_Price==0:
Modeldata['Comp_Prod_Price']=0
if Promotions==1:
Modeldata['Promo1']=data['Promo1']
if Promotions==0:
Modeldata['Promo1']=0
if Promotionss==1:
Modeldata['Promo2']=data['Promo2']
if Promotionss==0:
Modeldata['Promo2']=0
diffpriceprodvscomp= (Modeldata['Product_Price']-Modeldata['Comp_Prod_Price'])
promo1=Modeldata.Promo1
promo2=Modeldata.Promo2
week=Modeldata.Week
quantityproduct=Modeldata.Product_Qty
df=pd.concat([quantityproduct,diffpriceprodvscomp,promo1,promo2,week],axis=1)
df.columns=['quantityproduct','diffpriceprodvscomp','promo1','promo2','week']
Model = smf.ols(formula='df.quantityproduct ~ df.diffpriceprodvscomp + df.promo1 + df.promo2 + df.week', data=df)
res = Model.fit()
global intercept,diffpriceprodvscomp_param,promo1_param,promo2_param,week_param
intercept=res.params[0]
diffpriceprodvscomp_param=res.params[1]
promo1_param=res.params[2]
promo2_param=res.params[3]
week_param=res.params[4]
Product_Price_min=0
maxvalue_of_price=int(Modeldata['Product_Price'].max())
Product_Price_max=int(Modeldata['Product_Price'].max())
if maxvalue_of_price==0:
Product_Price_max=1
maxfunction=[]
pricev=[]
weeks=[]
dd=[]
ddl=[]
for vatr in range(0,len(Modeldata)):
weeks.append(lst[vatr])
for Product_Price in range(Product_Price_min,Product_Price_max+1):
function=0
function=(intercept+(Modeldata['Promo1'].iloc[vatr]*promo1_param)+(Modeldata['Promo2'].iloc[vatr]*promo2_param) +
(diffpriceprodvscomp_param*(Product_Price-Modeldata['Comp_Prod_Price'].iloc[vatr]))+(Modeldata['Week'].iloc[vatr]*lst[vatr]))
maxfunction.append(function)
dd.append(Product_Price)
ddl.append(vatr)
for Product_Price in range(Product_Price_min,Product_Price_max+1):
pricev.append(Product_Price)
df1=pd.DataFrame(maxfunction)
df2=pd.DataFrame(dd)
df3=pd.DataFrame(ddl)
dfo=pd.concat([df3,df2,df1],axis=1)
dfo.columns=['weeks','prices','Demandfunctions']
demand=[]
for rows in dfo.values:
w=int(rows[0])
p=int(rows[1])
d=int(rows[2])
demand.append([w,p,d])
Co_eff=pd.DataFrame(res.params.values)#intercept
standard_error=pd.DataFrame(res.bse.values)#standard error
p_values=pd.DataFrame(res.pvalues.values)
conf_lower =pd.DataFrame(res.conf_int()[0].values)
conf_higher =pd.DataFrame(res.conf_int()[1].values)
R_square=res.rsquared
atr=['Intercept','DeltaPrice','Promo1','Promo2','Week']
atribute=pd.DataFrame(atr)
SummaryTable=pd.concat([atribute,Co_eff,standard_error,p_values,conf_lower,conf_higher],axis=1)
SummaryTable.columns=['Atributes','Co_eff','Standard_error','P_values','conf_lower','conf_higher']
reshapedf=df1.values.reshape(len(Modeldata),(-Product_Price_min+(Product_Price_max+1)))
dataofmas=pd.DataFrame(reshapedf)
maxv=dataofmas.apply( max, axis=1 )
minv=dataofmas.apply(min,axis=1)
avgv=dataofmas.sum(axis=1)/(-Product_Price_min+(Product_Price_max+1))
wks=pd.DataFrame(weeks)
ddofs=pd.concat([wks,minv,avgv,maxv],axis=1)
dataofmas=pd.DataFrame(reshapedf)
kk=pd.DataFrame()
sums=0
for i in range(0,len(dataofmas.columns)):
sums=sums+i
vv=i*dataofmas[[i]]
kk=pd.concat([kk,vv],axis=1)
dfr=pd.DataFrame(kk)
mrevenue=dfr.apply( max, axis=1 )
prices=dfr.idxmax(axis=1)
wks=pd.DataFrame(weeks)
revenuedf=pd.concat([wks,mrevenue,prices],axis=1)
return render_template('Optimisation_heatmap_revenue.html',revenuedf=revenuedf.values,ddofs=ddofs.values,SummaryTable=SummaryTable.to_html(index=False),ss=1,weeks=weeks,demand=demand,pricev=pricev,R_square=R_square)
@app.route('/inputtomaxm',methods=["GET","POST"])
def inputtomaxm():
return render_template("Optimize.html")
@app.route("/maxm",methods=["GET","POST"])
def maxm():
if request.method=="POST":
week=request.form['TimePeriod']
price_low=request.form['Price_Lower']
price_max=request.form['Price_Upper']
promofirst=request.form['Promotion_1']
promosecond=request.form['Promotion_2']
# week=24
# price_low=6
# price_max=20
# promofirst=1
# promosecond=0
#
# time_period=24
#
# global a
# a=243.226225
# global b
# b=-9.699634
# global d
# d=1.671505
# global pr1
# pr1=21.866260
# global pr2
# pr2=-0.511606
# global cm
# cm=-14.559594
# global s_0
# s_0= 2000
# promo1=1
# promo2=0
time_period=int(week)
global a
a=intercept
global b
b=diffpriceprodvscomp_param
global d
d=week_param
global pr1
pr1=promo1_param
global pr2
pr2=promo2_param
global s_0
s_0= 2000
promo1=int(promofirst)
promo2=int(promosecond)
global comp
comp=np.random.randint(7,15,time_period)
        def demand(p, a=a, b=b, d=d, promo1=promo1, promo2=promo2, comp=comp, t=np.linspace(1,time_period,time_period)):
            """ Return demand given an array of prices p for times t
            (see equation 5 above)"""
            return a+(b*(p-comp))+(d*t)+(promo1*pr1)+(promo2*pr2)
def objective(p_t, a, b, d,promo1,promo2, comp, t=np.linspace(1,time_period,time_period)):
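            """ Negative total revenue, -sum(p_t * demand(p_t)); minimising this maximises revenue. """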
return -1.0 * np.sum( p_t * demand(p_t, a, b, d,promo1,promo2, comp, t) )
def constraint_1(p_t, s_0, a, b, d, promo1,promo2, comp, t=np.linspace(1,time_period,time_period)):
""" Inventory constraint. s_0 - np.sum(x_t) >= 0.
This is an inequality constraint. See more below.
"""
return s_0 - np.sum(demand(p_t, a, b, d,promo1,promo2, comp, t))
def constraint_2(p_t):
#""" Positive demand. Another inequality constraint x_t >= 0 """
return p_t
t = np.linspace(1,time_period,time_period)
# Starting values :
b_min=int(price_low)
p_start = b_min * np.ones(len(t))
# bounds on the values :
bmax=int(price_max)
bounds = tuple((0,bmax) for x in p_start)
import scipy.optimize as optimize
# Constraints :
constraints = ({'type': 'ineq', 'fun': lambda x, s_0=s_0: constraint_1(x,s_0, a, b, d,promo1,promo2, comp, t=t)},
{'type': 'ineq', 'fun': lambda x: constraint_2(x)}
)
opt_results = optimize.minimize(objective, p_start, args=(a, b, d,promo1,promo2, comp, t),
method='SLSQP', bounds=bounds, constraints=constraints)
np.sum(opt_results['x'])
opt_price=opt_results['x']
        opt_demand=demand(opt_results['x'], a, b, d, promo1, promo2, comp, t=t)
weeks=[]
for row in range(1,len(opt_price)+1):
weeks.append(row)
d=pd.DataFrame(weeks).astype(int)
dd=pd.DataFrame(opt_price)
optimumumprice_perweek=pd.concat([d,dd,pd.DataFrame(opt_demand).astype(int)],axis=1)
optimumumprice_perweek.columns=['Week','Price','Demand']
dataval=optimumumprice_perweek
diff=[]
diffs=[]
for i in range(0,len(opt_demand)-1):
valss=opt_demand[i]-opt_demand[i+1]
diff.append(valss)
diffs.append(i+1)
differenceofdemand_df=pd.concat([pd.DataFrame(diffs),pd.DataFrame(diff)],axis=1)
MP=round(optimumumprice_perweek.loc[optimumumprice_perweek['Price'].idxmin()],1)
minimumprice=pd.DataFrame(MP).T
MaxP=round(optimumumprice_perweek.loc[optimumumprice_perweek['Price'].idxmax()],1)
maximumprice=pd.DataFrame(MaxP).T
averageprice=round((optimumumprice_perweek['Price'].sum()/len(optimumumprice_perweek)),2)
MD=round(optimumumprice_perweek.loc[optimumumprice_perweek['Demand'].idxmin()],0)
minimumDemand=pd.DataFrame(MD).T
MaxD=round(optimumumprice_perweek.loc[optimumumprice_perweek['Demand'].idxmax()],0)
maximumDemand=pd.DataFrame(MaxD).T
averageDemand=round((optimumumprice_perweek['Demand'].sum()/len(optimumumprice_perweek)),0)
totaldemand=round(optimumumprice_perweek['Demand'].sum(),0)
return render_template("Optimize.html",totaldemand=totaldemand,averageDemand=averageDemand,maximumDemand=maximumDemand.values,minimumDemand=minimumDemand.values,averageprice=averageprice,maximumprice=maximumprice.values,minimumprice=minimumprice.values,dataval=dataval.values,differenceofdemand_df=differenceofdemand_df.values,optimumumprice_perweek=optimumumprice_perweek.to_html(index=False),ll=1)
@app.route("/Inventorymanagment",methods=["GET","POST"])
def Inventorymanagment():
return render_template("Inventory_Management.html")
@app.route("/DISTRIBUTION_NETWORK_OPT",methods=["GET","POST"])
def DISTRIBUTION_NETWORK_OPT():
return render_template("DISTRIBUTION_NETWORK_OPTIMIZATION.html")
@app.route("/Procurement_Plan",methods=["GET","POST"])
def Procurement_Plan():
return render_template("Procurement_Planning.html")
#<NAME>
@app.route("/fleetallocation")
def fleetallocation():
return render_template('fleetallocation.html')
@app.route("/reset")
def reset():
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
cur.execute("DELETE FROM `input`")
cur.execute("DELETE FROM `output`")
cur.execute("DELETE FROM `Scenario`")
conn.commit()
conn.close()
open(localaddress+'\\static\\demodata.txt', 'w').close()
return render_template('fleetallocation.html')
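# /dalink: copies rows from `inventory_data`, filtered by equipment type and production-date window,
# into the `input` staging table that the optimisation script reads from.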
@app.route("/dalink",methods = ['GET','POST'])
def dalink():
sql = "INSERT INTO `input` (`Route`,`SLoc`,`Ship-to Abb`,`Primary Equipment`,`Batch`,`Prod Dt`,`SW`,`Met Held`,`Heat No`,`Delivery Qty`,`Width`,`Length`,`Test Cut`,`Customer Priority`) VALUES( %s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
if request.method == 'POST':
typ = request.form.get('type')
frm = request.form.get('from')
to = request.form.get('to')
if typ and frm and to:
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
curr = conn.cursor()
cur.execute("SELECT * FROM `inventory_data` WHERE `Primary Equipment` = '" + typ + "' AND `Prod Dt` BETWEEN '" + frm + "' AND '" + to + "'")
res = cur.fetchall()
if len(res)==0:
conn.close()
return render_template('fleetallocation.html',alert='No data available')
sfile = pd.DataFrame(res)
df1 = pd.DataFrame(sfile)
df1['Prod Dt'] =df1['Prod Dt'].astype(object)
for index, i in df1.iterrows():
data = (i['Route'],i['SLoc'],i['Ship-to Abb'],i['Primary Equipment'],i['Batch'],i['Prod Dt'],i['SW'],i['Met Held'],i['Heat No'],i['Delivery Qty'],i['Width'],i['Length'],i['Test Cut'],i['Customer Priority'])
curr.execute(sql,data)
conn.commit()
conn.close()
return render_template('fleetallocation.html',typ=" Equipment type: "+typ,frm="From: "+frm,to=" To:"+to,data = sfile.to_html(index=False))
else:
return render_template('fleetallocation.html',alert ='All input fields are required')
return render_template('fleetallocation.html')
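# /optimise: clears the `output` table, runs the external optimising.py script, then reloads the
# staged input and the demodata.txt log for display.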
@app.route('/optimise', methods=['GET', 'POST'])
def optimise():
open(localaddress+'\\static\\demodata.txt', 'w').close()
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
curr = conn.cursor()
cur.execute("DELETE FROM `output`")
conn.commit()
os.system('python optimising.py')
sa=1
cur.execute("SELECT * FROM `output`")
result = cur.fetchall()
if len(result)==0:
say=0
else:
say=1
curr.execute("SELECT * FROM `input`")
sfile = curr.fetchall()
if len(sfile)==0:
conn.close()
return render_template('fleetallocation.html',say=say,sa=sa,alert='No data available')
sfile = pd.DataFrame(sfile)
conn.close()
with open(localaddress+"\\static\\demodata.txt", "r") as f:
content = f.read()
return render_template('fleetallocation.html',say=say,sa=sa,data = sfile.to_html(index=False),content=content)
@app.route("/scenario")
def scenario():
return render_template('scenario.html')
@app.route("/scenario_insert", methods=['GET','POST'])
def scenario_insert():
if request.method == 'POST':
scenario = request.form.getlist("scenario[]")
customer_priority = request.form.getlist("customer_priority[]")
oldest_sw = request.form.getlist("oldest_sw[]")
production_date = request.form.getlist("production_date[]")
met_held_group = request.form.getlist("met_held_group[]")
test_cut_group = request.form.getlist("test_cut_group[]")
sub_grouping_rules = request.form.getlist("sub_grouping_rules[]")
load_lower_bounds = request.form.getlist("load_lower_bounds[]")
load_upper_bounds = request.form.getlist("load_upper_bounds[]")
width_bounds = request.form.getlist("width_bounds[]")
length_bounds = request.form.getlist("length_bounds[]")
description = request.form.getlist("description[]")
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
curr = conn.cursor()
lngth = len(scenario)
curr.execute("DELETE FROM `scenario`")
if scenario and customer_priority and oldest_sw and production_date and met_held_group and test_cut_group and sub_grouping_rules and load_lower_bounds and load_upper_bounds and width_bounds and length_bounds and description:
say=0
for i in range(lngth):
scenario_clean = scenario[i]
customer_priority_clean = customer_priority[i]
oldest_sw_clean = oldest_sw[i]
production_date_clean = production_date[i]
met_held_group_clean = met_held_group[i]
test_cut_group_clean = test_cut_group[i]
sub_grouping_rules_clean = sub_grouping_rules[i]
load_lower_bounds_clean = load_lower_bounds[i]
load_upper_bounds_clean = load_upper_bounds[i]
width_bounds_clean = width_bounds[i]
length_bounds_clean = length_bounds[i]
description_clean = description[i]
if scenario_clean and customer_priority_clean and oldest_sw_clean and production_date_clean and met_held_group_clean and test_cut_group_clean and sub_grouping_rules_clean and load_lower_bounds_clean and load_upper_bounds_clean and width_bounds_clean and length_bounds_clean:
cur.execute("INSERT INTO `scenario`(scenario, customer_priority, oldest_sw, production_date, met_held_group, test_cut_group, sub_grouping_rules, load_lower_bounds, load_upper_bounds, width_bounds, length_bounds, description) VALUES('"+scenario_clean+"' ,'"+customer_priority_clean+"','"+oldest_sw_clean+"','"+production_date_clean+"','"+met_held_group_clean+"','"+test_cut_group_clean+"', '"+sub_grouping_rules_clean+"','"+load_lower_bounds_clean+"', '"+load_upper_bounds_clean+"','"+width_bounds_clean+"','"+length_bounds_clean+"','"+description_clean+"')")
else:
say = 1
conn.commit()
if(say==0):
alert='All Scenarios inserted'
else:
alert='Some scenarios were not inserted'
return (alert)
conn.close()
return ('All fields are required!')
return ('Failed!!!')
@app.route("/fetch", methods=['GET','POST'])
def fetch():
if request.method == 'POST':
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
cur.execute("SELECT * FROM scenario")
result = cur.fetchall()
if len(result)==0:
conn.close()
return render_template('scenario.html',alert1='No scenarios Available')
result1 = pd.DataFrame(result)
result1 = result1.drop('Sub-grouping rules', axis=1)
conn.close()
return render_template('scenario.html',sdata = result1.to_html(index=False))
return ("Error")
@app.route("/delete", methods=['GET','POST'])
def delete():
if request.method == 'POST':
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
cur.execute("DELETE FROM scenario")
conn.commit()
conn.close()
return render_template('scenario.html',alert1="All the scenarios were dropped!")
return ("Error")
@app.route('/papadashboard', methods=['GET', 'POST'])
def papadashboard():
sql1 = "SELECT `Scenario`, MAX(`Wagon-No`) AS 'Wagon Used', COUNT(`Batch`) AS 'Products Allocated', SUM(`Delivery Qty`) AS 'Total Product Allocated', SUM(`Delivery Qty`)/(MAX(`Wagon-No`)) AS 'Average Load Carried', SUM(`Width`)/(MAX(`Wagon-No`)) AS 'Average Width Used' FROM `output` WHERE `Wagon-No`>0 GROUP BY `Scenario`"
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
curs = conn.cursor()
curs.execute("SELECT `scenario` FROM `scenario`")
sdata = curs.fetchall()
if len(sdata)==0:
conn.close()
return render_template('warning.html',alert='No data available')
cur1 = conn.cursor()
cur1.execute(sql1)
data1 = cur1.fetchall()
if len(data1)==0:
conn.close()
return render_template('warning.html',alert='Infeasible due to insufficient load')
cu = conn.cursor()
cu.execute("SELECT `length_bounds`,`width_bounds`,`load_lower_bounds`,`load_upper_bounds` FROM `scenario`")
sdaa = cu.fetchall()
sdaa = pd.DataFrame(sdaa)
asa=list()
for index, i in sdaa.iterrows():
hover = "Length Bound:"+str(i['length_bounds'])+", Width Bound:"+str(i['width_bounds'])+", Load Upper Bound:"+str(i['load_upper_bounds'])+", Load Lower Bound:"+str(i['load_lower_bounds'])
asa.append(hover)
asa=pd.DataFrame(asa)
asa.columns=['Details']
data1 = pd.DataFrame(data1)
data1['Average Width Used'] = data1['Average Width Used'].astype(int)
data1['Total Product Allocated'] = data1['Total Product Allocated'].astype(int)
data1['Average Load Carried'] = data1['Average Load Carried'].astype(float)
data1['Average Load Carried'] = round(data1['Average Load Carried'],2)
data1['Average Load Carried'] = data1['Average Load Carried'].astype(str)
fdata = pd.DataFrame(columns=['Scenario','Wagon Used','Products Allocated','Total Product Allocated','Average Load Carried','Average Width Used','Details'])
fdata[['Scenario','Wagon Used','Products Allocated','Total Product Allocated','Average Load Carried','Average Width Used']] = data1[['Scenario','Wagon Used','Products Allocated','Total Product Allocated','Average Load Carried','Average Width Used']]
fdata['Details'] = asa['Details']
fdata = fdata.values
sql11 = "SELECT `Scenario`, SUM(`Delivery Qty`)/(MAX(`Wagon-No`)) AS 'Average Load Carried', COUNT(`Batch`) AS 'Allocated', SUM(`Delivery Qty`) AS 'Load Allocated' FROM `output`WHERE `Wagon-No`>0 GROUP BY `Scenario`"
sql21 = "SELECT COUNT(`Batch`) AS 'Total Allocated' FROM `output` GROUP BY `Scenario`"
sql31 = "SELECT `load_upper_bounds` FROM `scenario`"
conn1 = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur11 = conn1.cursor()
cur21 = conn1.cursor()
cur31 = conn1.cursor()
cur11.execute(sql11)
data11 = cur11.fetchall()
data11 = pd.DataFrame(data11)
cur21.execute(sql21)
data21 = cur21.fetchall()
data21 = pd.DataFrame(data21)
cur31.execute(sql31)
data31 = cur31.fetchall()
data31 = pd.DataFrame(data31)
data11['Average Load Carried']=data11['Average Load Carried'].astype(float)
fdata1 = pd.DataFrame(columns=['Scenario','Utilisation Percent','Allocation Percent','Total Load Allocated'])
# load_upper_bounds is stored as VARCHAR, so cast it before dividing
fdata1['Utilisation Percent'] = round(100*(data11['Average Load Carried']/data31['load_upper_bounds'].astype(float)),2)
data11['Load Allocated']=data11['Load Allocated'].astype(int)
fdata1[['Scenario','Total Load Allocated']]=data11[['Scenario','Load Allocated']]
data11['Allocated']=data11['Allocated'].astype(float)
data21['Total Allocated']=data21['Total Allocated'].astype(float)
fdata1['Allocation Percent'] = round(100*(data11['Allocated']/data21['Total Allocated']),2)
fdata1['Allocation Percent'] = fdata1['Allocation Percent'].astype(str)
fdat1 = fdata1.values
conn1.close()
if request.method == 'POST':
conn2 = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn2.cursor()
ata = request.form['name']
cur.execute("SELECT * FROM `output` WHERE `Scenario` = '"+ata+"' ")
ssdata = cur.fetchall()
datasss = pd.DataFrame(ssdata)
data=datasss.replace("Not Allocated", 0)
df=data[['Delivery Qty','Wagon-No','Width','Group-Number']]
df['Wagon-No']=df['Wagon-No'].astype(int)
a=df['Wagon-No'].max()
##bar1
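# per-wagon utilisation: 205000 and 370 appear to be the assumed load capacity and usable width per wagon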
result_array = np.array([])
for i in range (a):
data_i = df[df['Wagon-No'] == i+1]
del_sum_i = data_i['Delivery Qty'].sum()
per_i=[((del_sum_i)/(205000)*100)]
result_array = np.append(result_array, per_i)
result_array1 = np.array([])
for j in range (a):
data_j = df[df['Wagon-No'] == j+1]
del_sum_j = data_j['Width'].sum()
per_util_j=[((del_sum_j)/(370)*100)]
result_array1 = np.append(result_array1, per_util_j)
##pie1
df112 = df[df['Wagon-No'] == 0]
pie1 = df112 ['Width'].sum()
df221 = df[df['Wagon-No'] > 0]
pie11 = df221['Width'].sum()
df1=data[['SW','Group-Number']]
dff1 = df1[data['Wagon-No'] == 0]
da1 =dff1.groupby(['SW']).count()
re11 = np.array([])
res12 = np.append(re11,da1)
da1['SW'] = da1.index
r1 = np.array([])
r12 = np.append(r1, da1['SW'])
df0=data[['Group-Number','Route','SLoc','Ship-to Abb','Wagon-No','Primary Equipment']]
df1=df0.replace("Not Allocated", 0)
f2 = pd.DataFrame(df1)
f2['Wagon-No']=f2['Wagon-No'].astype(int)
####Not-Allocated
f2['Group']=data['Group-Number']
df=f2[['Group','Wagon-No']]
dee = df[df['Wagon-No'] == 0]
deer =dee.groupby(['Group']).count()##Not Allocated
deer['Group'] = deer.index
##Total-Data
f2['Group1']=data['Group-Number']
dfc=f2[['Group1','Wagon-No']]
dfa=pd.DataFrame(dfc)
der = dfa[dfa['Wagon-No'] >= 0]
dear =der.groupby(['Group1']).count()##Wagons >1
dear['Group1'] = dear.index
dear.rename(columns={'Wagon-No': 'Allocated'}, inplace=True)
# `join_axes` was removed from pandas.concat; align on dear's index via reindex instead
result = pd.concat([deer, dear], axis=1).reindex(dear.index)
resu=result[['Group1','Wagon-No','Allocated']]
result1=resu.fillna(0)
r5 = np.array([])
r6 = np.append(r5, result1['Wagon-No'])
r66=r6[0:73]###Not Allocated
r7 = np.append(r5, result1['Allocated'])
r77=r7[0:73]####total
r8 = np.append(r5, result1['Group1'])
r88=r8[0:73]###group
conn2.close()
return render_template('papadashboard.html',say=1,data=fdata,data1=fdat1,ata=ata,bar1=result_array,bar11=result_array1,pie11=pie1,pie111=pie11,x=r12,y=res12,xname=r88, bar7=r77,bar8=r66)
conn.close()
return render_template('papadashboard.html',data=fdata,data1=fdat1)
@app.route('/facilityallocation')
def facilityallocation():
return render_template('facilityhome.html')
@app.route('/dataimport')
def dataimport():
return render_template('facilityimport.html')
@app.route('/dataimport1')
def dataimport1():
return redirect(url_for('dataimport'))
@app.route('/facility_location')
def facility_location():
return render_template('facility_location.html')
@app.route('/facility')
def facility():
return redirect(url_for('facilityallocation'))
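# /imprt: reads the uploaded customer and factory CSVs (expected to contain Lat/Long columns) into
# module-level DataFrames used later by /gmap.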
@app.route("/imprt", methods=['GET','POST'])
def imprt():
global customerdata
global factorydata
global Facyy
global Custo
customerfile = request.files['CustomerData'].read()
factoryfile = request.files['FactoryData'].read()
if len(customerfile)==0 or len(factoryfile)==0:
return render_template('facilityhome.html',warning='Data Invalid')
cdat=pd.read_csv(io.StringIO(customerfile.decode('utf-8')))
customerdata=pd.DataFrame(cdat)
fdat=pd.read_csv(io.StringIO(factoryfile.decode('utf-8')))
factorydata=pd.DataFrame(fdat)
Custo=customerdata.drop(['Lat','Long'],axis=1)
Facyy=factorydata.drop(['Lat','Long'],axis=1)
return render_template('facilityimport1.html',loc1=factorydata.values,loc2=customerdata.values,factory=Facyy.to_html(index=False),customer=Custo.to_html(index=False))
@app.route("/gmap")
def gmap():
custdata=customerdata
Factorydata=factorydata
price=1
#to get distance beetween customer and factory
#first get the Dimension
#get no of factories
Numberoffact=len(Factorydata)
#get Number of Customer
Numberofcust=len(custdata)
#Get The dist/unit cost
cost=price
#def function for distance calculation
# approximate radius of earth in km
def dist(lati1,long1,lati2,long2,cost):
R = 6373.0
lat1 = radians(lati1)
lon1 = radians(long1)
lat2 = radians(lati2)
lon2 = radians(long2)
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2
c = 2 * atan2(sqrt(a), sqrt(1 - a))
distance =round(R * c,2)
return distance*cost
#Create a list for customer and factory
def costtable(custdata,Factorydata):
distance=list()
for lat1,long1 in zip(custdata.Lat, custdata.Long):
for lat2,long2 in zip(Factorydata.Lat, Factorydata.Long):
distance.append(dist(lat1,long1,lat2,long2,cost))
distable=np.reshape(distance, (Numberofcust,Numberoffact)).T
tab=pd.DataFrame(distable,index=[Factorydata.Factory],columns=[custdata.Customer])
return tab
DelCost=costtable(custdata,Factorydata)#return cost table of the customer and factoery
#creating Demand Table
demand=np.array(custdata.Demand)
col1=np.array(custdata.Customer)
Demand=pd.DataFrame(demand,col1).T
cols=sorted(col1)
#Creating capacity table
fact=np.array(Factorydata.Capacity)
col2=np.array(Factorydata.Factory)
Capacity=pd.DataFrame(fact,index=col2).T
colo=sorted(col2)
#creating Fixed cost table
fixed_c=np.array(Factorydata.FixedCost)
col3=np.array(Factorydata.Factory)
FixedCost= pd.DataFrame(fixed_c,index=col3)
# Create the 'prob' variable to contain the problem data
model = LpProblem("Min Cost Facility Location problem",LpMinimize)
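# Decision variables: integer shipment quantities per (factory, customer) pair, a binary open/close
# status per factory, and an integer slack per customer so unmet demand stays feasible at a high penalty.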
production = pulp.LpVariable.dicts("Production",
((factory, cust) for factory in Capacity for cust in Demand),
lowBound=0,
cat='Integer')
factory_status =pulp.LpVariable.dicts("factory_status", (factory for factory in Capacity),
cat='Binary')
cap_slack =pulp.LpVariable.dicts("capslack",
(cust for cust in Demand),
lowBound=0,
cat='Integer')
# objective: transport cost + fixed opening cost + a large penalty for any unmet demand (slack);
# each term is built once rather than being rebuilt per customer
model += pulp.lpSum(
[DelCost.loc[factory, cust] * production[factory, cust] for factory in Capacity for cust in Demand]
+ [FixedCost.loc[factory] * factory_status[factory] for factory in Capacity]
+ [5000000 * cap_slack[cust] for cust in Demand])
for cust in Demand:
model += pulp.lpSum(production[factory, cust] for factory in Capacity)+cap_slack[cust] == Demand[cust]
for factory in Capacity:
model += pulp.lpSum(production[factory, cust] for cust in Demand) <= Capacity[factory]*factory_status[factory]
model.solve()
print("Status:", LpStatus[model.status])
for v in model.variables():
print(v.name, "=", v.varValue)
print("Total Cost of Ingredients per can = ", value(model.objective))
# Getting the table for the Factorywise Allocation
def factoryalloc(model,Numberoffact,Numberofcust,listoffac,listofcus):
listj=list()
listk=list()
listcaps=list()
for v in model.variables():
listj.append(v.varValue)
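# The slicing below appears to rely on model.variables() listing Production_* first, then capslack_*,
# then factory_status_*; this is assumed to hold for these variable names.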
customer=listj[(len(listj)-Numberofcust-Numberoffact):(len(listj)-Numberoffact)]
del listj[(len(listj)-Numberoffact-Numberofcust):len(listj)]
for row in listj:
if row==0:
listk.append(0)
else:
listk.append(1)
x=np.reshape(listj,(Numberoffact,Numberofcust))
y=np.reshape(listk,(Numberoffact,Numberofcust))
FactoryAlloc_table=pd.DataFrame(x,index=listoffac,columns=listofcus)
Factorystatus=pd.DataFrame(y,index=listoffac,columns=listofcus)
return FactoryAlloc_table,Factorystatus,customer
Alltable,FactorystatusTable,ded=factoryalloc(model,Numberoffact,Numberofcust,colo,cols)
Allstatus=list()
dede=pd.DataFrame(ded,columns=['UnSatisfied'])
finaldede=dede[dede.UnSatisfied != 0]
colss=pd.DataFrame(cols,columns=['CustomerLocation'])
fina=pd.concat([colss,finaldede],axis=1, join='inner')
print(fina)
for i in range(len(Alltable)):
for j in range(len(Alltable.columns)):
if (Alltable.loc[Alltable.index[i], Alltable.columns[j]]>0):
all=[Alltable.index[i], Alltable.columns[j], Alltable.loc[Alltable.index[i], Alltable.columns[j]]]
Allstatus.append(all)
Status=pd.DataFrame(Allstatus,columns=['Factory','Customer','Allocation']).astype(str)
#To get the Factory Data
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
#Making Connection to the Database
cur = con.cursor()
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
Status.to_sql(con=engine, name='facilityallocation',index=False, if_exists='replace')
cur = con.cursor()
cur1 = con.cursor()
cur.execute("SELECT * FROM `facilityallocation`")
file=cur.fetchall()
dat=pd.DataFrame(file)
lst=dat[['Factory','Customer']]
mlst=[]
names=lst['Factory'].unique().tolist()
for name in names:
lsty=lst.loc[lst.Factory==name]
mlst.append(lsty.values)
data=dat[['Factory','Customer','Allocation']]
sql="SELECT SUM(`Allocation`) AS 'UseCapacity', `Factory` FROM `facilityallocation` GROUP BY `Factory`"
cur1.execute(sql)
file2=cur1.fetchall()
udata=pd.DataFrame(file2)
bdata=factorydata.sort_values(by=['Factory'])
adata=bdata['Capacity']
con.close()
infdata=dat[['Customer','Factory','Allocation']]
infodata=infdata.sort_values(by=['Customer'])
namess=infodata.Customer.unique()
lstyy=[]
for nam in namess:
bb=infodata[infodata.Customer==nam]
comment=bb['Factory']+":"+bb['Allocation']
prin=[nam,str(comment.values).strip('[]')]
lstyy.append(prin)
return render_template('facilityoptimise.html',say=1,lstyy=lstyy,x1=adata.values,x2=udata.values,dat=mlst,loc1=factorydata.values,
loc2=customerdata.values,factory=Facyy.to_html(index=False),customer=Custo.to_html(index=False),summary=data.to_html(index=False))
#Demand Forecast
@app.route('/demandforecast')
def demandforecast():
return render_template('demandforecast.html')
@app.route("/demandforecastdataimport",methods = ['GET','POST'])
def demandforecastdataimport():
if request.method== 'POST':
global actualforecastdata
flat=request.files['flat'].read()
if len(flat)==0:
return('No Data Selected')
cdat=pd.read_csv(io.StringIO(flat.decode('utf-8')))
actualforecastdata=pd.DataFrame(cdat)
return render_template('demandforecast.html',data=actualforecastdata.to_html(index=False))
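# /demandforecastinput: filters the uploaded history to the requested date window and aggregates it to
# monthly, quarterly or yearly granularity before redirecting to the matching forecast view.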
@app.route('/demandforecastinput', methods = ['GET', 'POST'])
def demandforecastinput():
if request.method=='POST':
global demandforecastfrm
global demandforecasttoo
global demandforecastinputdata
demandforecastfrm=request.form['from']
demandforecasttoo=request.form['to']
value=request.form['typedf']
demandforecastinputdata=actualforecastdata[(actualforecastdata['Date'] >= demandforecastfrm) & (actualforecastdata['Date'] <= demandforecasttoo)]
if value=='monthly': ##monthly
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
demandforecastinputdata.to_sql(con=engine, name='demandforecastinputdata', index=False,if_exists='replace')
return redirect(url_for('monthlyforecast'))
if value=='quarterly': ##quarterly
global Quaterdata
dated2 = demandforecastinputdata['Date']
nlst=[]
for var in dated2:
var1 = int(var[5:7])
if var1 >=1 and var1 <4:
varr=var[:4]+'-01-01'
elif var1 >=4 and var1 <7:
varr=var[:4]+'-04-01'
elif var1 >=7 and var1 <10:
varr=var[:4]+'-07-01'
else:
varr=var[:4]+'-10-01'
nlst.append(varr)
nwlst=pd.DataFrame(nlst,columns=['Newyear'])
demandforecastinputdata=demandforecastinputdata.reset_index()
demandforecastinputdata['Date']=nwlst['Newyear']
Quaterdata=demandforecastinputdata.groupby(['Date']).sum()
Quaterdata=Quaterdata.reset_index()
Quaterdata=Quaterdata.drop('index',axis=1)
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
Quaterdata.to_sql(con=engine, name='demandforecastinputdata', index=False,if_exists='replace')
return redirect(url_for('quarterlyforecast'))
if value=='yearly': ##yearly
global Yeardata
#copydata=demandforecastinputdata
dated1 = demandforecastinputdata['Date']
lst=[]
for var in dated1:
var1 = var[:4]+'-01-01'
lst.append(var1)
newlst=pd.DataFrame(lst,columns=['NewYear'])
demandforecastinputdata=demandforecastinputdata.reset_index()
demandforecastinputdata['Date']=newlst['NewYear']
Yeardata=demandforecastinputdata.groupby(['Date']).sum()
Yeardata=Yeardata.reset_index()
Yeardata=Yeardata.drop('index',axis=1)
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
Yeardata.to_sql(con=engine, name='demandforecastinputdata', index=False,if_exists='replace')
return redirect(url_for('yearlyforecast'))
#if value=='weekly': ##weekly
# return redirect(url_for('output4'))
return render_template('demandforecast.html')
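# /monthlyforecast: the sorted views a1-a4 feed the GDP / exports / market-share / advertising charts.
# On POST the selected techniques are fitted on a 70/30 train/validation split -- "moving average" is
# approximated with ARIMA(0,0,1), plus ARIMA(1,0,0), a linear regression on the time index and simple
# exponential smoothing -- and the forecasts are written to MySQL for the summary tables.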
@app.route("/monthlyforecast",methods = ['GET','POST'])
def monthlyforecast():
data = pd.DataFrame(demandforecastinputdata)
# container1
a1=data.sort_values(['GDP','TotalDemand'], ascending=[True,True])
# container2
a2=data.sort_values(['Pi_Exports','TotalDemand'], ascending=[True,True])
# container3
a3=data.sort_values(['Market_Share','TotalDemand'], ascending=[True,True])
# container4
a4=data.sort_values(['Advertisement_Expense','TotalDemand'], ascending=[True,True])
# container1
df=a1[['GDP']]
re11 = np.array([])
res11 = np.append(re11,df)
df1=a1[['TotalDemand']]
r1 = np.array([])
r11 = np.append(r1, df1)
# top graph
tdf=data['Date'].astype(str)
tre11 = np.array([])
tres11 = np.append(tre11,tdf)
tr1 = np.array([])
tr11 = np.append(tr1, df1)
# container2
udf=a2[['Pi_Exports']]
ure11 = np.array([])
ures11 = np.append(ure11,udf)
ur1 = np.array([])
ur11 = np.append(ur1, df1)
# container3
vdf=a3[['Market_Share']]
vre11 = np.array([])
vres11 = np.append(vre11,vdf)
vr1 = np.array([])
vr11 = np.append(vr1, df1)
# container4
wdf=a4[['Advertisement_Expense']]
wre11 = np.array([])
wres11 = np.append(wre11,wdf)
wr1 = np.array([])
wr11 = np.append(wr1, df1)
if request.method == 'POST':
mov=0
exp=0
reg=0
ari=0
arx=0
till = request.form.get('till')
if request.form.get('moving'):
mov=1
if request.form.get('ESPO'):
exp=1
if request.form.get('regression'):
reg=1
if request.form.get('ARIMA'):
ari=1
if request.form.get('ARIMAX'):
arx=1
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = con.cursor()
cur.execute("CREATE TABLE IF NOT EXISTS `ftech` (`mov` VARCHAR(1),`exp` VARCHAR(1), `reg` VARCHAR(1),`ari` VARCHAR(1),`arx` VARCHAR(1),`till` VARCHAR(10))")
cur.execute("DELETE FROM `ftech`")
con.commit()
cur.execute("INSERT INTO `ftech` VALUES('"+str(mov)+"','"+str(exp)+"','"+str(reg)+"','"+str(ari)+"','"+str(arx)+"','"+str(till)+"')")
con.commit()
cur.execute("CREATE TABLE IF NOT EXISTS `forecastoutput`(`Model` VARCHAR(25),`Date` VARCHAR(10),`TotalDemand` VARCHAR(10),`RatioIncrease` VARCHAR(10),`Spain` VARCHAR(10),`Austria` VARCHAR(10),`Japan` VARCHAR(10),`Hungary` VARCHAR(10),`Germany` VARCHAR(10),`Polland` VARCHAR(10),`UK` VARCHAR(10),`France` VARCHAR(10),`Romania` VARCHAR(10),`Italy` VARCHAR(10),`Greece` VARCHAR(10),`Crotia` VARCHAR(10),`Holland` VARCHAR(10),`Finland` VARCHAR(10),`Hongkong` VARCHAR(10))")
con.commit()
cur.execute("DELETE FROM `forecastoutput`")
con.commit()
sql = "INSERT INTO `forecastoutput` (`Model`,`Date`,`TotalDemand`,`RatioIncrease`,`Spain`,`Austria`,`Japan`,`Hungary`,`Germany`,`Polland`,`UK`,`France`,`Romania`,`Italy`,`Greece`,`Crotia`,`Holland`,`Finland`,`Hongkong`) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
#read the monthly file and index that with time
df=data.set_index('Date')
split_point =int(0.7*len(df))
D, V = df[0:split_point],df[split_point:]
data=pd.DataFrame(D)
#Functions for ME, MAE, MAPE
#ME
def ME(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(y_true - y_pred)
#MAE
def MAE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs(y_true - y_pred))
#MAPE
def MAPE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
# normalize by the actual values (conventional MAPE definition)
return np.mean(np.abs((y_true - y_pred) / y_true)) * 100
cur1=con.cursor()
cur1.execute("SELECT * FROM `ftech`")
ftech=pd.DataFrame(cur1.fetchall())
ari=int(ftech['ari'])
arx=int(ftech['arx'])
exp=int(ftech['exp'])
mov=int(ftech['mov'])
reg=int(ftech['reg'])
start_index1=str(D['GDP'].index[-1])
end_index1=str(ftech['till'][0])
#end_index1=indx[:4]
df2 = pd.DataFrame(data=0,index=["ME","MAE","MAPE"],columns=["Moving Average","ARIMA","Exponential Smoothing","Regression"])
if mov==1:
#2---------------simple moving average-------------------------
#################################MovingAverage#######################
list1=list()
def mavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(0,0,1))
results_ARIMA1=model1.fit(disp=0)
# start_index1 = '2017-01-01'
# end_index1 = '2022-01-01' #4 year forecast
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list1.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["Moving Average"].iloc[0]=s
df2["Moving Average"].iloc[1]=so
df2["Moving Average"].iloc[2]=son
s=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(s)+1):
a=s.iloc[j-2]
b=s.iloc[j-1]
ratio_inc.append(int(((b-a)/a)*100))
return list1,ratio_inc
print(data)
Ma_Out,ratio_incma=mavg(data)
dfs=pd.DataFrame(Ma_Out)
tdfs=dfs.T
print(tdfs)
tdfs.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
tdfs['Model']='Moving Average'
tdfs['RatioIncrease']=ratio_incma
tdfs['Date']=(tdfs.index).strftime("20%y-%m-%d")
tdfs.astype(str)
for index, i in tdfs.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if ari==1:
##--------------min errors--ARIMA (1,0,0)-----------------------------
############################for Total Demand Monthly####################################
list2=list()
def AutoRimavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(1,0,0))
results_ARIMA1=model1.fit(disp=0)
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list2.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["ARIMA"].iloc[0]=s
df2["ARIMA"].iloc[1]=so
df2["ARIMA"].iloc[2]=son
Ars=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(Ars)+1):
As=(Ars.iloc[j-2])
bs=(Ars.iloc[j-1])
# growth rate relative to the previous period, consistent with the moving-average branch
ratio_inc.append(int(((bs-As)/As)*100))
# the ARIMA forecasts are collected in list2; list1 belongs to the moving-average branch
return list2,ratio_inc
Arimamodel,ratio_inc=AutoRimavg(data)
Amodel=pd.DataFrame(Arimamodel)
Results=Amodel.T
Results.astype(str)
Results.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
Results['Model']="ARIMA"
Results['RatioIncrease']=ratio_inc
Results['Date']=Results.index.astype(str)
for index, i in Results.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if reg==1:
#Linear Regression
#Regression Modeling
dates=pd.date_range(start_index1,end_index1,freq='M')
lprd=len(dates)
dateofterms= pd.PeriodIndex(freq='M', start=start_index1, periods=lprd+1)
dofterm=dateofterms.strftime("20%y-%m-%d")
Rdate=pd.DataFrame(dofterm)
noofterms=len(dofterm)
def regression(data,V,noofterms):
#Getting length of Data Frame
lenofdf=len(data.columns.tolist())
#Getting List Of Atributes in Data Frame
listofatr=list()
listofatr=data.columns.tolist()
#making list of pred
pred=pd.DataFrame()
#now riun for each row
for i in range(0,(lenofdf)-5):
df=pd.DataFrame(data[data.columns.tolist()[i]].reset_index())
xvar=list()
for row in df[listofatr[i]]:
xvar.append(row)
df5=pd.DataFrame(xvar)
yvar=list()
for j in range(0,len(df[listofatr[i]])):
yvar.append(j)
dfss=pd.DataFrame(yvar)
clf = linear_model.LinearRegression()
clf.fit(dfss,df5)
# Make predictions using the testing set
dfv=pd.DataFrame(V[V.columns.tolist()[i]].reset_index())
k=list()
for l in range(len(df[listofatr[i]]),len(df[listofatr[i]])+len(dfv)):
k.append(l)
ks=pd.DataFrame(k)
#Future prediction
predlist=list()
for j in range(len(df[listofatr[i]]),len(df[listofatr[i]])+noofterms):
predlist.append(j)
dataframeoflenofpred=pd.DataFrame(predlist)
dateframeofpred=pd.DataFrame(clf.predict(dataframeoflenofpred))
pred=pd.concat([pred,dateframeofpred],axis=1)
#Accuracy Of the mODEL
y_pred = clf.predict(ks)
if(i==0):
meanerror=ME(dfv[listofatr[i]], y_pred)
mae=MAE(dfv[listofatr[i]], y_pred)
mape=MAPE(dfv[listofatr[i]],y_pred)
df2["Regression"].iloc[0]=meanerror
df2["Regression"].iloc[1]=mae
df2["Regression"].iloc[2]=mape
regp=pd.DataFrame(pred)
ratio_incrr=[]
ratio_incrr.append(0)
for j in range(2,len(regp)+1):
Ra=regp.iloc[j-2]
Rb=regp.iloc[j-1]
ratio_incrr.append(int(((Rb-Ra)/Ra)*100))
return pred,ratio_incrr
monthlyRegression,ratio_incrr=regression(data,V,noofterms)
r=pd.DataFrame(monthlyRegression)
r.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
r['Model']="Regression"
r['Date']=Rdate
r['RatioIncrease']=ratio_incrr
r.astype(str)
for index, i in r.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if exp==1:
#Exponential Smoothing
dates=pd.date_range(start_index1,end_index1,freq='M')
lengthofprd=len(dates)
dateofterm= pd.PeriodIndex(freq='M', start=start_index1, periods=lengthofprd+1)
dateofterms=dateofterm.strftime("20%y-%m-%d")
Edate=pd.DataFrame(dateofterms)
predictonterm=len(Edate)
def exponential_smoothing(series, alpha,predictonterm):
result = [series[0]] # first value is same as series
for i in range(1,len(series)):
result.append(alpha * series[i] + (1 - alpha) * result[i-1])
preds=result[len(series)-1]#pred
actual=series[len(series)-1]#actual
forecastlist=[]
for i in range(0,predictonterm):
forecast=(alpha*actual)+((1-alpha)*preds)
forecastlist.append(forecast)
actual=preds
preds=forecast
return result,forecastlist
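# Simple exponential smoothing: result[i] = alpha*series[i] + (1-alpha)*result[i-1]; future periods are
# rolled forward from the last smoothed level, so the multi-step forecast is essentially flat.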
def Exponentialmooth(data,alpha,predicterm):
predexp=list()
forecaste=pd.DataFrame()
m=len(data.columns.tolist())
for i in range(0,m-5):
pred,forecasts=exponential_smoothing(data[data.columns.tolist()[i]],0.5,predictonterm)
ss=pd.DataFrame(forecasts)
predexp.append(pred)
forecaste=pd.concat([forecaste,ss],axis=1)
if(i==0):
# compare the observed series against its in-sample smoothed values (`pred`), not the series length
meanerr=ME(data[data.columns.tolist()[i]],pred)
meanaverr=MAE(data[data.columns.tolist()[i]],pred)
mperr=MAPE(data[data.columns.tolist()[i]],pred)
df2["Exponential Smoothing"].iloc[0]=meanerr
df2["Exponential Smoothing"].iloc[1]=meanaverr
df2["Exponential Smoothing"].iloc[2]=mperr
Exponentials=pd.DataFrame(forecaste)
ratio_incex=[]
ratio_incex.append(0)
for j in range(2,len(Exponentials)+1):
Ea=Exponentials.iloc[j-2]
Eb=Exponentials.iloc[j-1]
ratio_incex.append(int(((Eb-Ea)/Ea)*100))
return forecaste,ratio_incex
fore,ratio_incex=Exponentialmooth(data,0.5,predictonterm)
skf=pd.DataFrame(fore)
skf.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
skf['Model']="Exponential Smoothing"
skf['Date']=Edate
skf['RatioIncrease']=ratio_incex
skf.astype(str)
for index, i in skf.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
dates=pd.date_range(start_index1,end_index1,freq='M')
lengthofprd=len(dates)
dateofterm= pd.PeriodIndex(freq='M', start=start_index1, periods=lengthofprd+1)
dateofterms=dateofterm.strftime("20%y-%m-%d")
ss=pd.DataFrame(dateofterms,columns=['Date'])
dataframeforsum=pd.concat([ss])
if mov==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutput` WHERE `Model`= 'Moving Average'" )
Xmdata = cur.fetchall()
Xmadata = pd.DataFrame(Xmdata)
movsummm=pd.DataFrame(Xmadata)
movsummm.columns=['Moving Average']
dataframeforsum=pd.concat([dataframeforsum,movsummm],axis=1)
if ari==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutput` WHERE `Model`= 'ARIMA'" )
Xadata = cur.fetchall()
Xardata = pd.DataFrame(Xadata)
movsumma=pd.DataFrame(Xardata)
movsumma.columns=['ARIMA']
dataframeforsum=pd.concat([dataframeforsum,movsumma],axis=1)
if exp==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutput` WHERE `Model`= 'Exponential Smoothing'" )
Xedata = cur.fetchall()
Xesdata = pd.DataFrame(Xedata)
exp=pd.DataFrame(Xesdata)
exp.columns=['Exponential Smoothing']
dataframeforsum=pd.concat([dataframeforsum,exp],axis=1)
if reg==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutput` WHERE `Model`= 'Regression'" )
Xrdata = cur.fetchall()
Xredata = pd.DataFrame(Xrdata)
regr=pd.DataFrame(Xredata)
regr.columns=['Regression']
dataframeforsum=pd.concat([dataframeforsum,regr],axis=1)
dataframeforsum.astype(str)
from pandas.io import sql
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
dataframeforsum.to_sql(con=engine, name='summaryoutput',index=False, if_exists='replace')
engine2 = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
df2.to_sql(con=engine2, name='summaryerror',index=False, if_exists='replace')
con.commit()
cnr=con.cursor()
cnr.execute("SELECT * FROM `summaryoutput`")
sdata = cnr.fetchall()
summaryq = pd.DataFrame(sdata)
con.close()
return render_template('monthly.html',summaryq=summaryq.to_html(index=False),sayy=1,smt='Monthly',yr1=demandforecastfrm+' to ',yr2=demandforecasttoo,x=res11,y=r11,x1=tres11,y1=tr11,x2=ures11,y2=ur11,x3=vres11,y3=vr11,x4=wres11,y4=wr11)
return render_template('monthly.html',sayy=1,smt='Monthly',yr1=demandforecastfrm+' to ',yr2=demandforecasttoo,x=res11,y=r11,x1=tres11,y1=tr11,x2=ures11,y2=ur11,x3=vres11,y3=vr11,x4=wres11,y4=wr11)
##quarterly
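# /quarterlyforecast mirrors /monthlyforecast but runs on the quarter-aggregated Quaterdata and a 3-month frequency.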
@app.route("/quarterlyforecast",methods = ['GET','POST'])
def quarterlyforecast():
data = pd.DataFrame(Quaterdata)
a1=data.sort_values(['GDP','TotalDemand'], ascending=[True,True])# container1
a2=data.sort_values(['Pi_Exports','TotalDemand'], ascending=[True,True])# container2
a3=data.sort_values(['Market_Share','TotalDemand'], ascending=[True,True])# container3
a4=data.sort_values(['Advertisement_Expense','TotalDemand'], ascending=[True,True])# container4
# container1
df=a1[['GDP']]/3
re11 = np.array([])
res11 = np.append(re11,df)
df1=a1[['TotalDemand']]
r1 = np.array([])
r11 = np.append(r1, df1)
# top graph
tdf=data['Date'].astype(str)
tre11 = np.array([])
tres11 = np.append(tre11,tdf)
tr1 = np.array([])
tr11 = np.append(tr1, df1)
# container2
udf=a2[['Pi_Exports']]
ure11 = np.array([])
ures11 = np.append(ure11,udf)
ur1 = np.array([])
ur11 = np.append(ur1, df1)
# container3
vdf=a3[['Market_Share']]/3
vre11 = np.array([])
vres11 = np.append(vre11,vdf)
vr1 = np.array([])
vr11 = np.append(vr1, df1)
# container4
wdf=a4[['Advertisement_Expense']]
wre11 = np.array([])
wres11 = np.append(wre11,wdf)
wr1 = np.array([])
wr11 = np.append(wr1, df1)
if request.method == 'POST':
mov=0
exp=0
reg=0
ari=0
arx=0
till = request.form.get('till')
if request.form.get('moving'):
mov=1
if request.form.get('ESPO'):
exp=1
if request.form.get('regression'):
reg=1
if request.form.get('ARIMA'):
ari=1
if request.form.get('ARIMAX'):
arx=1
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = con.cursor()
cur.execute("CREATE TABLE IF NOT EXISTS `ftech` (`mov` VARCHAR(1),`exp` VARCHAR(1), `reg` VARCHAR(1),`ari` VARCHAR(1),`arx` VARCHAR(1),`till` VARCHAR(10))")
cur.execute("DELETE FROM `ftech`")
con.commit()
cur.execute("INSERT INTO `ftech` VALUES('"+str(mov)+"','"+str(exp)+"','"+str(reg)+"','"+str(ari)+"','"+str(arx)+"','"+str(till)+"')")
con.commit()
cur.execute("CREATE TABLE IF NOT EXISTS `forecastoutputq`(`Model` VARCHAR(25),`Date` VARCHAR(10),`TotalDemand` VARCHAR(10),`RatioIncrease` VARCHAR(10),`Spain` VARCHAR(10),`Austria` VARCHAR(10),`Japan` VARCHAR(10),`Hungary` VARCHAR(10),`Germany` VARCHAR(10),`Polland` VARCHAR(10),`UK` VARCHAR(10),`France` VARCHAR(10),`Romania` VARCHAR(10),`Italy` VARCHAR(10),`Greece` VARCHAR(10),`Crotia` VARCHAR(10),`Holland` VARCHAR(10),`Finland` VARCHAR(10),`Hongkong` VARCHAR(10))")
con.commit()
cur.execute("DELETE FROM `forecastoutputq`")
con.commit()
sql = "INSERT INTO `forecastoutputq` (`Model`,`Date`,`TotalDemand`,`RatioIncrease`,`Spain`,`Austria`,`Japan`,`Hungary`,`Germany`,`Polland`,`UK`,`France`,`Romania`,`Italy`,`Greece`,`Crotia`,`Holland`,`Finland`,`Hongkong`) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
#read the monthly file and index that with time
df=data.set_index('Date')
split_point =int(0.7*len(df))
D, V = df[0:split_point],df[split_point:]
data=pd.DataFrame(D)
#Functions for ME, MAE, MAPE
#ME
def ME(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(y_true - y_pred)
#MAE
def MAE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs(y_true - y_pred))
#MAPE
def MAPE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
# normalize by the actual values (conventional MAPE definition)
return np.mean(np.abs((y_true - y_pred) / y_true)) * 100
cur1=con.cursor()
cur1.execute("SELECT * FROM `ftech`")
ftech=pd.DataFrame(cur1.fetchall())
ari=int(ftech['ari'])
arx=int(ftech['arx'])
exp=int(ftech['exp'])
mov=int(ftech['mov'])
reg=int(ftech['reg'])
start_index1=str(D['GDP'].index[-1])
end_index1=str(ftech['till'][0])
#end_index1=indx[:4]
df2 = pd.DataFrame(data=0,index=["ME","MAE","MAPE"],columns=["Moving Average","ARIMA","Exponential Smoothing","Regression"])
if mov==1:
#2---------------simple moving average-------------------------
#################################MovingAverage#######################
list1=list()
def mavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(0,0,1))
results_ARIMA1=model1.fit(disp=0)
# start_index1 = '2017-01-01'
# end_index1 = '2022-01-01' #4 year forecast
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list1.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["Moving Average"].iloc[0]=s
df2["Moving Average"].iloc[1]=so
df2["Moving Average"].iloc[2]=son
s=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(s)+1):
a=s.iloc[j-2]
b=s.iloc[j-1]
ratio_inc.append(int(((b-a)/a)*100))
return list1,ratio_inc
print(data)
Ma_Out,ratio_incma=mavg(data)
dfs=pd.DataFrame(Ma_Out)
tdfs=dfs.T
print(tdfs)
tdfs.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
tdfs['Model']='Moving Average'
tdfs['RatioIncrease']=ratio_incma
tdfs['Date']=(tdfs.index).strftime("20%y-%m-%d")
tdfs.astype(str)
for index, i in tdfs.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if ari==1:
##--------------min errors--ARIMA (1,0,0)-----------------------------
############################for Total Demand Monthly####################################
list2=list()
def AutoRimavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(1,0,0))
results_ARIMA1=model1.fit(disp=0)
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list2.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["ARIMA"].iloc[0]=s
df2["ARIMA"].iloc[1]=so
df2["ARIMA"].iloc[2]=son
Ars=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(Ars)+1):
As=(Ars.iloc[j-2])
bs=(Ars.iloc[j-1])
# growth rate relative to the previous period, consistent with the moving-average branch
ratio_inc.append(int(((bs-As)/As)*100))
# the ARIMA forecasts are collected in list2; list1 belongs to the moving-average branch
return list2,ratio_inc
Arimamodel,ratio_inc=AutoRimavg(data)
Amodel=pd.DataFrame(Arimamodel)
Results=Amodel.T
Results.astype(str)
Results.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
Results['Model']="ARIMA"
Results['RatioIncrease']=ratio_inc
Results['Date']=Results.index.astype(str)
for index, i in Results.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if reg==1:
#Linear Regression
#Regression Modeling
dates=pd.date_range(start_index1,end_index1,freq='3M')
lprd=len(dates)
dateofterms= pd.PeriodIndex(freq='3M', start=start_index1, periods=lprd+1)
dofterm=dateofterms.strftime("20%y-%m-%d")
Rdate=pd.DataFrame(dofterm)
noofterms=len(dofterm)
def regression(data,V,noofterms):
#Getting length of Data Frame
lenofdf=len(data.columns.tolist())
#Getting List Of Atributes in Data Frame
listofatr=list()
listofatr=data.columns.tolist()
#making list of pred
pred=pd.DataFrame()
#now riun for each row
for i in range(0,(lenofdf)-5):
df=pd.DataFrame(data[data.columns.tolist()[i]].reset_index())
xvar=list()
for row in df[listofatr[i]]:
xvar.append(row)
df5=pd.DataFrame(xvar)
yvar=list()
for j in range(0,len(df[listofatr[i]])):
yvar.append(j)
dfss=pd.DataFrame(yvar)
clf = linear_model.LinearRegression()
clf.fit(dfss,df5)
# Make predictions using the testing set
dfv=pd.DataFrame(V[V.columns.tolist()[i]].reset_index())
k=list()
for l in range(len(df[listofatr[i]]),len(df[listofatr[i]])+len(dfv)):
k.append(l)
ks=pd.DataFrame(k)
#Future prediction
predlist=list()
for j in range(len(df[listofatr[i]]),len(df[listofatr[i]])+noofterms):
predlist.append(j)
dataframeoflenofpred=pd.DataFrame(predlist)
dateframeofpred=pd.DataFrame(clf.predict(dataframeoflenofpred))
pred=pd.concat([pred,dateframeofpred],axis=1)
#Accuracy Of the mODEL
y_pred = clf.predict(ks)
if(i==0):
meanerror=ME(dfv[listofatr[i]], y_pred)
mae=MAE(dfv[listofatr[i]], y_pred)
mape=MAPE(dfv[listofatr[i]],y_pred)
df2["Regression"].iloc[0]=meanerror
df2["Regression"].iloc[1]=mae
df2["Regression"].iloc[2]=mape
regp=pd.DataFrame(pred)
ratio_incrr=[]
ratio_incrr.append(0)
for j in range(2,len(regp)+1):
Ra=regp.iloc[j-2]
Rb=regp.iloc[j-1]
ratio_incrr.append(int(((Rb-Ra)/Ra)*100))
return pred,ratio_incrr
monthlyRegression,ratio_incrr=regression(data,V,noofterms)
r=pd.DataFrame(monthlyRegression)
r.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
r['Model']="Regression"
r['Date']=Rdate
r['RatioIncrease']=ratio_incrr
r.astype(str)
for index, i in r.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if exp==1:
#Exponential Smoothing
dates=pd.date_range(start_index1,end_index1,freq='3M')
lengthofprd=len(dates)
dateofterm= pd.PeriodIndex(freq='3M', start=start_index1, periods=lengthofprd+1)
dateofterms=dateofterm.strftime("20%y-%m-%d")
Edate=pd.DataFrame(dateofterms)
predictonterm=len(Edate)
def exponential_smoothing(series, alpha,predictonterm):
result = [series[0]] # first value is same as series
for i in range(1,len(series)):
result.append(alpha * series[i] + (1 - alpha) * result[i-1])
preds=result[len(series)-1]#pred
actual=series[len(series)-1]#actual
forecastlist=[]
for i in range(0,predictonterm):
forecast=(alpha*actual)+((1-alpha)*preds)
forecastlist.append(forecast)
actual=preds
preds=forecast
return result,forecastlist
def Exponentialmooth(data,alpha,predicterm):
predexp=list()
forecaste=pd.DataFrame()
m=len(data.columns.tolist())
for i in range(0,m-5):
pred,forecasts=exponential_smoothing(data[data.columns.tolist()[i]],0.5,predictonterm)
ss=pd.DataFrame(forecasts)
predexp.append(pred)
forecaste=pd.concat([forecaste,ss],axis=1)
if(i==0):
# compare the observed series against its in-sample smoothed values (`pred`), not the series length
meanerr=ME(data[data.columns.tolist()[i]],pred)
meanaverr=MAE(data[data.columns.tolist()[i]],pred)
mperr=MAPE(data[data.columns.tolist()[i]],pred)
df2["Exponential Smoothing"].iloc[0]=meanerr
df2["Exponential Smoothing"].iloc[1]=meanaverr
df2["Exponential Smoothing"].iloc[2]=mperr
Exponentials=pd.DataFrame(forecaste)
ratio_incex=[]
ratio_incex.append(0)
for j in range(2,len(Exponentials)+1):
Ea=Exponentials.iloc[j-2]
Eb=Exponentials.iloc[j-1]
ratio_incex.append(int(((Eb-Ea)/Ea)*100))
return forecaste,ratio_incex
fore,ratio_incex=Exponentialmooth(data,0.5,predictonterm)
skf=pd.DataFrame(fore)
skf.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
skf['Model']="Exponential Smoothing"
skf['Date']=Edate
skf['RatioIncrease']=ratio_incex
skf.astype(str)
for index, i in skf.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
dates=pd.date_range(start_index1,end_index1,freq='3M')
lengthofprd=len(dates)
dateofterm= pd.PeriodIndex(freq='3M', start=start_index1, periods=lengthofprd+1)
dateofterms=dateofterm.strftime("20%y-%m-%d")
ss=pd.DataFrame(dateofterms,columns=['Date'])
dataframeforsum=pd.concat([ss])
if mov==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputq` WHERE `Model`= 'Moving Average'" )
Xmdata = cur.fetchall()
Xmadata = pd.DataFrame(Xmdata)
movsummm=pd.DataFrame(Xmadata)
movsummm.columns=['Moving Average']
dataframeforsum=pd.concat([dataframeforsum,movsummm],axis=1)
if ari==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputq` WHERE `Model`= 'ARIMA'" )
Xadata = cur.fetchall()
Xardata = pd.DataFrame(Xadata)
movsumma=pd.DataFrame(Xardata)
movsumma.columns=['ARIMA']
dataframeforsum=pd.concat([dataframeforsum,movsumma],axis=1)
if exp==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputq` WHERE `Model`= 'Exponential Smoothing'" )
Xedata = cur.fetchall()
Xesdata = pd.DataFrame(Xedata)
exp=pd.DataFrame(Xesdata)
exp.columns=['Exponential Smoothing']
dataframeforsum=pd.concat([dataframeforsum,exp],axis=1)
if reg==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputq` WHERE `Model`= 'Regression'" )
Xrdata = cur.fetchall()
Xredata = pd.DataFrame(Xrdata)
regr=pd.DataFrame(Xredata)
regr.columns=['Regression']
dataframeforsum=pd.concat([dataframeforsum,regr],axis=1)
dataframeforsum.astype(str)
from pandas.io import sql
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
dataframeforsum.to_sql(con=engine, name='summaryoutputq',index=False, if_exists='replace')
engine2 = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
df2.to_sql(con=engine2, name='summaryerror',index=False, if_exists='replace')
con.commit()
cnr=con.cursor()
cnr.execute("SELECT * FROM `summaryoutputq`")
sdata = cnr.fetchall()
summaryq = pd.DataFrame(sdata)
con.close()
return render_template('quarterly.html',summaryq=summaryq.to_html(index=False),sayy=1,smt='Quarterly',yr1=demandforecastfrm+' to ',yr2=demandforecasttoo,x=res11,y=r11,x1=tres11,y1=tr11,x2=ures11,y2=ur11,x3=vres11,y3=vr11,x4=wres11,y4=wr11)
return render_template('quarterly.html',sayy=1,smt='Quarterly',yr1=demandforecastfrm+' to ',yr2=demandforecasttoo,x=res11,y=r11,x1=tres11,y1=tr11,x2=ures11,y2=ur11,x3=vres11,y3=vr11,x4=wres11,y4=wr11)
##yearly
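# /yearlyforecast mirrors /monthlyforecast on the year-aggregated Yeardata; GDP and market share are
# divided by 12 in the charts, presumably to put them on a per-month scale.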
@app.route("/yearlyforecast",methods = ['GET','POST'])
def yearlyforecast():
data = pd.DataFrame(Yeardata)
a1=data.sort_values(['GDP','TotalDemand'], ascending=[True,True])# container1
a2=data.sort_values(['Pi_Exports','TotalDemand'], ascending=[True,True])# container2
a3=data.sort_values(['Market_Share','TotalDemand'], ascending=[True,True])# container3
a4=data.sort_values(['Advertisement_Expense','TotalDemand'], ascending=[True,True])# container4
# container1
df=a1[['GDP']]/12
re11 = np.array([])
res11 = np.append(re11,df)
df1=a1[['TotalDemand']]
r1 = np.array([])
r11 = np.append(r1, df1)
# top graph
tdf=data['Date']
vari=[]
for var in tdf:
vari.append(var[:4])
tres11 = vari
tr1 = np.array([])
tr11 = np.append(tr1, df1)
# container2
udf=a2[['Pi_Exports']]
ure11 = np.array([])
ures11 = np.append(ure11,udf)
ur1 = np.array([])
ur11 = np.append(ur1, df1)
# container3
vdf=a3[['Market_Share']]/12
vre11 = np.array([])
vres11 = np.append(vre11,vdf)
vr1 = np.array([])
vr11 = np.append(vr1, df1)
# container4
wdf=a4[['Advertisement_Expense']]
wre11 = np.array([])
wres11 = np.append(wre11,wdf)
wr1 = np.array([])
wr11 = np.append(wr1, df1)
if request.method == 'POST':
mov=0
exp=0
reg=0
ari=0
arx=0
till = request.form.get('till')
if request.form.get('moving'):
mov=1
if request.form.get('ESPO'):
exp=1
if request.form.get('regression'):
reg=1
if request.form.get('ARIMA'):
ari=1
if request.form.get('ARIMAX'):
arx=1
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = con.cursor()
cur.execute("CREATE TABLE IF NOT EXISTS `ftech` (`mov` VARCHAR(1),`exp` VARCHAR(1), `reg` VARCHAR(1),`ari` VARCHAR(1),`arx` VARCHAR(1),`till` VARCHAR(10))")
cur.execute("DELETE FROM `ftech`")
con.commit()
cur.execute("INSERT INTO `ftech` VALUES('"+str(mov)+"','"+str(exp)+"','"+str(reg)+"','"+str(ari)+"','"+str(arx)+"','"+str(till)+"')")
con.commit()
cur.execute("CREATE TABLE IF NOT EXISTS `forecastoutputy`(`Model` VARCHAR(25),`Date` VARCHAR(10),`TotalDemand` VARCHAR(10),`RatioIncrease` VARCHAR(10),`Spain` VARCHAR(10),`Austria` VARCHAR(10),`Japan` VARCHAR(10),`Hungary` VARCHAR(10),`Germany` VARCHAR(10),`Polland` VARCHAR(10),`UK` VARCHAR(10),`France` VARCHAR(10),`Romania` VARCHAR(10),`Italy` VARCHAR(10),`Greece` VARCHAR(10),`Crotia` VARCHAR(10),`Holland` VARCHAR(10),`Finland` VARCHAR(10),`Hongkong` VARCHAR(10))")
con.commit()
cur.execute("DELETE FROM `forecastoutputy`")
con.commit()
sql = "INSERT INTO `forecastoutputy` (`Model`,`Date`,`TotalDemand`,`RatioIncrease`,`Spain`,`Austria`,`Japan`,`Hungary`,`Germany`,`Polland`,`UK`,`France`,`Romania`,`Italy`,`Greece`,`Crotia`,`Holland`,`Finland`,`Hongkong`) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
#read the monthly file and index that with time
df=data.set_index('Date')
split_point =int(0.7*len(df))
D, V = df[0:split_point],df[split_point:]
data=pd.DataFrame(D)
#Functions for ME, MAE, MAPE
#ME
def ME(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(y_true - y_pred)
#MAE
def MAE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs(y_true - y_pred))
#MAPE
def MAPE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
# normalize by the actual values (conventional MAPE definition)
return np.mean(np.abs((y_true - y_pred) / y_true)) * 100
cur1=con.cursor()
cur1.execute("SELECT * FROM `ftech`")
ftech=pd.DataFrame(cur1.fetchall())
ari=int(ftech['ari'])
arx=int(ftech['arx'])
exp=int(ftech['exp'])
mov=int(ftech['mov'])
reg=int(ftech['reg'])
start_index1=str(D['GDP'].index[-1])
end_index1=str(ftech['till'][0])
#end_index1=indx[:4]
df2 = | pd.DataFrame(data=0,index=["ME","MAE","MAPE"],columns=["Moving Average","ARIMA","Exponential Smoothing","Regression"]) | pandas.DataFrame |
import os
import math
import warnings
import torch
import torch.nn as nn
import numpy as np
import pandas as pd
import shutil as sh
from glob import glob
from PIL import Image
from copy import copy
from tqdm import tqdm
from pathlib import Path
from datetime import datetime
import libs.dirs as dirs
import libs.commons as commons
import libs.utils as utils
from libs.index import IndexManager
from libs.get_frames_class import GetFramesFull
# User Input
def get_input_network_type(net_type_dict, message="network"):
'''
Select between the reference and semi-auto network/dataset options
'''
dictLen = len(net_type_dict)
print("\nEnter {} type code from list:\n".format(message))
print("Code\tName")
for i in range(dictLen):
print("{}:\t{}".format(i, net_type_dict[i]))
input_code = int(input())
if input_code < dictLen:
target_net = net_type_dict[input_code]
else:
target_net = "UNKNOWN"
while target_net not in net_type_dict.values():
input_code = input("Unknown network. Please select a network from the list.\n")
try:
input_code = int(input_code)
except ValueError:
continue
if input_code < dictLen:
target_net = net_type_dict[input_code]
return target_net
def get_input_target_class(net_class_dict):
'''
Get user input of net target class. Applicable to rede3 only.
'''
classLen = len(net_class_dict)
print("Enter the target class code from list:\n")
print("Code\tClass name")
for i in range(classLen):
print("{}:\t{}".format(i, net_class_dict[i]))
input_class_code = int(input())
if input_class_code < classLen:
event_class = net_class_dict[input_class_code]
else:
event_class = "UNKNOWN"
while event_class not in net_class_dict.values():
input_class_code = input("Unknown class. Please select a class from the list.\n")
try:
input_class_code = int(input_class_code)
except ValueError:
continue
if input_class_code < classLen:
event_class = net_class_dict[input_class_code]
return event_class
# Reports and logging
def get_class_counts(index, class_column, pos_label, neg_label):
'''
Returns index class counts according to input labels.
index: pandas DataFrame
DataFrame with the elements to be counted.
pos_label, neg_label: any object or list
labels to be compared to elements of index[class_column]. If any neg_label
is None, the count of negative elements will be <Total index size> - <positive count>.
'''
if isinstance(pos_label, str) or not(hasattr(pos_label, "__iter__")):
pos_label = [pos_label]
if isinstance(neg_label, str) or not(hasattr(neg_label, "__iter__")):
neg_label = [neg_label]
posCount = 0
for label in pos_label:
posCount += index.groupby(class_column).get_group(label).count()[0]
negCount = 0
for label in neg_label:
if label is None:
negCount = index.shape[0] - posCount
break
# Else, count normally
negCount += index.groupby(class_column).get_group(label).count()[0]
return posCount, negCount
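# Usage sketch (hypothetical DataFrame; labels follow the rede1 conventions used elsewhere in this module):
# df = pd.DataFrame({"rede1": ["Duto", "Nada", "Duto", "Confuso"]})
# get_class_counts(df, "rede1", "Duto", ["Nada", "Confuso"]) # -> (2, 2)
# get_class_counts(df, "rede1", "Duto", None) # -> (2, 2), negatives counted by complement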
def get_net_class_counts(index_path, net, target_class=None):
'''
Chooses correct class labels to use in a get_class_counts function call
according to input net and target_class.
'''
assert Path(index_path).is_file(), "Index path does not exist."
index = remove_duplicates(pd.read_csv(index_path, low_memory=False), "FrameHash")
if (net == 3) and (target_class not in commons.rede3_classes.values()):
raise ValueError("Net 3 requires a valid target_class.")
if net == 1:
classColumn = "rede1"
posLabel = commons.rede1_positive
negLabel = commons.rede1_negative
mask = None
elif net ==2:
classColumn = "rede2"
posLabel = commons.rede2_positive
negLabel = commons.rede2_negative
mask = (index["rede1"] == commons.rede1_positive)
elif net == 3:
classColumn = "rede3"
posLabel = target_class
negLabel = None
mask = (index["rede2"] == commons.rede2_positive)
if mask is not None:
# Pass only relevant fraction of index to get_class_counts
index = index.loc[mask, :]
# Translate to binary classes
index[classColumn] = translate_labels(index[classColumn], classColumn)
return get_class_counts(index, classColumn, posLabel, negLabel)
def save_seed_log(log_path, seed, id_string):
# Save sample seed
if Path(log_path).is_file():
f = open(log_path, 'a')
else:
f = open(log_path, 'w')
f.write("{}\n{}\n".format(id_string, seed))
f.close()
def get_loop_stats(loop_folder): # TODO: Finish function
statsDf = pd.DataFrame()
return statsDf
def make_report(report_path, sampled_path, manual_path, automatic_path, prev_unlabeled_path,
train_info, rede=1, target_class=None, show=False):
sampledIndex = pd.read_csv(sampled_path)
manualIndex = pd.read_csv(manual_path)
autoIndex = pd.read_csv(automatic_path)
prevUnlabelIndex = pd.read_csv(prev_unlabeled_path)
# Get report information
numUnlabel = prevUnlabelIndex.shape[0]
numSampled = sampledIndex.shape[0]
sampledNaoDuto = 0
if rede == 1:
sampledNaoDuto = sampledIndex.groupby("rede1").get_group("Confuso").count()[0]+\
sampledIndex.groupby("rede1").get_group("Nada").count()[0]
sampledDuto = sampledIndex.groupby("rede1").get_group(commons.rede1_positive).count()[0]
sampledNaoEvento = 0
sampledEvento = sampledIndex.groupby("rede2").get_group(commons.rede2_positive).count()[0]
if rede < 3:
sampledNaoEvento = sampledIndex.groupby("rede2").get_group(commons.rede2_negative).count()[0]
sampledTotal = sampledDuto + sampledNaoDuto
naoDutoPercent = sampledNaoDuto/sampledTotal*100
dutoPercent = sampledDuto/sampledTotal*100
eventoPercent = sampledEvento/sampledTotal*100
naoEventoPercent = sampledNaoEvento/sampledTotal*100
if rede == 1:
negLabelName = commons.rede1_negative
posLabelName = commons.rede1_positive
cumNeg = manualIndex.groupby("rede1").get_group('Nada').count()[0]+\
manualIndex.groupby("rede1").get_group('Confuso').count()[0]
cumPos = manualIndex.groupby("rede1").get_group(commons.rede1_positive).count()[0]
# Exception for case where there are no positive or negative images automatically annotated
if commons.rede1_negative in set(autoIndex['rede1'].values):
autoNeg = autoIndex.groupby("rede1").get_group(commons.rede1_negative).count()['rede1']
else:
autoNeg = 0
if commons.rede1_positive in set(autoIndex['rede1'].values):
autoPos = autoIndex.groupby("rede1").get_group(commons.rede1_positive).count()['rede1']
else:
autoPos = 0
elif rede == 2:
negLabelName = commons.rede2_negative
posLabelName = commons.rede2_positive
cumNeg = manualIndex.groupby("rede2").get_group(commons.rede2_negative).count()[0]
cumPos = manualIndex.groupby("rede2").get_group(commons.rede2_positive).count()[0]
# Exception for case where there are no positive or negative images automatically annotated
if commons.rede2_negative in set(autoIndex['rede2'].values):
autoNeg = autoIndex.groupby("rede2").get_group(commons.rede2_negative).count()['rede2']
else:
autoNeg = 0
if commons.rede2_positive in set(autoIndex['rede2'].values):
autoPos = autoIndex.groupby("rede2").get_group(commons.rede2_positive).count()['rede2']
else:
autoPos = 0
elif rede == 3:
negLabelName = "Nao"+target_class
posLabelName = target_class
sampledClassPos = sampledIndex.groupby("rede3").get_group(posLabelName).count()[0]
sampledClassNeg = sampledIndex.groupby("rede2").get_group(commons.rede2_positive).count()[0] - sampledClassPos
sampledTotal = sampledIndex.shape[0]
sampleNegPercent = sampledClassNeg/sampledTotal*100
samplePosPercent = sampledClassPos/sampledTotal*100
cumPos = manualIndex.groupby("rede3").get_group(posLabelName).count()[0]
cumNeg = manualIndex.groupby("rede2").get_group(commons.rede2_positive).count()[0] - cumPos
# Exception for case where there are no positive or negative images automatically annotated
if posLabelName in set(autoIndex['rede3'].values):
autoPos = autoIndex.groupby("rede3").get_group(posLabelName).count()['rede3']
else:
autoPos = 0
autoNeg = autoIndex.groupby("rede2").get_group(commons.rede2_positive).count()[0] - autoPos
cumTotal = cumPos + cumNeg
cumNegPercent = cumNeg/cumTotal*100
cumPosPercent = cumPos/cumTotal*100
autoLabel = autoIndex.shape[0]
autoLabelPercent = autoLabel/numUnlabel*100
# Compose manual image distribution string
distributionString = "Manual annotation distribution:\n"
if (rede == 1) or (rede == 2):
distributionString +=\
"NaoDuto: {} images ({:.2f} %)\n\
Duto: {} images ({:.2f} %)\n\
NaoEvento {} images ({:.2f} %)\n\
Evento: {} images ({:.2f} %)\n\
Total: {} images (100%)\n".format(sampledNaoDuto, naoDutoPercent, sampledDuto, dutoPercent,
sampledNaoEvento, naoEventoPercent, sampledEvento, eventoPercent,
sampledTotal)
if rede == 3:
distributionString +=\
"{}:\t{} images ({:.2f} %)\n\
{}:\t\t{} images ({:.2f} %)\n\
Total\t\t{} images (100 %)\n".format(posLabelName, sampledClassPos, samplePosPercent,
negLabelName, sampledClassNeg, sampleNegPercent,
sampledTotal)
# Assemble report string
reportString = "Rede{}.\n{} unlabeled images remain. Sampled {} images for manual annotation.\n".format(rede,
numUnlabel, numSampled)+\
distributionString+\
"Cumulative manual annotation distribution:\n\
{}: {} images ({:.2f} %)\n\
{}: {} images ({:.2f} %)\n\
Total: {} images (100%)\n".format(negLabelName, cumNeg, cumNegPercent,
posLabelName, cumPos, cumPosPercent, cumTotal)+\
"Train Hyperparams:\n\
Num Epochs: {}\n\
Batch Size: {}\n\
Optimizer: Adam\n\
Train Results:\n\
Elapsed Time: {}m\n\
Best val loss: {:.4f}\n\
Best val accuracy: {:.2f} %\n".format(1,2,3,4,5)+\
"Thresholds val (99% pos ratio):\n\
Upper 99% positive ratio: {:.4f}, {:.2f} % ground truth positives\n\
Lower 1% positive ratio: {:.4f}, {:.2f} % ground truth positives\n\
Validation: {}/{} = {:.2f} % images annotated\n\
Automatic Annotation:\n\
    Positive imgs: {}; Negative imgs: {}\n\
    {}/{} = {:.2f} % of images annotated automatically\n".format(1.,2.,3.,4.,5.,6.,7., autoPos, autoNeg,
autoLabel,numUnlabel, autoLabelPercent)
# TODO: Add train info
# Write report
# with open(report_path, 'w') as f:
# f.write(reportString)
utils.write_string(reportString, report_path, mode='w')
if show:
print(reportString)
return reportString
# Automatic labeling
def automatic_labeling(outputs, outputs_index, unlabeled_index, upper_thresh, lower_thresh, rede,
target_class=None, verbose=True):
'''
Return a DataFrame whose entries are taken from unlabeled_index according to calculated indexes.
The indexes are chosen so that their outputs are either above the upper threshold or below the lower.
'''
upperIndexes, lowerIndexes = get_auto_label_indexes(outputs, outputs_index, upper_thresh,
lower_thresh, verbose=True)
autoIndex = get_classified_index(unlabeled_index, upperIndexes, lowerIndexes, rede,
index_col="FrameHash", target_class=target_class, verbose=False)
return autoIndex
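# Usage sketch (illustrative thresholds, variable names and file paths; adapt to the actual loop artifacts):
# outputs, hashes, _ = load_outputs_df("outputs_val.pickle", softmax=True)
# autoIndex = automatic_labeling(outputs[:, 0], np.array(hashes), unlabeledIndex,
#                                upper_thresh=0.99, lower_thresh=0.01, rede=1)
# autoIndex.to_csv("automatic_labeled_images_iteration_1.csv", index=False)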
def get_auto_label_indexes(outputs, outputs_index, upper_thresh, lower_thresh, verbose=True):
datasetLen = len(outputs)
indexes = outputs_index
upperIndexes = indexes[np.greater(outputs, upper_thresh)]
lowerIndexes = indexes[np.less(outputs, lower_thresh)]
totalClassified = len(upperIndexes) + len(lowerIndexes)
if verbose:
print("\nIdeal Upper Threshold: ", upper_thresh)
print("Ideal Lower Threshold: ", lower_thresh)
print("\nImages in:")
print("upperIndexes: ", len(upperIndexes))
print("lowerIndexes: ", len(lowerIndexes))
print("\nImages automatically labeled: {}/{} = {:.2f} %".format(totalClassified, datasetLen,
(totalClassified)/datasetLen*100))
return upperIndexes, lowerIndexes
def get_classified_index(index, pos_hashes, neg_hashes, rede, target_class=None, index_col="FrameHash",
verbose=True):
'''
Create new auto labeled index from the unlabeled_images index and positive and negative indexes
lists.
'''
if index_col is not None:
index.set_index("FrameHash", drop=False, inplace=True)
if rede >= 1:
positiveLabel1 = commons.rede1_positive
negativeLabel1 = commons.rede1_negative
if rede >= 2:
positiveLabel2 = commons.rede2_positive
negativeLabel2 = commons.rede2_negative
if rede >= 3:
assert target_class in commons.rede3_classes.values(), "Unknown target_class value."
positiveLabel3 = target_class
newPositives = index.reindex(labels=pos_hashes, axis=0, copy=True)
newNegatives = index.reindex(labels=neg_hashes, axis=0, copy=True)
datasetLen = len(index)
lenPositives = len(newPositives)
lenNegatives = len(newNegatives)
# Set positive and negative class labels
if rede == 1:
newPositives["rede1"] = [positiveLabel1]*lenPositives
newNegatives["rede1"] = [negativeLabel1]*lenNegatives
if rede == 2:
newPositives["rede1"] = [positiveLabel1]*lenPositives
newNegatives["rede1"] = [positiveLabel1]*lenNegatives
newPositives["rede2"] = [positiveLabel2]*lenPositives
newNegatives["rede2"] = [negativeLabel2]*lenNegatives
if rede == 3:
newPositives["rede1"] = [positiveLabel1]*lenPositives
newNegatives["rede1"] = [positiveLabel1]*lenNegatives
newPositives["rede2"] = [positiveLabel2]*lenPositives
newNegatives["rede2"] = [positiveLabel2]*lenNegatives
newPositives["rede3"] = [positiveLabel3]*lenPositives
newLabeledIndex = pd.concat([newPositives, newNegatives], axis=0, sort=False)
# Why is this assigned again? It appears redundant, since newLabeledIndex was already built above.
if rede == 2:
newPositives["rede1"] = [positiveLabel1]*lenPositives
if verbose:
print(newLabeledIndex.shape)
print("Unlabeled images: ", datasetLen)
print("New pos labels: ", lenPositives)
print("New neg labels: ", lenNegatives)
print("Total new labels: ", lenPositives+lenNegatives)
print("New labels len: ", newLabeledIndex.shape)
print("\nAutomatic anotation of {:.2f} % of input images.".format(len(newLabeledIndex)/datasetLen*100))
return newLabeledIndex
## Threshold finding
def compute_thresholds(val_outputs, labels,
upper_ratio=0.95,
lower_ratio=0.01,
resolution=0.001,
val_indexes=None):
val_outputs = np.squeeze(utils.normalize_array(val_outputs))
val_outputs = val_outputs[:, 0]
resBits = len(str(resolution)) -2
# Maximum resolution is to test a threshold on all output values
if resolution == 'max':
upperThreshList = np.sort(val_outputs)
lowerThreshList = copy(upperThreshList)[::-1]
else:
lowerThreshList = np.arange(0., 1., resolution)
upperThreshList = np.arange(1., 0., -resolution)
# upperThreshList = np.arange(0., 1., resolution)
# lowerThreshList = np.arange(1., 0., -resolution)
# Find upper threshold
idealUpperThresh = find_ideal_upper_thresh(
val_outputs, labels, upperThreshList, ratio=upper_ratio)#, verbose=True)
# Find lower threshold
idealLowerThresh = find_ideal_lower_thresh(
val_outputs, labels, lowerThreshList, ratio=lower_ratio)
idealLowerThresh = np.around(idealLowerThresh, decimals=resBits)
idealUpperThresh = np.around(idealUpperThresh, decimals=resBits)
## If thresholds break, take the mean value
## TODO: Instead of choosing the mean, choose a thresh that maximizes AUC
# if idealUpperThresh < idealLowerThresh:
# meanThresh = (idealUpperThresh+idealLowerThresh)/2
# idealUpperThresh = meanThresh
# idealLowerThresh = meanThresh
if val_indexes is not None:
get_auto_label_indexes(val_outputs, val_indexes, idealUpperThresh, idealLowerThresh, verbose=True)
return idealUpperThresh, idealLowerThresh
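# Usage sketch (assumes val_outputs with shape (N, num_classes) and integer labels where the positive class is 0):
# upperThresh, lowerThresh = compute_thresholds(valOutputs, valLabels,
#                                               upper_ratio=0.99, lower_ratio=0.01,
#                                               resolution=0.001, val_indexes=valHashes)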
def upper_positive_relative_ratio(outputs, labels, threshold):
'''
Compute ratio of ground truth positive examples above given threshold relative only
to the examples above the threshold.
'''
datasetLen = len(outputs)
mask = np.greater(outputs, threshold)
indexes = np.arange(datasetLen)[mask]
if len(indexes) > 0:
posPercent = np.sum(labels[indexes] == 0)/len(indexes) # Positive class index is 0
else:
return 1.
return posPercent
def lower_positive_ratio(outputs, labels, threshold):
'''
Compute ratio of ground truth positive examples below a given threshold relative
to the entire dataset.
'''
datasetLen = len(outputs)
mask = np.less(outputs, threshold)
indexes = np.arange(datasetLen)[mask]
if len(indexes) > 0:
posPercent = np.sum(labels[indexes] == 0)/datasetLen # Positive class index is 0
else:
return 0.
return posPercent
def find_ideal_lower_thresh(outputs, labels, threshold_list=None, ratio=0.01, resolution=0.001, verbose=False):
if verbose:
print("\nThreshold\tLower Pos Ratio")
if threshold_list is None:
threshold_list = np.arange(0., 1., resolution)
for i in tqdm(range(len(threshold_list))):
lowerThresh = threshold_list[i]
posRatio = lower_positive_ratio(outputs, labels, lowerThresh)
if verbose:
print("{:.2f}\t\t{:.2f}".format(lowerThresh, posRatio)) # Print search progress
if (posRatio > ratio) and (ratio > 0.):
if i-1 < 0:
print("\nThreshold could not be found.")
return None
idealThresh = threshold_list[i-1]
posRatio = lower_positive_ratio(outputs, labels, idealThresh)
print("\nFound ideal Lower threshold {:.3f} with {:.2f} % ground truth positives.".format(idealThresh, posRatio*100))
return idealThresh
def find_ideal_upper_thresh(outputs, labels, threshold_list=None, ratio=0.95, resolution=0.001, verbose=False):
if verbose:
print("\nThreshold\tUpper Pos Ratio")
if threshold_list is None:
threshold_list = np.arange(1., 0., -resolution)
for i in tqdm(range(len(threshold_list))):
upperThresh = threshold_list[i]
posRatio = upper_positive_relative_ratio(outputs, labels, upperThresh)
if verbose:
print("{:.2f}\t\t{:.2f}".format(upperThresh, posRatio)) # Print search progress
if (posRatio < ratio) and (ratio < 1.):
if i-1 < 0:
print("\nThreshold could not be found.")
return None
idealThresh = threshold_list[i-1]
posRatio = upper_positive_relative_ratio(outputs, labels, idealThresh)
print("\nFound ideal Upper threshold {:.3f} with {:.2f} % ground truth positives.".format(idealThresh, posRatio*100))
return idealThresh
## Dataset files manipulation
def df_to_csv(dataframe, save_path, verbose=True):
dirs.create_folder(Path(save_path).parent)
dataframe.to_csv(save_path, index=False)
if verbose:
print("Saved DataFrame to ", save_path)
def get_ref_dataset_val_video_list(folder_path, verbose=False):
'''
Get a list of video hashes from a dataset folder with a specific file tree.
folder_path/
xxx/hash1/
yyy/hash2/
...
Returns non-duplicated list of found hashes.
'''
globString = str(folder_path)+"/**"
folderList = glob(globString, recursive=True)
videoList = []
for pathEntry in folderList:
relString = Path(pathEntry).relative_to(folder_path)
if len(relString.parts) == 2:
videoHash = relString.parts[-1]
videoList.append(videoHash)
videoList = list(set(videoList))
return videoList
def split_validation_set_from_video_list(df_path, index_list, key_column="HashMD5", verbose=False):
'''
Split the DataFrame given by df_path in two, according to index_list. One resulting DataFrame
contains only entries with indexes in index_list; the other is the converse, containing none
of the given indexes.
Arguments:
df_path: str filepath
Filepath to target DataFrame saved in csv format.
index_list: list
List of indices to guide the split. One split set will contain only entries with indexes
in this list and the other set will contain the remaining entries.
key_column: str
Name of the DataFrame column where the indexes of index_list will be searched.
Returns:
trainIndex: DataFrame
DataFrame subset from input DataFrame. Contains only entries with indexes not present in
index_list.
valIndex: DataFrame
DataFrame subset from input DataFrame. Contains only entries with indexes present in
index_list.
'''
index = pd.read_csv(df_path)
index.dropna(axis=0, subset=[key_column], inplace=True)
valHash = index_list
trainHash = set(index[key_column]) - set(valHash)
# valHash = utils.compute_file_hash_list(index_list)
index.set_index(key_column, drop=False, inplace=True)
trainIndex = index.loc[trainHash, :].copy()
valIndex = index.loc[valHash, :].copy()
trainIndex.reset_index(inplace=True, drop=True)
valIndex.reset_index(inplace=True, drop=True)
return trainIndex, valIndex
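# Usage sketch (hypothetical paths; key_column must exist in the csv):
# valVideos = get_ref_dataset_val_video_list("/path/to/reference_dataset")
# trainIndex, valIndex = split_validation_set_from_video_list("index.csv", valVideos, key_column="HashMD5")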
def merge_indexes(index_path_list, key_column):
'''
Read a list of DataFrame paths, concatenate them and remove duplicated elements from the resulting DataFrame.
'''
# assert (len(index_path_list) >= 2) and \
# not(isinstance(index_path_list, str)), \
# "Argument index_path_list must be a list of two or more DataFrame paths."
assert hasattr(index_path_list, "__iter__") and \
not(isinstance(index_path_list, str)), \
"Argument index_path_list must be a list of two or more DataFrame paths."
indexListNoDups = [remove_duplicates(pd.read_csv(x), key_column) for x in index_path_list]
if len(indexListNoDups) > 1:
newIndex = pd.concat(indexListNoDups, axis=0, sort=False)
else:
newIndex = indexListNoDups[0]
newIndex = remove_duplicates(newIndex, key_column)
return newIndex
def start_loop(prev_annotated_path, target_class, target_column, verbose=True):
'''
Splits the previously annotated image index into auto- and manually-labeled indexes.
Creates the first iteration folder.
'''
iter1Folder = Path("/".join(prev_annotated_path.parts[:-2])) / "iteration_1"
newUnlabeledPath = Path(prev_annotated_path).with_name("unlabeled_images_iteration_0.csv")
newReferencePath = Path(prev_annotated_path).with_name("reference_images.csv")
newLabeledPath = iter1Folder / "sampled_images_iteration_1.csv"
dirs.create_folder(iter1Folder)
prevAnnotated = pd.read_csv(prev_annotated_path)
# Create nextLevelIndex df with only images that have been annotated as target_class in the
# previous iteration. Save as reference index for this loop
mask = prevAnnotated[target_column] == target_class
nextLevelIndex = prevAnnotated.loc[mask, :]
nextLevelIndex = remove_duplicates(nextLevelIndex, "FrameHash", verbose=True)
nextLevelIndex.to_csv(newReferencePath, index=False)
# New unlabeled set unlabeled_images_iteration_0 is actually composed of all images
# newUnlabeled = nextLevelIndex.copy()
newUnlabeled = nextLevelIndex.groupby("Annotation").get_group('auto') # To get only auto annotated images
# Save manual labeled images as sampled_images for first iteration
newLabeled = nextLevelIndex.groupby("Annotation").get_group('manual')
newUnlabeled.to_csv(newUnlabeledPath, index=False)
newLabeled.to_csv(newLabeledPath, index=False)
if verbose:
print("Annotated last level: ", prevAnnotated.shape)
print("To be used in current step: ", nextLevelIndex.shape)
print("Unlabeled: ", newUnlabeled.shape)
print("Labeled: ", newLabeled.shape)
return newUnlabeled, newLabeled
class IndexLoader:
'''
Iterator to load and transform an image and its file hash.
Constructor arguments:
imagePathList: list of strings
label_list: list of ints
transform: Torchvision transform
Returns img, imgHash, label (optional)
img: Tensor of a Pillow Image
Torch tensor of a Pillow Image. The input transforms are applied and it has shape (channels, h, w)
imgHash: string
MD5 hash of input image.
label: int
Numeric class label associated with img. Will only be returned if IndexLoader received label_list as input.
'''
def __init__(self, imagePathList, label_list=None, batch_size=4, transform=None):
self.imagePathList = imagePathList
self.batch_size = batch_size
self.transform = transform
self.label_list = label_list
self.current_index = 0
self.datasetLen = len(self.imagePathList)
if self.label_list is not None:
assert len(self.label_list) == self.datasetLen, "Image path and label lists must be of same size."
# TODO: (maybe) add default Compose transform with ToTensor
# and Transpose to return a Tensor image with shape (channel, width, height)
# if self.transform != None:
# self.transform = transforms.ToTensor()
def __len__(self):
return math.ceil((self.datasetLen - self.current_index) / self.batch_size)
def __iter__(self):
return self
def __next__(self):
while self.current_index+self.batch_size > self.datasetLen:
self.batch_size -= 1
if self.batch_size == 0:
raise StopIteration
imgList = []
imgHashList = []
labelList = []
for _ in range(self.batch_size):
imgHash = utils.file_hash(self.imagePathList[self.current_index])
img = Image.open(self.imagePathList[self.current_index])
if self.transform:
img = self.transform(img)
if self.label_list is not None:
label = self.label_list[self.current_index]
labelList.append(label)
imgList.append(img)
imgHashList.append(imgHash)
self.current_index += 1
imgList = torch.stack(imgList, dim=0)
if self.label_list is None:
return imgList, imgHashList
else:
return imgList, imgHashList, labelList
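# Usage sketch (placeholder transform/model; labels are optional):
# loader = IndexLoader(imagePathList, label_list=None, batch_size=32, transform=dataTransforms)
# for imgBatch, hashBatch in loader:
#     outputs = model(imgBatch.to(device))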
def index_complement(reference_df, to_drop_df, column_label):
'''
Drop from the 'reference_df' DataFrame the rows whose column_label values appear
in the column_label column of 'to_drop_df'.
The operation performed can be interpreted as a set complement between reference and
to_drop DataFrames. Returns a DataFrame with length equal to (len(reference_df) - len(to_drop_df)).
'''
# Drop NaNs from allAnnotations; TODO: Find out how NaNs could appear in FrameHash column
print("Number of NaNs removed in final_annotated_images: ", to_drop_df[column_label].isna().sum())
if to_drop_df[column_label].isna().sum() > 10:
print("\nWarning! High number of NaNs! Check if everything is normal.\n")
to_drop_df.dropna(subset=[column_label], inplace=True)
reference_df.set_index(column_label, drop=False, inplace=True)
reference_df.drop(labels=to_drop_df[column_label], axis=0, inplace=True)
reference_df.reset_index(drop=True, inplace=True)
return reference_df.copy()
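# Intended behaviour, illustrated with hypothetical hashes:
# reference_df has FrameHash values {a, b, c, d} and to_drop_df has {b, d};
# index_complement(reference_df, to_drop_df, "FrameHash") returns only the rows with hashes {a, c}.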
def load_outputs_df(outputPath, drop_duplicates=False, softmax=True):
'''
Load a pickled dictionary containing a set of outputs, image hashes and labels.
Each 3-tuple corresponds to the data of a single sample.
'''
pickleData = utils.load_pickle(outputPath)
if drop_duplicates:
pickleData = remove_duplicates(pickleData, "ImgHashes")
outputs = np.stack(pickleData["Outputs"])
imgHashes = pickleData["ImgHashes"]
labels = pickleData["Labels"]
if softmax: # TODO: Test both options
outputs = nn.Softmax(dim=1)(torch.as_tensor(outputs))
else:
outputs = torch.as_tensor(outputs)
return outputs.numpy(), imgHashes, labels
def copy_dataset_to_folder(index_path, dest_folder, path_column="FramePath", verbose=True):
'''
Copy files to dest_folder. File paths are given in the path_column of a DataFrame
saved to index_path.
Files are read from source path and copied to dest_folder keeping the original filenames.
index_path: str filepath
Filepath of a DataFrame saved in csv format. DataFrame path_column field must contain
the valid filepaths.
dest_folder: str folder path
Path to destination folder.
path_column: str
Name of DataFrame field containing the source filepaths.
'''
def _add_folder_and_copy(x):
return utils.copy_files(Path(x), dest_folder / Path(x).name)
dirs.create_folder(dest_folder)
index = pd.read_csv(index_path, low_memory=False)
if verbose:
print("\nMoving {} files from dataset folder to sampled images folder...".format(len(index)))
successes = np.sum(index[path_column].map(_add_folder_and_copy))
if verbose:
print("{}/{} files copied.".format(successes, len(index[path_column])))
return successes
def fill_index_information(reference_index, to_fill_index, index_column, columns_to_keep):
reference_index.set_index(index_column, drop=False, inplace=True)
to_fill_index.set_index(index_column, drop=False, inplace=True)
complete_index = reference_index.loc[to_fill_index.index, :]
for col in columns_to_keep:
complete_index[col] = to_fill_index[col]
complete_index.reset_index(drop=True, inplace=True)
reference_index.reset_index(drop=True, inplace=True)
to_fill_index.reset_index(drop=True, inplace=True)
return complete_index.copy()
def merge_manual_auto_sets(auto_df, manual_df):
manual_df["Annotation"] = [commons.manual_annotation]*len(manual_df)
auto_df["Annotation"] = [commons.auto_annotation]*len(auto_df)
mergedIndex = | pd.concat([manual_df, auto_df], axis=0, sort=False) | pandas.concat |
import logging
from pathlib import Path
import numpy as np
import pandas as pd
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score
from active_learning_lab.utils.calibration import expected_calibration_error
METRIC_COLUMNS = [
'train_acc', 'train_micro_precision', 'train_micro_recall', 'train_micro_f1',
'train_macro_precision', 'train_macro_recall', 'train_macro_f1', 'train_ece_10',
'test_acc', 'test_micro_precision', 'test_micro_recall', 'test_micro_f1',
'test_macro_precision', 'test_macro_recall', 'test_macro_f1', 'test_ece_10'
]
COLUMNS = ['run_id', 'query_id', 'num_samples', 'query_time_sec', 'update_time_sec'] + \
METRIC_COLUMNS
class MetricsTracker(object):
NO_VALUE = -1
def __init__(self):
self.measured_metrics = | pd.DataFrame(columns=COLUMNS) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import pandas.compat as compat
###############################################################
# Index / Series common tests which may trigger dtype coercions
###############################################################
class CoercionBase(object):
klasses = ['index', 'series']
dtypes = ['object', 'int64', 'float64', 'complex128', 'bool',
'datetime64', 'datetime64tz', 'timedelta64', 'period']
@property
def method(self):
raise NotImplementedError(self)
def _assert(self, left, right, dtype):
# explicitly check dtype to avoid any unexpected result
if isinstance(left, pd.Series):
tm.assert_series_equal(left, right)
elif isinstance(left, pd.Index):
tm.assert_index_equal(left, right)
else:
raise NotImplementedError
self.assertEqual(left.dtype, dtype)
self.assertEqual(right.dtype, dtype)
def test_has_comprehensive_tests(self):
for klass in self.klasses:
for dtype in self.dtypes:
method_name = 'test_{0}_{1}_{2}'.format(self.method,
klass, dtype)
if not hasattr(self, method_name):
msg = 'test method is not defined: {0}, {1}'
raise AssertionError(msg.format(type(self), method_name))
class TestSetitemCoercion(CoercionBase, tm.TestCase):
method = 'setitem'
def _assert_setitem_series_conversion(self, original_series, loc_value,
expected_series, expected_dtype):
""" test series value's coercion triggered by assignment """
temp = original_series.copy()
temp[1] = loc_value
tm.assert_series_equal(temp, expected_series)
# check dtype explicitly for sure
self.assertEqual(temp.dtype, expected_dtype)
# .loc works different rule, temporary disable
# temp = original_series.copy()
# temp.loc[1] = loc_value
# tm.assert_series_equal(temp, expected_series)
def test_setitem_series_object(self):
obj = pd.Series(list('abcd'))
self.assertEqual(obj.dtype, np.object)
# object + int -> object
exp = pd.Series(['a', 1, 'c', 'd'])
self._assert_setitem_series_conversion(obj, 1, exp, np.object)
# object + float -> object
exp = pd.Series(['a', 1.1, 'c', 'd'])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.object)
# object + complex -> object
exp = pd.Series(['a', 1 + 1j, 'c', 'd'])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.object)
# object + bool -> object
exp = pd.Series(['a', True, 'c', 'd'])
self._assert_setitem_series_conversion(obj, True, exp, np.object)
def test_setitem_series_int64(self):
obj = pd.Series([1, 2, 3, 4])
self.assertEqual(obj.dtype, np.int64)
# int + int -> int
exp = pd.Series([1, 1, 3, 4])
self._assert_setitem_series_conversion(obj, 1, exp, np.int64)
# int + float -> float
# TODO_GH12747 The result must be float
# tm.assert_series_equal(temp, pd.Series([1, 1.1, 3, 4]))
# self.assertEqual(temp.dtype, np.float64)
exp = pd.Series([1, 1, 3, 4])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.int64)
# int + complex -> complex
exp = pd.Series([1, 1 + 1j, 3, 4])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.complex128)
# int + bool -> int
exp = pd.Series([1, 1, 3, 4])
self._assert_setitem_series_conversion(obj, True, exp, np.int64)
def test_setitem_series_float64(self):
obj = pd.Series([1.1, 2.2, 3.3, 4.4])
self.assertEqual(obj.dtype, np.float64)
# float + int -> float
exp = pd.Series([1.1, 1.0, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, 1, exp, np.float64)
# float + float -> float
exp = pd.Series([1.1, 1.1, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.float64)
# float + complex -> complex
exp = pd.Series([1.1, 1 + 1j, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp,
np.complex128)
# float + bool -> float
exp = pd.Series([1.1, 1.0, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, True, exp, np.float64)
def test_setitem_series_complex128(self):
obj = pd.Series([1 + 1j, 2 + 2j, 3 + 3j, 4 + 4j])
self.assertEqual(obj.dtype, np.complex128)
# complex + int -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, True, exp, np.complex128)
# complex + float -> complex
exp = pd.Series([1 + 1j, 1.1, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.complex128)
# complex + complex -> complex
exp = pd.Series([1 + 1j, 1 + 1j, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.complex128)
# complex + bool -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, True, exp, np.complex128)
def test_setitem_series_bool(self):
obj = pd.Series([True, False, True, False])
self.assertEqual(obj.dtype, np.bool)
# bool + int -> int
# TODO_GH12747 The result must be int
# tm.assert_series_equal(temp, pd.Series([1, 1, 1, 0]))
# self.assertEqual(temp.dtype, np.int64)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 1, exp, np.bool)
# TODO_GH12747 The result must be int
# assigning int greater than bool
# tm.assert_series_equal(temp, pd.Series([1, 3, 1, 0]))
# self.assertEqual(temp.dtype, np.int64)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 3, exp, np.bool)
# bool + float -> float
# TODO_GH12747 The result must be float
# tm.assert_series_equal(temp, pd.Series([1., 1.1, 1., 0.]))
# self.assertEqual(temp.dtype, np.float64)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.bool)
# bool + complex -> complex (buggy, results in bool)
# TODO_GH12747 The result must be complex
# tm.assert_series_equal(temp, pd.Series([1, 1 + 1j, 1, 0]))
# self.assertEqual(temp.dtype, np.complex128)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.bool)
# bool + bool -> bool
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, True, exp, np.bool)
def test_setitem_series_datetime64(self):
obj = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self.assertEqual(obj.dtype, 'datetime64[ns]')
# datetime64 + datetime64 -> datetime64
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-01'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self._assert_setitem_series_conversion(obj, pd.Timestamp('2012-01-01'),
exp, 'datetime64[ns]')
# datetime64 + int -> object
# ToDo: The result must be object
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp(1),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self._assert_setitem_series_conversion(obj, 1, exp, 'datetime64[ns]')
# ToDo: add more tests once the above issue has been fixed
def test_setitem_series_datetime64tz(self):
tz = 'US/Eastern'
obj = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2011-01-02', tz=tz),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
self.assertEqual(obj.dtype, 'datetime64[ns, US/Eastern]')
# datetime64tz + datetime64tz -> datetime64tz
exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
value = pd.Timestamp('2012-01-01', tz=tz)
self._assert_setitem_series_conversion(obj, value, exp,
'datetime64[ns, US/Eastern]')
# datetime64 + int -> object
# ToDo: The result must be object
exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp(1, tz=tz),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
self._assert_setitem_series_conversion(obj, 1, exp,
'datetime64[ns, US/Eastern]')
# ToDo: add more tests once the above issue has been fixed
def test_setitem_series_timedelta64(self):
pass
def test_setitem_series_period(self):
pass
def _assert_setitem_index_conversion(self, original_series, loc_key,
expected_index, expected_dtype):
""" test index's coercion triggered by assign key """
temp = original_series.copy()
temp[loc_key] = 5
exp = pd.Series([1, 2, 3, 4, 5], index=expected_index)
tm.assert_series_equal(temp, exp)
# check dtype explicitly for sure
self.assertEqual(temp.index.dtype, expected_dtype)
temp = original_series.copy()
temp.loc[loc_key] = 5
exp = pd.Series([1, 2, 3, 4, 5], index=expected_index)
tm.assert_series_equal(temp, exp)
# check dtype explicitly for sure
self.assertEqual(temp.index.dtype, expected_dtype)
def test_setitem_index_object(self):
obj = pd.Series([1, 2, 3, 4], index=list('abcd'))
self.assertEqual(obj.index.dtype, np.object)
# object + object -> object
exp_index = pd.Index(list('abcdx'))
self._assert_setitem_index_conversion(obj, 'x', exp_index, np.object)
# object + int -> IndexError, regarded as location
temp = obj.copy()
with tm.assertRaises(IndexError):
temp[5] = 5
# object + float -> object
exp_index = pd.Index(['a', 'b', 'c', 'd', 1.1])
self._assert_setitem_index_conversion(obj, 1.1, exp_index, np.object)
def test_setitem_index_int64(self):
# tests setitem with non-existing numeric key
obj = pd.Series([1, 2, 3, 4])
self.assertEqual(obj.index.dtype, np.int64)
# int + int -> int
exp_index = pd.Index([0, 1, 2, 3, 5])
self._assert_setitem_index_conversion(obj, 5, exp_index, np.int64)
# int + float -> float
exp_index = pd.Index([0, 1, 2, 3, 1.1])
self._assert_setitem_index_conversion(obj, 1.1, exp_index, np.float64)
# int + object -> object
exp_index = pd.Index([0, 1, 2, 3, 'x'])
self._assert_setitem_index_conversion(obj, 'x', exp_index, np.object)
def test_setitem_index_float64(self):
# tests setitem with non-existing numeric key
obj = pd.Series([1, 2, 3, 4], index=[1.1, 2.1, 3.1, 4.1])
self.assertEqual(obj.index.dtype, np.float64)
# float + int -> int
temp = obj.copy()
# TODO_GH12747 The result must be float
with tm.assertRaises(IndexError):
temp[5] = 5
# float + float -> float
exp_index = pd.Index([1.1, 2.1, 3.1, 4.1, 5.1])
self._assert_setitem_index_conversion(obj, 5.1, exp_index, np.float64)
# float + object -> object
exp_index = pd.Index([1.1, 2.1, 3.1, 4.1, 'x'])
self._assert_setitem_index_conversion(obj, 'x', exp_index, np.object)
def test_setitem_index_complex128(self):
pass
def test_setitem_index_bool(self):
pass
def test_setitem_index_datetime64(self):
pass
def test_setitem_index_datetime64tz(self):
pass
def test_setitem_index_timedelta64(self):
pass
def test_setitem_index_period(self):
pass
class TestInsertIndexCoercion(CoercionBase, tm.TestCase):
klasses = ['index']
method = 'insert'
def _assert_insert_conversion(self, original, value,
expected, expected_dtype):
""" test coercion triggered by insert """
target = original.copy()
res = target.insert(1, value)
tm.assert_index_equal(res, expected)
self.assertEqual(res.dtype, expected_dtype)
def test_insert_index_object(self):
obj = pd.Index(list('abcd'))
self.assertEqual(obj.dtype, np.object)
# object + int -> object
exp = pd.Index(['a', 1, 'b', 'c', 'd'])
self._assert_insert_conversion(obj, 1, exp, np.object)
# object + float -> object
exp = pd.Index(['a', 1.1, 'b', 'c', 'd'])
self._assert_insert_conversion(obj, 1.1, exp, np.object)
# object + bool -> object
res = obj.insert(1, False)
tm.assert_index_equal(res, pd.Index(['a', False, 'b', 'c', 'd']))
self.assertEqual(res.dtype, np.object)
# object + object -> object
exp = pd.Index(['a', 'x', 'b', 'c', 'd'])
self._assert_insert_conversion(obj, 'x', exp, np.object)
def test_insert_index_int64(self):
obj = pd.Int64Index([1, 2, 3, 4])
self.assertEqual(obj.dtype, np.int64)
# int + int -> int
exp = pd.Index([1, 1, 2, 3, 4])
self._assert_insert_conversion(obj, 1, exp, np.int64)
# int + float -> float
exp = pd.Index([1, 1.1, 2, 3, 4])
self._assert_insert_conversion(obj, 1.1, exp, np.float64)
# int + bool -> int
exp = pd.Index([1, 0, 2, 3, 4])
self._assert_insert_conversion(obj, False, exp, np.int64)
# int + object -> object
exp = pd.Index([1, 'x', 2, 3, 4])
self._assert_insert_conversion(obj, 'x', exp, np.object)
def test_insert_index_float64(self):
obj = pd.Float64Index([1., 2., 3., 4.])
self.assertEqual(obj.dtype, np.float64)
# float + int -> int
exp = pd.Index([1., 1., 2., 3., 4.])
self._assert_insert_conversion(obj, 1, exp, np.float64)
# float + float -> float
exp = pd.Index([1., 1.1, 2., 3., 4.])
self._assert_insert_conversion(obj, 1.1, exp, np.float64)
# float + bool -> float
exp = pd.Index([1., 0., 2., 3., 4.])
self._assert_insert_conversion(obj, False, exp, np.float64)
# float + object -> object
exp = pd.Index([1., 'x', 2., 3., 4.])
self._assert_insert_conversion(obj, 'x', exp, np.object)
def test_insert_index_complex128(self):
pass
def test_insert_index_bool(self):
pass
def test_insert_index_datetime64(self):
obj = pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03',
'2011-01-04'])
self.assertEqual(obj.dtype, 'datetime64[ns]')
# datetime64 + datetime64 => datetime64
exp = pd.DatetimeIndex(['2011-01-01', '2012-01-01', '2011-01-02',
'2011-01-03', '2011-01-04'])
self._assert_insert_conversion(obj, pd.Timestamp('2012-01-01'),
exp, 'datetime64[ns]')
# ToDo: must coerce to object
msg = "Passed item and index have different timezone"
with tm.assertRaisesRegexp(ValueError, msg):
obj.insert(1, pd.Timestamp('2012-01-01', tz='US/Eastern'))
# ToDo: must coerce to object
msg = "cannot insert DatetimeIndex with incompatible label"
with tm.assertRaisesRegexp(TypeError, msg):
obj.insert(1, 1)
def test_insert_index_datetime64tz(self):
obj = pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03',
'2011-01-04'], tz='US/Eastern')
self.assertEqual(obj.dtype, 'datetime64[ns, US/Eastern]')
# datetime64tz + datetime64tz => datetime64
exp = pd.DatetimeIndex(['2011-01-01', '2012-01-01', '2011-01-02',
'2011-01-03', '2011-01-04'], tz='US/Eastern')
val = pd.Timestamp('2012-01-01', tz='US/Eastern')
self._assert_insert_conversion(obj, val, exp,
'datetime64[ns, US/Eastern]')
# ToDo: must coerce to object
msg = "Passed item and index have different timezone"
with tm.assertRaisesRegexp(ValueError, msg):
obj.insert(1, pd.Timestamp('2012-01-01'))
# ToDo: must coerce to object
msg = "Passed item and index have different timezone"
with tm.assertRaisesRegexp(ValueError, msg):
obj.insert(1, pd.Timestamp('2012-01-01', tz='Asia/Tokyo'))
# ToDo: must coerce to object
msg = "cannot insert DatetimeIndex with incompatible label"
with tm.assertRaisesRegexp(TypeError, msg):
obj.insert(1, 1)
def test_insert_index_timedelta64(self):
obj = pd.TimedeltaIndex(['1 day', '2 day', '3 day', '4 day'])
self.assertEqual(obj.dtype, 'timedelta64[ns]')
# timedelta64 + timedelta64 => timedelta64
exp = pd.TimedeltaIndex(['1 day', '10 day', '2 day', '3 day', '4 day'])
self._assert_insert_conversion(obj, pd.Timedelta('10 day'),
exp, 'timedelta64[ns]')
# ToDo: must coerce to object
msg = "cannot insert TimedeltaIndex with incompatible label"
with tm.assertRaisesRegexp(TypeError, msg):
obj.insert(1, pd.Timestamp('2012-01-01'))
# ToDo: must coerce to object
msg = "cannot insert TimedeltaIndex with incompatible label"
with tm.assertRaisesRegexp(TypeError, msg):
obj.insert(1, 1)
def test_insert_index_period(self):
obj = pd.PeriodIndex(['2011-01', '2011-02', '2011-03', '2011-04'],
freq='M')
self.assertEqual(obj.dtype, 'period[M]')
# period + period => period
exp = pd.PeriodIndex(['2011-01', '2012-01', '2011-02',
'2011-03', '2011-04'], freq='M')
self._assert_insert_conversion(obj, pd.Period('2012-01', freq='M'),
exp, 'period[M]')
# period + datetime64 => object
exp = pd.Index([pd.Period('2011-01', freq='M'),
pd.Timestamp('2012-01-01'),
pd.Period('2011-02', freq='M'),
pd.Period('2011-03', freq='M'),
pd.Period('2011-04', freq='M')], freq='M')
self._assert_insert_conversion(obj, pd.Timestamp('2012-01-01'),
exp, np.object)
# period + int => object
exp = pd.Index([pd.Period('2011-01', freq='M'),
1,
pd.Period('2011-02', freq='M'),
pd.Period('2011-03', freq='M'),
pd.Period('2011-04', freq='M')], freq='M')
self._assert_insert_conversion(obj, 1, exp, np.object)
# period + object => object
exp = pd.Index([pd.Period('2011-01', freq='M'),
'x',
pd.Period('2011-02', freq='M'),
pd.Period('2011-03', freq='M'),
pd.Period('2011-04', freq='M')], freq='M')
self._assert_insert_conversion(obj, 'x', exp, np.object)
class TestWhereCoercion(CoercionBase, tm.TestCase):
method = 'where'
def _assert_where_conversion(self, original, cond, values,
expected, expected_dtype):
""" test coercion triggered by where """
target = original.copy()
res = target.where(cond, values)
self._assert(res, expected, expected_dtype)
def _where_object_common(self, klass):
obj = klass(list('abcd'))
self.assertEqual(obj.dtype, np.object)
cond = klass([True, False, True, False])
# object + int -> object
exp = klass(['a', 1, 'c', 1])
self._assert_where_conversion(obj, cond, 1, exp, np.object)
values = klass([5, 6, 7, 8])
exp = klass(['a', 6, 'c', 8])
self._assert_where_conversion(obj, cond, values, exp, np.object)
# object + float -> object
exp = klass(['a', 1.1, 'c', 1.1])
self._assert_where_conversion(obj, cond, 1.1, exp, np.object)
values = klass([5.5, 6.6, 7.7, 8.8])
exp = klass(['a', 6.6, 'c', 8.8])
self._assert_where_conversion(obj, cond, values, exp, np.object)
# object + complex -> object
exp = klass(['a', 1 + 1j, 'c', 1 + 1j])
self._assert_where_conversion(obj, cond, 1 + 1j, exp, np.object)
values = klass([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j])
exp = klass(['a', 6 + 6j, 'c', 8 + 8j])
self._assert_where_conversion(obj, cond, values, exp, np.object)
if klass is pd.Series:
exp = klass(['a', 1, 'c', 1])
self._assert_where_conversion(obj, cond, True, exp, np.object)
values = klass([True, False, True, True])
exp = klass(['a', 0, 'c', 1])
self._assert_where_conversion(obj, cond, values, exp, np.object)
elif klass is pd.Index:
# object + bool -> object
exp = klass(['a', True, 'c', True])
self._assert_where_conversion(obj, cond, True, exp, np.object)
values = klass([True, False, True, True])
exp = klass(['a', False, 'c', True])
self._assert_where_conversion(obj, cond, values, exp, np.object)
else:
raise NotImplementedError
def test_where_series_object(self):
self._where_object_common(pd.Series)
def test_where_index_object(self):
self._where_object_common(pd.Index)
def _where_int64_common(self, klass):
obj = klass([1, 2, 3, 4])
self.assertEqual(obj.dtype, np.int64)
cond = klass([True, False, True, False])
# int + int -> int
exp = klass([1, 1, 3, 1])
self._assert_where_conversion(obj, cond, 1, exp, np.int64)
values = klass([5, 6, 7, 8])
exp = klass([1, 6, 3, 8])
self._assert_where_conversion(obj, cond, values, exp, np.int64)
# int + float -> float
exp = klass([1, 1.1, 3, 1.1])
self._assert_where_conversion(obj, cond, 1.1, exp, np.float64)
values = klass([5.5, 6.6, 7.7, 8.8])
exp = klass([1, 6.6, 3, 8.8])
self._assert_where_conversion(obj, cond, values, exp, np.float64)
# int + complex -> complex
if klass is pd.Series:
exp = klass([1, 1 + 1j, 3, 1 + 1j])
self._assert_where_conversion(obj, cond, 1 + 1j, exp,
np.complex128)
values = klass([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j])
exp = klass([1, 6 + 6j, 3, 8 + 8j])
self._assert_where_conversion(obj, cond, values, exp,
np.complex128)
# int + bool -> int
exp = klass([1, 1, 3, 1])
self._assert_where_conversion(obj, cond, True, exp, np.int64)
values = klass([True, False, True, True])
exp = klass([1, 0, 3, 1])
self._assert_where_conversion(obj, cond, values, exp, np.int64)
def test_where_series_int64(self):
self._where_int64_common(pd.Series)
def test_where_index_int64(self):
self._where_int64_common(pd.Index)
def _where_float64_common(self, klass):
obj = klass([1.1, 2.2, 3.3, 4.4])
self.assertEqual(obj.dtype, np.float64)
cond = klass([True, False, True, False])
# float + int -> float
exp = klass([1.1, 1.0, 3.3, 1.0])
self._assert_where_conversion(obj, cond, 1, exp, np.float64)
values = klass([5, 6, 7, 8])
exp = klass([1.1, 6.0, 3.3, 8.0])
self._assert_where_conversion(obj, cond, values, exp, np.float64)
# float + float -> float
exp = klass([1.1, 1.1, 3.3, 1.1])
self._assert_where_conversion(obj, cond, 1.1, exp, np.float64)
values = klass([5.5, 6.6, 7.7, 8.8])
exp = klass([1.1, 6.6, 3.3, 8.8])
self._assert_where_conversion(obj, cond, values, exp, np.float64)
# float + complex -> complex
if klass is pd.Series:
exp = klass([1.1, 1 + 1j, 3.3, 1 + 1j])
self._assert_where_conversion(obj, cond, 1 + 1j, exp,
np.complex128)
values = klass([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j])
exp = klass([1.1, 6 + 6j, 3.3, 8 + 8j])
self._assert_where_conversion(obj, cond, values, exp,
np.complex128)
# float + bool -> float
exp = klass([1.1, 1.0, 3.3, 1.0])
self._assert_where_conversion(obj, cond, True, exp, np.float64)
values = klass([True, False, True, True])
exp = klass([1.1, 0.0, 3.3, 1.0])
self._assert_where_conversion(obj, cond, values, exp, np.float64)
def test_where_series_float64(self):
self._where_float64_common(pd.Series)
def test_where_index_float64(self):
self._where_float64_common(pd.Index)
def test_where_series_complex128(self):
obj = pd.Series([1 + 1j, 2 + 2j, 3 + 3j, 4 + 4j])
self.assertEqual(obj.dtype, np.complex128)
cond = pd.Series([True, False, True, False])
# complex + int -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 1])
self._assert_where_conversion(obj, cond, 1, exp, np.complex128)
values = pd.Series([5, 6, 7, 8])
exp = pd.Series([1 + 1j, 6.0, 3 + 3j, 8.0])
self._assert_where_conversion(obj, cond, values, exp, np.complex128)
# complex + float -> complex
exp = pd.Series([1 + 1j, 1.1, 3 + 3j, 1.1])
self._assert_where_conversion(obj, cond, 1.1, exp, np.complex128)
values = pd.Series([5.5, 6.6, 7.7, 8.8])
exp = pd.Series([1 + 1j, 6.6, 3 + 3j, 8.8])
self._assert_where_conversion(obj, cond, values, exp, np.complex128)
# complex + complex -> complex
exp = pd.Series([1 + 1j, 1 + 1j, 3 + 3j, 1 + 1j])
self._assert_where_conversion(obj, cond, 1 + 1j, exp, np.complex128)
values = pd.Series([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j])
exp = pd.Series([1 + 1j, 6 + 6j, 3 + 3j, 8 + 8j])
self._assert_where_conversion(obj, cond, values, exp, np.complex128)
# complex + bool -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 1])
self._assert_where_conversion(obj, cond, True, exp, np.complex128)
values = pd.Series([True, False, True, True])
exp = pd.Series([1 + 1j, 0, 3 + 3j, 1])
self._assert_where_conversion(obj, cond, values, exp, np.complex128)
def test_where_index_complex128(self):
pass
def test_where_series_bool(self):
obj = pd.Series([True, False, True, False])
self.assertEqual(obj.dtype, np.bool)
cond = pd.Series([True, False, True, False])
# bool + int -> int
exp = pd.Series([1, 1, 1, 1])
self._assert_where_conversion(obj, cond, 1, exp, np.int64)
values = pd.Series([5, 6, 7, 8])
exp = pd.Series([1, 6, 1, 8])
self._assert_where_conversion(obj, cond, values, exp, np.int64)
# bool + float -> float
exp = pd.Series([1.0, 1.1, 1.0, 1.1])
self._assert_where_conversion(obj, cond, 1.1, exp, np.float64)
values = pd.Series([5.5, 6.6, 7.7, 8.8])
exp = pd.Series([1.0, 6.6, 1.0, 8.8])
self._assert_where_conversion(obj, cond, values, exp, np.float64)
# bool + complex -> complex
exp = pd.Series([1, 1 + 1j, 1, 1 + 1j])
self._assert_where_conversion(obj, cond, 1 + 1j, exp, np.complex128)
values = pd.Series([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j])
exp = pd.Series([1, 6 + 6j, 1, 8 + 8j])
self._assert_where_conversion(obj, cond, values, exp, np.complex128)
# bool + bool -> bool
exp = pd.Series([True, True, True, True])
self._assert_where_conversion(obj, cond, True, exp, np.bool)
values = pd.Series([True, False, True, True])
exp = pd.Series([True, False, True, True])
self._assert_where_conversion(obj, cond, values, exp, np.bool)
def test_where_index_bool(self):
pass
def test_where_series_datetime64(self):
obj = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-03'),
| pd.Timestamp('2011-01-04') | pandas.Timestamp |
import pandas as pd
from os import listdir,path
import csv
#Plan: sort the gb files of each collection into folders (done),
# then convert the gb files in each folder to csv (done; reuse the other folder's output to save time),
# and finally merge everything into one table.
#Load_file_dir
kuofile="C:/PYTHON/Download_Seq_files/gb_csv/kuofile/Polypodiopsida/"
testo_total="C:/PYTHON/Download_Seq_files/gb_csv/testo_total/Polypodiopsida/"
keyword="C:/PYTHON/Download_Seq_files/gb_csv/keyword/Polypodiopsida/"
Save_file_tep="C:/PYTHON/Download_Seq_files/gb_and_table/merge/merge_template_unsorted.csv"
Save_file_dir="C:/PYTHON/Download_Seq_files/gb_and_table/merge/"
file_list={kuofile,testo_total,keyword}
for List in file_list:
filename=[]
files = listdir(List)
for f in files:
fullpath = List + f
if path.isfile(fullpath):
filename.append(f)
#print(filename)
#cols=['LOCUS', 'DEFINITION', 'ACCESSION', 'VERSION', 'KEYWORDS', 'SOURCE', 'ORGANISM', 'REFERENCE', 'AUTHORS', 'TITLE', 'JOURNAL', 'REFERENCE1', 'AUTHORS1', 'TITLE1', 'JOURNAL1', 'FEATURES', 'PUBMED', 'REMARK', 'REFERENCE12', 'AUTHORS12', 'TITLE12', 'JOURNAL12', 'COMMENT', 'PUBMED1', 'REMARK1', 'DBLINK']
#print(len(cols))
#cols_sorted=sorted(cols)
#print(cols_sorted)
#['ACCESSION', 'AUTHORS', 'AUTHORS1', 'AUTHORS12', 'COMMENT', 'DBLINK', 'DEFINITION', 'FEATURES', 'JOURNAL', 'JOURNAL1', 'JOURNAL12', 'KEYWORDS', 'LOCUS', 'ORGANISM', 'PUBMED', 'PUBMED1', 'REFERENCE', 'REFERENCE1', 'REFERENCE12', 'REMARK', 'REMARK1', 'SOURCE', 'TITLE', 'TITLE1', 'TITLE12', 'VERSION']
#https://officeguide.cc/python-sort-sorted-tutorial-examples/
LOCUS=[]
DEFINITION=[]
ACCESSION=[]
VERSION=[]
KEYWORDS=[]
SOURCE=[]
ORGANISM=[]
REFERENCE=[]
AUTHORS=[]
TITLE=[]
JOURNAL=[]
PUBMED=[]
REFERENCE1=[]
AUTHORS1=[]
TITLE1=[]
JOURNAL1=[]
FEATURES=[]
PUBMED1=[]
REMARK=[]
REFERENCE12=[]
AUTHORS12=[]
TITLE12=[]
JOURNAL12=[]
COMMENT=[]
PUBMED12=[]
REMARK1=[]
DBLINK=[]
#https://stackoverflow.com/questions/5757744/how-can-i-get-a-specific-field-of-a-csv-file
def read_cell(route,x, y):
with open(route, 'r') as f:
reader = csv.reader(f)
x_count = 0
for n in reader:
if x_count == x:
cell = n[y]
return cell
x_count += 1
#print (read_cell("C:/PYTHON/Download_Seq_files/gb_csv/testo_total/Polypodiopsida/KY932463_gb.csv",0, 3))
#e.g. in "KY932463_gb.csv" the cell at row 0, column 3 is "VERSION"
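# A more compact alternative (sketch only, not used below): each *_gb.csv stores the field names in
# row 0 and the values in row 1, so pandas could load one record straight into a dict, e.g.
# record = pd.read_csv(List + file, nrows=1).iloc[0].to_dict()
# and the per-field lists below could then be filled with record.get(field, "NaN").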
file_num=0
#for file in filename[5000:-4100]:
#for file in filename[4715:4730]:
for file in filename:
j=0
file_num=file_num+1
while j<28:# first fill in the fields that are present
try:
if str(read_cell(List+"%s"%file,0,j))==str(read_cell(Save_file_tep,0,0)):
LOCUS.append(read_cell(List+"%s"%file,1,j))
elif str(read_cell(List+"%s"%file,0,j))==str(read_cell(Save_file_tep,0,1)):
DEFINITION.append(read_cell(List+"%s"%file,1,j))
elif str(read_cell(List+"%s"%file,0,j))==str(read_cell(Save_file_tep,0,2)):
ACCESSION.append(read_cell(List+"%s"%file,1,j))
elif str(read_cell(List+"%s"%file,0,j))==str(read_cell(Save_file_tep,0,3)):
VERSION.append(read_cell(List+"%s"%file,1,j))
elif str(read_cell(List+"%s"%file,0,j))==str(read_cell(Save_file_tep,0,4)):
KEYWORDS.append(read_cell(List+"%s"%file,1,j))
elif str(read_cell(List+"%s"%file,0,j))==str(read_cell(Save_file_tep,0,5)):
SOURCE.append(read_cell(List+"%s"%file,1,j))
elif str(read_cell(List+"%s"%file,0,j))==str(read_cell(Save_file_tep,0,6)):
ORGANISM.append(read_cell(List+"%s"%file,1,j))
elif str(read_cell(List+"%s"%file,0,j))==str(read_cell(Save_file_tep,0,7)):
REFERENCE.append(read_cell(List+"%s"%file,1,j))
elif str(read_cell(List+"%s"%file,0,j))==str(read_cell(Save_file_tep,0,8)):
AUTHORS.append(read_cell(List+"%s"%file,1,j))
elif str(read_cell(List+"%s"%file,0,j))==str(read_cell(Save_file_tep,0,9)):
TITLE.append(read_cell(List+"%s"%file,1,j))
elif str(read_cell(List+"%s"%file,0,j))==str(read_cell(Save_file_tep,0,10)):
JOURNAL.append(read_cell(List+"%s"%file,1,j))
elif str(read_cell(List+"%s"%file,0,j))==str(read_cell(Save_file_tep,0,11)):
REFERENCE1.append(read_cell(List+"%s"%file,1,j))
elif str(read_cell(List+"%s"%file,0,j))==str(read_cell(Save_file_tep,0,12)):
AUTHORS1.append(read_cell(List+"%s"%file,1,j))
elif str(read_cell(List+"%s"%file,0,j))==str(read_cell(Save_file_tep,0,13)):
TITLE1.append(read_cell(List+"%s"%file,1,j))
elif str(read_cell(List+"%s"%file,0,j))==str(read_cell(Save_file_tep,0,14)):
JOURNAL1.append(read_cell(List+"%s"%file,1,j))
elif str(read_cell(List+"%s"%file,0,j))==str(read_cell(Save_file_tep,0,15)):
FEATURES.append(read_cell(List+"%s"%file,1,j))
elif str(read_cell(List+"%s"%file,0,j))==str(read_cell(Save_file_tep,0,16)):
PUBMED.append(read_cell(List+"%s"%file,1,j))
elif str(read_cell(List+"%s"%file,0,j))==str(read_cell(Save_file_tep,0,17)):
REMARK.append(read_cell(List+"%s"%file,1,j))
elif str(read_cell(List+"%s"%file,0,j))==str(read_cell(Save_file_tep,0,18)):
REFERENCE12.append(read_cell(List+"%s"%file,1,j))
elif str(read_cell(List+"%s"%file,0,j))==str(read_cell(Save_file_tep,0,19)):
AUTHORS12.append(read_cell(List+"%s"%file,1,j))
elif str(read_cell(List+"%s"%file,0,j))==str(read_cell(Save_file_tep,0,20)):
TITLE12.append(read_cell(List+"%s"%file,1,j))
elif str(read_cell(List+"%s"%file,0,j))==str(read_cell(Save_file_tep,0,21)):
JOURNAL12.append(read_cell(List+"%s"%file,1,j))
elif str(read_cell(List+"%s"%file,0,j))==str(read_cell(Save_file_tep,0,22)):
COMMENT.append(read_cell(List+"%s"%file,1,j))
elif str(read_cell(List+"%s"%file,0,j))==str(read_cell(Save_file_tep,0,23)):
PUBMED1.append(read_cell(List+"%s"%file,1,j))
elif str(read_cell(List+"%s"%file,0,j))==str(read_cell(Save_file_tep,0,24)):
REMARK1.append(read_cell(List+"%s"%file,1,j))
elif str(read_cell(List+"%s"%file,0,j))==str(read_cell(Save_file_tep,0,25)):
DBLINK.append(read_cell(List+"%s"%file,1,j))
j=j+1
        except Exception:
            # a missing or malformed cell simply leaves this field unfilled here;
            # the padding below appends "NaN" for it
            #print(file,j,"error")
            j=j+1
            pass
    # pad every field that was not found in this file so all lists stay aligned
    for field_list in (LOCUS, DEFINITION, ACCESSION, VERSION, KEYWORDS, SOURCE,
                       ORGANISM, REFERENCE, AUTHORS, TITLE, JOURNAL, REFERENCE1,
                       AUTHORS1, TITLE1, JOURNAL1, FEATURES, PUBMED, REMARK,
                       REFERENCE12, AUTHORS12, TITLE12, JOURNAL12, COMMENT,
                       PUBMED1, REMARK1, DBLINK):
        if len(field_list) < file_num:
            field_list.append("NaN")
#print()
LOCUS_x=pd.DataFrame(LOCUS).rename(columns = {0:'LOCUS'})
DEFINITION_x=pd.DataFrame(DEFINITION).rename(columns = {0:'DEFINITION'})
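# Illustrative sketch only (not part of the original script): the long if/elif
# chain and the "NaN" padding above could be driven by a single header->list
# mapping. The helper below is never called; it assumes `csv` is already
# imported (read_cell above relies on it) and that `header_names` holds the
# 26 header strings from row 0 of Save_file_tep in the same order as the lists.
def collect_fields_sketch(csv_path, header_names, field_lists):
    """Append the value under each known header to its list, or "NaN" if absent."""
    with open(csv_path, 'r') as f:
        rows = list(csv.reader(f))
    headers = rows[0] if len(rows) > 0 else []
    values = rows[1] if len(rows) > 1 else []
    found = dict(zip(headers, values))
    for name, target in zip(header_names, field_lists):
        target.append(found.get(name, "NaN"))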
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import numpy as np
import pandas as pd
import re
from collections import defaultdict, Counter
import collections
import copy
import os
import sys
import random
import logging
import argparse
def add_label(def_gold):
if def_gold == "yes":
return "entailment", "neutral"
elif def_gold == "unk":
return "neutral", "entailment"
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("--input_dir", nargs='?', type=str, help="input file")
parser.add_argument("--obj", action='store_true', help="object")
ARGS = parser.parse_args()
files = glob.glob(ARGS.input_dir+"/*")
sentences = []
for fi in files:
print(fi)
if re.search("all", fi):
continue
if not re.search("(yes|unk)", fi):
continue
def_gold = re.search("(yes|unk)", fi).group(1)
def_label, rev_label = add_label(def_gold)
pat = re.compile("."+def_gold)
tmp = re.sub(pat, '', os.path.basename(fi))
origenre = re.sub('.txt', '', tmp)
with open(fi, "r") as f:
for line in f:
genre = origenre
s1, s2 = line.split("\t")
if re.search("emptydet", s1):
s1 = re.sub("emptydet ", "several ", s1)
s2 = re.sub("emptydet ", "several ", s2)
genre = genre+".empty"
s1 = s1[0].upper() + s1[1:]
s1 = s1.strip()+"."
s2 = s2[0].upper() + s2[1:]
s2 = s2.strip()+"."
sentences.append([genre, s1, s2, def_label])
sentences.append([genre, s2, s1, rev_label])
df = pd.DataFrame(sentences, columns=['genre', 'sentence1', 'sentence2', 'gold_label'])
df8 = df
train =pd.DataFrame(index=[], columns=['index','promptID','pairID','genre','sentence1_binary_parse','sentence2_binary_parse','sentence1_parse','sentence2_parse','sentence1','sentence2','label1','gold_label'])
train['index'] = df8.index
train['promptID'] = df8.index
train['pairID'] = df8.index
train['gold_label'] = df8["gold_label"]
train['genre'] = df8["genre"]
train['sentence1'] = df8["sentence1"]
train['sentence2'] = df8["sentence2"]
final_train = train.sample(frac=1)
final_train.to_csv(ARGS.input_dir+"/all_formatted.tsv", sep="\t", index=False)
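# The columns above appear to mirror the MultiNLI TSV layout; the parse and label1
# columns are intentionally left empty, and rows are shuffled before writing.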
if ARGS.obj:
pass
else:
depth0 = final_train.query('genre.str.contains("depth0")', engine='python')
depth0.to_csv(ARGS.input_dir+"/depth0.tsv", sep="\t", index=False)
depth1 = final_train.query('genre.str.contains("depth1")', engine='python')
depth1.to_csv(ARGS.input_dir+"/depth1.tsv", sep="\t", index=False)
depth2 = final_train.query('genre.str.contains("depth2")', engine='python')
depth2.to_csv(ARGS.input_dir+"/depth2.tsv", sep="\t", index=False)
depth3 = final_train.query('genre.str.contains("depth3")', engine='python')
depth3.to_csv(ARGS.input_dir+"/depth3.tsv", sep="\t", index=False)
depth4 = final_train.query('genre.str.contains("depth4")', engine='python')
depth4.to_csv(ARGS.input_dir+"/depth4.tsv", sep="\t", index=False)
sample_lex1_1 = depth0.query('genre.str.contains("empty")', engine='python')
rest_1 = depth0.query('not genre.str.contains("empty")', engine='python')
sample_lex1_2 = depth0.query('sentence1.str.contains("No ")', engine='python')
rest_2 = depth0.query('not sentence1.str.contains("No ")', engine='python')
allq_lex1_1_l = rest_1.query('genre.str.contains("lex.")', engine='python')
allq_lex1_2_l = rest_2.query('genre.str.contains("lex.")', engine='python')
rest_1_l = rest_1.query('not genre.str.contains("lex.")', engine='python')
rest_2_l = rest_2.query('not genre.str.contains("lex.")', engine='python')
allq_lex1_1_p = rest_1.query('genre.str.contains("pp.")', engine='python')
allq_lex1_2_p = rest_2.query('genre.str.contains("pp.")', engine='python')
rest_1_p = rest_1.query('not genre.str.contains("pp.")', engine='python')
rest_2_p = rest_2.query('not genre.str.contains("pp.")', engine='python')
rest_types = [[rest_1_l,sample_lex1_2,allq_lex1_2_l,sample_lex1_1,allq_lex1_1_l],
[rest_2_l,sample_lex1_2,allq_lex1_2_l,sample_lex1_1,allq_lex1_1_l],
[rest_1_p,sample_lex1_2,allq_lex1_2_p,sample_lex1_1,allq_lex1_1_p],
[rest_2_p,sample_lex1_2,allq_lex1_2_p,sample_lex1_1,allq_lex1_1_p]]
for i, rest_type in enumerate(rest_types):
#sampling lex_1
train = pd.concat([rest_type[1],rest_type[2]]).drop_duplicates().reset_index(drop=True).sample(frac=1)
test = rest_type[0]
train.to_csv(ARGS.input_dir+"/lex_1_"+str(i)+".tsv", sep="\t", index=False)
test.to_csv(ARGS.input_dir+"/dev_matched_lex_1_"+str(i)+".tsv", sep="\t", index=False)
#1.{at least three, at most three}, {less than three, more than three},{a few, few}
#2.{a few, few}, {at least three, at most three}, {less than three, more than three}
at = test.query('sentence1.str.contains("At ")', engine='python')
than = test.query('sentence1.str.contains(" than ")', engine='python')
few = test.query('sentence1.str.contains("ew ")', engine='python')
rest = test.query('not sentence1.str.contains("At ") and not sentence1.str.contains(" than ") and not sentence1.str.contains("ew ")', engine='python')
lex_2 = pd.concat([rest_type[3],rest_type[4], at]).drop_duplicates().reset_index(drop=True).sample(frac=1)
test_lex_2 = pd.concat([than, few, rest]).drop_duplicates().reset_index(drop=True)
lex_2.to_csv(ARGS.input_dir+"/lex_2_"+str(i)+"_1.tsv", sep="\t", index=False)
test_lex_2.to_csv(ARGS.input_dir+"/dev_matched_lex_2_"+str(i)+"_1.tsv", sep="\t", index=False)
lex_3 = pd.concat([rest_type[3],rest_type[4], at, than]).drop_duplicates().reset_index(drop=True).sample(frac=1)
test_lex_3 = pd.concat([few, rest]).drop_duplicates().reset_index(drop=True)
lex_3.to_csv(ARGS.input_dir+"/lex_3_"+str(i)+"_1.tsv", sep="\t", index=False)
test_lex_3.to_csv(ARGS.input_dir+"/dev_matched_lex_3_"+str(i)+"_1.tsv", sep="\t", index=False)
lex_4 = pd.concat([rest_type[3],rest_type[4], at, than, few]).drop_duplicates().reset_index(drop=True).sample(frac=1)
test_lex_4 = pd.concat([rest]).drop_duplicates().reset_index(drop=True)
lex_4.to_csv(ARGS.input_dir+"/lex_4_"+str(i)+"_1.tsv", sep="\t", index=False)
test_lex_4.to_csv(ARGS.input_dir+"/dev_matched_lex_4_"+str(i)+"_1.tsv", sep="\t", index=False)
lex_2 = pd.concat([rest_type[3],rest_type[4], few]).drop_duplicates().reset_index(drop=True).sample(frac=1)
test_lex_2 = pd.concat([than, at, rest]).drop_duplicates().reset_index(drop=True)
lex_2.to_csv(ARGS.input_dir+"/lex_2_"+str(i)+"_2.tsv", sep="\t", index=False)
test_lex_2.to_csv(ARGS.input_dir+"/dev_matched_lex_2_"+str(i)+"_2.tsv", sep="\t", index=False)
lex_3 = pd.concat([rest_type[3],rest_type[4], few, at]).drop_duplicates().reset_index(drop=True).sample(frac=1)
test_lex_3 = pd.concat([than, rest]).drop_duplicates().reset_index(drop=True)
lex_3.to_csv(ARGS.input_dir+"/lex_3_"+str(i)+"_2.tsv", sep="\t", index=False)
test_lex_3.to_csv(ARGS.input_dir+"/dev_matched_lex_3_"+str(i)+"_2.tsv", sep="\t", index=False)
lex_4 = pd.concat([rest_type[3],rest_type[4], few, than, at]).drop_duplicates().reset_index(drop=True).sample(frac=1)
        test_lex_4 = pd.concat([rest]).drop_duplicates().reset_index(drop=True)
import pandas as pd
from openpyxl import worksheet
def map_team_names(names_sheet: worksheet, games_sheet: worksheet):
mapped_names = []
missing_names = set()
names_last_row = names_sheet.max_row
for row in range(1, names_last_row):
team_name = names_sheet.cell(row, 1).value
if team_name:
mapped_names.append(team_name.upper())
games_last_row = games_sheet.max_row
for row in range(2, games_last_row):
visitor = games_sheet.cell(row, 7).value
home = games_sheet.cell(row, 12).value
if home and home.upper() not in mapped_names:
missing_names.add(home)
if visitor and visitor.upper() not in mapped_names:
missing_names.add(visitor)
if missing_names:
return missing_names
else:
return False
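# map_team_names returns the set of team names that appear in the games sheet but
# not in the names sheet, or False when every name is covered.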
def format_name(name: str) -> str:
"""
    Strip leading and trailing whitespace from the name.
    Uppercase the name so comparisons are not case sensitive.
"""
name = name.strip()
name = name.upper()
return name
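# e.g. format_name("  Boston Celtics ") -> "BOSTON CELTICS"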
def export_output_file(teams: dict, output_file_name: str):
    ranking_df = pd.DataFrame(teams)
# SPDX-FileCopyrightText: : 2017-2020 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
# coding: utf-8
"""
Adds electrical generators and existing hydro storage units to a base network.
Relevant Settings
-----------------
.. code:: yaml
costs:
year:
USD2013_to_EUR2013:
        discountrate:
emission_prices:
electricity:
max_hours:
marginal_cost:
capital_cost:
conventional_carriers:
co2limit:
extendable_carriers:
include_renewable_capacities_from_OPSD:
estimate_renewable_capacities_from_capacity_stats:
load:
scaling_factor:
renewable:
hydro:
carriers:
hydro_max_hours:
hydro_capital_cost:
lines:
length_factor:
.. seealso::
Documentation of the configuration file ``config.yaml`` at :ref:`costs_cf`,
:ref:`electricity_cf`, :ref:`load_cf`, :ref:`renewable_cf`, :ref:`lines_cf`
Inputs
------
- ``data/costs.csv``: The database of cost assumptions for all included technologies for specific years from various sources; e.g. discount rate, lifetime, investment (CAPEX), fixed operation and maintenance (FOM), variable operation and maintenance (VOM), fuel costs, efficiency, carbon-dioxide intensity.
- ``data/bundle/hydro_capacities.csv``: Hydropower plant store/discharge power capacities, energy storage capacity, and average hourly inflow by country.
.. image:: ../img/hydrocapacities.png
:scale: 34 %
- ``data/geth2015_hydro_capacities.csv``: alternative to capacities above; not currently used!
- ``resources/opsd_load.csv`` Hourly per-country load profiles.
- ``resources/regions_onshore.geojson``: confer :ref:`busregions`
- ``resources/nuts3_shapes.geojson``: confer :ref:`shapes`
- ``resources/powerplants.csv``: confer :ref:`powerplants`
- ``resources/profile_{}.nc``: all technologies in ``config["renewables"].keys()``, confer :ref:`renewableprofiles`.
- ``networks/base.nc``: confer :ref:`base`
Outputs
-------
- ``networks/elec.nc``:
.. image:: ../img/elec.png
:scale: 33 %
Description
-----------
The rule :mod:`add_electricity` ties all the different data inputs from the preceding rules together into a detailed PyPSA network that is stored in ``networks/elec.nc``. It includes:
- today's transmission topology and transfer capacities (optionally including lines which are under construction according to the config settings ``lines: under_construction`` and ``links: under_construction``),
- today's thermal and hydro power generation capacities (for the technologies listed in the config setting ``electricity: conventional_carriers``), and
- today's load time-series (upsampled in a top-down approach according to population and gross domestic product)
It further adds extendable ``generators`` with **zero** capacity for
- photovoltaic, onshore and AC- as well as DC-connected offshore wind installations with today's locational, hourly wind and solar capacity factors (but **no** current capacities),
- additional open- and combined-cycle gas turbines (if ``OCGT`` and/or ``CCGT`` is listed in the config setting ``electricity: extendable_carriers``)
"""
import logging
from _helpers import configure_logging, update_p_nom_max
import pypsa
import pandas as pd
import numpy as np
import xarray as xr
import geopandas as gpd
import powerplantmatching as pm
from powerplantmatching.export import map_country_bus
from vresutils.costdata import annuity
from vresutils import transfer as vtransfer
idx = pd.IndexSlice
logger = logging.getLogger(__name__)
def normed(s): return s/s.sum()
def _add_missing_carriers_from_costs(n, costs, carriers):
    missing_carriers = pd.Index(carriers)
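# Sketch only: the original helper is truncated above. One plausible continuation
# (an assumption, not the original source) registers any carrier the network does
# not know yet; `n.carriers` and `n.add("Carrier", name)` are standard PyPSA
# attributes/calls.
def _add_missing_carriers_sketch(n, carriers):
    missing = pd.Index(carriers).difference(n.carriers.index)
    for name in missing:
        n.add("Carrier", name)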
from datetime import datetime
import numpy as np
import pandas as pd
import pytest
from numba import njit
import vectorbt as vbt
from tests.utils import record_arrays_close
from vectorbt.generic.enums import range_dt, drawdown_dt
from vectorbt.portfolio.enums import order_dt, trade_dt, log_dt
day_dt = np.timedelta64(86400000000000)
example_dt = np.dtype([
('id', np.int64),
('col', np.int64),
('idx', np.int64),
('some_field1', np.float64),
('some_field2', np.float64)
], align=True)
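# Each record is (id, col, idx, some_field1, some_field2): a unique record id, the
# index of the column it belongs to in the wrapper, the row (timestamp) position,
# and two numeric payload fields used by the map/reduce tests below.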
records_arr = np.asarray([
(0, 0, 0, 10, 21),
(1, 0, 1, 11, 20),
(2, 0, 2, 12, 19),
(3, 1, 0, 13, 18),
(4, 1, 1, 14, 17),
(5, 1, 2, 13, 18),
(6, 2, 0, 12, 19),
(7, 2, 1, 11, 20),
(8, 2, 2, 10, 21)
], dtype=example_dt)
records_nosort_arr = np.concatenate((
records_arr[0::3],
records_arr[1::3],
records_arr[2::3]
))
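# Interleaving the rows deliberately breaks the per-column ordering, so the
# *_nosort objects exercise the is_sorted()/sort() paths tested below.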
group_by = pd.Index(['g1', 'g1', 'g2', 'g2'])
wrapper = vbt.ArrayWrapper(
index=['x', 'y', 'z'],
columns=['a', 'b', 'c', 'd'],
ndim=2,
freq='1 days'
)
wrapper_grouped = wrapper.replace(group_by=group_by)
records = vbt.records.Records(wrapper, records_arr)
records_grouped = vbt.records.Records(wrapper_grouped, records_arr)
records_nosort = records.replace(records_arr=records_nosort_arr)
records_nosort_grouped = vbt.records.Records(wrapper_grouped, records_nosort_arr)
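# records and records_grouped share the same record array; only the wrapper's
# grouping differs, which is what the grouped variants of the tests exercise.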
# ############# Global ############# #
def setup_module():
vbt.settings.numba['check_func_suffix'] = True
vbt.settings.caching.enabled = False
vbt.settings.caching.whitelist = []
vbt.settings.caching.blacklist = []
def teardown_module():
vbt.settings.reset()
# ############# col_mapper.py ############# #
class TestColumnMapper:
def test_col_arr(self):
np.testing.assert_array_equal(
records['a'].col_mapper.col_arr,
np.array([0, 0, 0])
)
np.testing.assert_array_equal(
records.col_mapper.col_arr,
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
)
def test_get_col_arr(self):
np.testing.assert_array_equal(
records.col_mapper.get_col_arr(),
records.col_mapper.col_arr
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_arr(),
np.array([0, 0, 0, 0, 0, 0])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_arr(),
np.array([0, 0, 0, 0, 0, 0, 1, 1, 1])
)
def test_col_range(self):
np.testing.assert_array_equal(
records['a'].col_mapper.col_range,
np.array([
[0, 3]
])
)
np.testing.assert_array_equal(
records.col_mapper.col_range,
np.array([
[0, 3],
[3, 6],
[6, 9],
[-1, -1]
])
)
def test_get_col_range(self):
np.testing.assert_array_equal(
records.col_mapper.get_col_range(),
np.array([
[0, 3],
[3, 6],
[6, 9],
[-1, -1]
])
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_range(),
np.array([[0, 6]])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_range(),
np.array([[0, 6], [6, 9]])
)
def test_col_map(self):
np.testing.assert_array_equal(
records['a'].col_mapper.col_map[0],
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
records['a'].col_mapper.col_map[1],
np.array([3])
)
np.testing.assert_array_equal(
records.col_mapper.col_map[0],
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
)
np.testing.assert_array_equal(
records.col_mapper.col_map[1],
np.array([3, 3, 3, 0])
)
def test_get_col_map(self):
np.testing.assert_array_equal(
records.col_mapper.get_col_map()[0],
records.col_mapper.col_map[0]
)
np.testing.assert_array_equal(
records.col_mapper.get_col_map()[1],
records.col_mapper.col_map[1]
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_map()[0],
np.array([0, 1, 2, 3, 4, 5])
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_map()[1],
np.array([6])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_map()[0],
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_map()[1],
np.array([6, 3])
)
def test_is_sorted(self):
assert records.col_mapper.is_sorted()
assert not records_nosort.col_mapper.is_sorted()
# ############# mapped_array.py ############# #
mapped_array = records.map_field('some_field1')
mapped_array_grouped = records_grouped.map_field('some_field1')
mapped_array_nosort = records_nosort.map_field('some_field1')
mapped_array_nosort_grouped = records_nosort_grouped.map_field('some_field1')
mapping = {x: 'test_' + str(x) for x in pd.unique(mapped_array.values)}
mp_mapped_array = mapped_array.replace(mapping=mapping)
mp_mapped_array_grouped = mapped_array_grouped.replace(mapping=mapping)
class TestMappedArray:
def test_config(self, tmp_path):
assert vbt.MappedArray.loads(mapped_array.dumps()) == mapped_array
mapped_array.save(tmp_path / 'mapped_array')
assert vbt.MappedArray.load(tmp_path / 'mapped_array') == mapped_array
def test_mapped_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].values,
np.array([10., 11., 12.])
)
np.testing.assert_array_equal(
mapped_array.values,
np.array([10., 11., 12., 13., 14., 13., 12., 11., 10.])
)
def test_id_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].id_arr,
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
mapped_array.id_arr,
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
)
def test_col_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].col_arr,
np.array([0, 0, 0])
)
np.testing.assert_array_equal(
mapped_array.col_arr,
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
)
def test_idx_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].idx_arr,
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
mapped_array.idx_arr,
np.array([0, 1, 2, 0, 1, 2, 0, 1, 2])
)
def test_is_sorted(self):
assert mapped_array.is_sorted()
assert mapped_array.is_sorted(incl_id=True)
assert not mapped_array_nosort.is_sorted()
assert not mapped_array_nosort.is_sorted(incl_id=True)
def test_sort(self):
assert mapped_array.sort().is_sorted()
assert mapped_array.sort().is_sorted(incl_id=True)
assert mapped_array.sort(incl_id=True).is_sorted(incl_id=True)
assert mapped_array_nosort.sort().is_sorted()
assert mapped_array_nosort.sort().is_sorted(incl_id=True)
assert mapped_array_nosort.sort(incl_id=True).is_sorted(incl_id=True)
def test_apply_mask(self):
mask_a = mapped_array['a'].values >= mapped_array['a'].values.mean()
np.testing.assert_array_equal(
mapped_array['a'].apply_mask(mask_a).id_arr,
np.array([1, 2])
)
mask = mapped_array.values >= mapped_array.values.mean()
filtered = mapped_array.apply_mask(mask)
np.testing.assert_array_equal(
filtered.id_arr,
np.array([2, 3, 4, 5, 6])
)
np.testing.assert_array_equal(filtered.col_arr, mapped_array.col_arr[mask])
np.testing.assert_array_equal(filtered.idx_arr, mapped_array.idx_arr[mask])
assert mapped_array_grouped.apply_mask(mask).wrapper == mapped_array_grouped.wrapper
assert mapped_array_grouped.apply_mask(mask, group_by=False).wrapper.grouper.group_by is None
def test_map_to_mask(self):
@njit
def every_2_nb(inout, idxs, col, mapped_arr):
inout[idxs[::2]] = True
np.testing.assert_array_equal(
mapped_array.map_to_mask(every_2_nb),
np.array([True, False, True, True, False, True, True, False, True])
)
def test_top_n_mask(self):
np.testing.assert_array_equal(
mapped_array.top_n_mask(1),
np.array([False, False, True, False, True, False, True, False, False])
)
def test_bottom_n_mask(self):
np.testing.assert_array_equal(
mapped_array.bottom_n_mask(1),
np.array([True, False, False, True, False, False, False, False, True])
)
def test_top_n(self):
np.testing.assert_array_equal(
mapped_array.top_n(1).id_arr,
np.array([2, 4, 6])
)
def test_bottom_n(self):
np.testing.assert_array_equal(
mapped_array.bottom_n(1).id_arr,
np.array([0, 3, 8])
)
def test_to_pd(self):
target = pd.DataFrame(
np.array([
[10., 13., 12., np.nan],
[11., 14., 11., np.nan],
[12., 13., 10., np.nan]
]),
index=wrapper.index,
columns=wrapper.columns
)
pd.testing.assert_series_equal(
mapped_array['a'].to_pd(),
target['a']
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(),
target
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(fill_value=0.),
target.fillna(0.)
)
mapped_array2 = vbt.MappedArray(
wrapper,
records_arr['some_field1'].tolist() + [1],
records_arr['col'].tolist() + [2],
idx_arr=records_arr['idx'].tolist() + [2]
)
with pytest.raises(Exception):
_ = mapped_array2.to_pd()
pd.testing.assert_series_equal(
mapped_array['a'].to_pd(ignore_index=True),
pd.Series(np.array([10., 11., 12.]), name='a')
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(ignore_index=True),
pd.DataFrame(
np.array([
[10., 13., 12., np.nan],
[11., 14., 11., np.nan],
[12., 13., 10., np.nan]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(fill_value=0, ignore_index=True),
pd.DataFrame(
np.array([
[10., 13., 12., 0.],
[11., 14., 11., 0.],
[12., 13., 10., 0.]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array_grouped.to_pd(ignore_index=True),
pd.DataFrame(
np.array([
[10., 12.],
[11., 11.],
[12., 10.],
[13., np.nan],
[14., np.nan],
[13., np.nan],
]),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
def test_apply(self):
@njit
def cumsum_apply_nb(idxs, col, a):
return np.cumsum(a)
np.testing.assert_array_equal(
mapped_array['a'].apply(cumsum_apply_nb).values,
np.array([10., 21., 33.])
)
np.testing.assert_array_equal(
mapped_array.apply(cumsum_apply_nb).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
mapped_array_grouped.apply(cumsum_apply_nb, apply_per_group=False).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
mapped_array_grouped.apply(cumsum_apply_nb, apply_per_group=True).values,
np.array([10., 21., 33., 46., 60., 73., 12., 23., 33.])
)
assert mapped_array_grouped.apply(cumsum_apply_nb).wrapper == \
mapped_array.apply(cumsum_apply_nb, group_by=group_by).wrapper
assert mapped_array.apply(cumsum_apply_nb, group_by=False).wrapper.grouper.group_by is None
def test_reduce(self):
@njit
def mean_reduce_nb(col, a):
return np.mean(a)
assert mapped_array['a'].reduce(mean_reduce_nb) == 11.
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb),
pd.Series(np.array([11., 13.333333333333334, 11., np.nan]), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, fill_value=0.),
pd.Series(np.array([11., 13.333333333333334, 11., 0.]), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, fill_value=0., wrap_kwargs=dict(dtype=np.int_)),
pd.Series(np.array([11., 13.333333333333334, 11., 0.]), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, wrap_kwargs=dict(to_timedelta=True)),
pd.Series(np.array([11., 13.333333333333334, 11., np.nan]), index=wrapper.columns).rename('reduce') * day_dt
)
pd.testing.assert_series_equal(
mapped_array_grouped.reduce(mean_reduce_nb),
pd.Series([12.166666666666666, 11.0], index=pd.Index(['g1', 'g2'], dtype='object')).rename('reduce')
)
assert mapped_array_grouped['g1'].reduce(mean_reduce_nb) == 12.166666666666666
pd.testing.assert_series_equal(
mapped_array_grouped[['g1']].reduce(mean_reduce_nb),
pd.Series([12.166666666666666], index=pd.Index(['g1'], dtype='object')).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb),
mapped_array_grouped.reduce(mean_reduce_nb, group_by=False)
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, group_by=group_by),
mapped_array_grouped.reduce(mean_reduce_nb)
)
def test_reduce_to_idx(self):
@njit
def argmin_reduce_nb(col, a):
return np.argmin(a)
assert mapped_array['a'].reduce(argmin_reduce_nb, returns_idx=True) == 'x'
pd.testing.assert_series_equal(
mapped_array.reduce(argmin_reduce_nb, returns_idx=True),
pd.Series(np.array(['x', 'x', 'z', np.nan], dtype=object), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(argmin_reduce_nb, returns_idx=True, to_index=False),
pd.Series(np.array([0, 0, 2, -1], dtype=int), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array_grouped.reduce(argmin_reduce_nb, returns_idx=True, to_index=False),
pd.Series(np.array([0, 2], dtype=int), index=pd.Index(['g1', 'g2'], dtype='object')).rename('reduce')
)
def test_reduce_to_array(self):
@njit
def min_max_reduce_nb(col, a):
return np.array([np.min(a), np.max(a)])
pd.testing.assert_series_equal(
mapped_array['a'].reduce(min_max_reduce_nb, returns_array=True,
wrap_kwargs=dict(name_or_index=['min', 'max'])),
pd.Series([10., 12.], index=pd.Index(['min', 'max'], dtype='object'), name='a')
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, wrap_kwargs=dict(name_or_index=['min', 'max'])),
pd.DataFrame(
np.array([
[10., 13., 10., np.nan],
[12., 14., 12., np.nan]
]),
index=pd.Index(['min', 'max'], dtype='object'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, fill_value=0.),
pd.DataFrame(
np.array([
[10., 13., 10., 0.],
[12., 14., 12., 0.]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, wrap_kwargs=dict(to_timedelta=True)),
pd.DataFrame(
np.array([
[10., 13., 10., np.nan],
[12., 14., 12., np.nan]
]),
columns=wrapper.columns
) * day_dt
)
pd.testing.assert_frame_equal(
mapped_array_grouped.reduce(min_max_reduce_nb, returns_array=True),
pd.DataFrame(
np.array([
[10., 10.],
[14., 12.]
]),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True),
mapped_array_grouped.reduce(min_max_reduce_nb, returns_array=True, group_by=False)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, group_by=group_by),
mapped_array_grouped.reduce(min_max_reduce_nb, returns_array=True)
)
pd.testing.assert_series_equal(
mapped_array_grouped['g1'].reduce(min_max_reduce_nb, returns_array=True),
pd.Series([10., 14.], name='g1')
)
pd.testing.assert_frame_equal(
mapped_array_grouped[['g1']].reduce(min_max_reduce_nb, returns_array=True),
pd.DataFrame([[10.], [14.]], columns=pd.Index(['g1'], dtype='object'))
)
def test_reduce_to_idx_array(self):
@njit
def idxmin_idxmax_reduce_nb(col, a):
return np.array([np.argmin(a), np.argmax(a)])
pd.testing.assert_series_equal(
mapped_array['a'].reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
wrap_kwargs=dict(name_or_index=['min', 'max'])
),
pd.Series(
np.array(['x', 'z'], dtype=object),
index=pd.Index(['min', 'max'], dtype='object'),
name='a'
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
wrap_kwargs=dict(name_or_index=['min', 'max'])
),
pd.DataFrame(
{
'a': ['x', 'z'],
'b': ['x', 'y'],
'c': ['z', 'x'],
'd': [np.nan, np.nan]
},
index=pd.Index(['min', 'max'], dtype='object')
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
to_index=False
),
pd.DataFrame(
np.array([
[0, 0, 2, -1],
[2, 1, 0, -1]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array_grouped.reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
to_index=False
),
pd.DataFrame(
np.array([
[0, 2],
[1, 0]
]),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
def test_nth(self):
assert mapped_array['a'].nth(0) == 10.
pd.testing.assert_series_equal(
mapped_array.nth(0),
pd.Series(np.array([10., 13., 12., np.nan]), index=wrapper.columns).rename('nth')
)
assert mapped_array['a'].nth(-1) == 12.
pd.testing.assert_series_equal(
mapped_array.nth(-1),
pd.Series(np.array([12., 13., 10., np.nan]), index=wrapper.columns).rename('nth')
)
with pytest.raises(Exception):
_ = mapped_array.nth(10)
pd.testing.assert_series_equal(
mapped_array_grouped.nth(0),
pd.Series(np.array([10., 12.]), index=pd.Index(['g1', 'g2'], dtype='object')).rename('nth')
)
def test_nth_index(self):
assert mapped_array['a'].nth(0) == 10.
pd.testing.assert_series_equal(
mapped_array.nth_index(0),
pd.Series(
np.array(['x', 'x', 'x', np.nan], dtype='object'),
index=wrapper.columns
).rename('nth_index')
)
assert mapped_array['a'].nth(-1) == 12.
pd.testing.assert_series_equal(
mapped_array.nth_index(-1),
pd.Series(
np.array(['z', 'z', 'z', np.nan], dtype='object'),
index=wrapper.columns
).rename('nth_index')
)
with pytest.raises(Exception):
_ = mapped_array.nth_index(10)
pd.testing.assert_series_equal(
mapped_array_grouped.nth_index(0),
pd.Series(
np.array(['x', 'x'], dtype='object'),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('nth_index')
)
def test_min(self):
assert mapped_array['a'].min() == mapped_array['a'].to_pd().min()
pd.testing.assert_series_equal(
mapped_array.min(),
mapped_array.to_pd().min().rename('min')
)
pd.testing.assert_series_equal(
mapped_array_grouped.min(),
pd.Series([10., 10.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('min')
)
def test_max(self):
assert mapped_array['a'].max() == mapped_array['a'].to_pd().max()
pd.testing.assert_series_equal(
mapped_array.max(),
mapped_array.to_pd().max().rename('max')
)
pd.testing.assert_series_equal(
mapped_array_grouped.max(),
pd.Series([14., 12.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('max')
)
def test_mean(self):
assert mapped_array['a'].mean() == mapped_array['a'].to_pd().mean()
pd.testing.assert_series_equal(
mapped_array.mean(),
mapped_array.to_pd().mean().rename('mean')
)
pd.testing.assert_series_equal(
mapped_array_grouped.mean(),
pd.Series([12.166667, 11.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('mean')
)
def test_median(self):
assert mapped_array['a'].median() == mapped_array['a'].to_pd().median()
pd.testing.assert_series_equal(
mapped_array.median(),
mapped_array.to_pd().median().rename('median')
)
pd.testing.assert_series_equal(
mapped_array_grouped.median(),
pd.Series([12.5, 11.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('median')
)
def test_std(self):
assert mapped_array['a'].std() == mapped_array['a'].to_pd().std()
pd.testing.assert_series_equal(
mapped_array.std(),
mapped_array.to_pd().std().rename('std')
)
pd.testing.assert_series_equal(
mapped_array.std(ddof=0),
mapped_array.to_pd().std(ddof=0).rename('std')
)
pd.testing.assert_series_equal(
mapped_array_grouped.std(),
pd.Series([1.4719601443879746, 1.0], index=pd.Index(['g1', 'g2'], dtype='object')).rename('std')
)
def test_sum(self):
assert mapped_array['a'].sum() == mapped_array['a'].to_pd().sum()
pd.testing.assert_series_equal(
mapped_array.sum(),
mapped_array.to_pd().sum().rename('sum')
)
pd.testing.assert_series_equal(
mapped_array_grouped.sum(),
pd.Series([73.0, 33.0], index=pd.Index(['g1', 'g2'], dtype='object')).rename('sum')
)
def test_count(self):
assert mapped_array['a'].count() == mapped_array['a'].to_pd().count()
pd.testing.assert_series_equal(
mapped_array.count(),
mapped_array.to_pd().count().rename('count')
)
pd.testing.assert_series_equal(
mapped_array_grouped.count(),
pd.Series([6, 3], index=pd.Index(['g1', 'g2'], dtype='object')).rename('count')
)
def test_idxmin(self):
assert mapped_array['a'].idxmin() == mapped_array['a'].to_pd().idxmin()
pd.testing.assert_series_equal(
mapped_array.idxmin(),
mapped_array.to_pd().idxmin().rename('idxmin')
)
pd.testing.assert_series_equal(
mapped_array_grouped.idxmin(),
pd.Series(
np.array(['x', 'z'], dtype=object),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('idxmin')
)
def test_idxmax(self):
assert mapped_array['a'].idxmax() == mapped_array['a'].to_pd().idxmax()
pd.testing.assert_series_equal(
mapped_array.idxmax(),
mapped_array.to_pd().idxmax().rename('idxmax')
)
pd.testing.assert_series_equal(
mapped_array_grouped.idxmax(),
pd.Series(
np.array(['y', 'x'], dtype=object),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('idxmax')
)
def test_describe(self):
pd.testing.assert_series_equal(
mapped_array['a'].describe(),
mapped_array['a'].to_pd().describe()
)
pd.testing.assert_frame_equal(
mapped_array.describe(percentiles=None),
mapped_array.to_pd().describe(percentiles=None)
)
pd.testing.assert_frame_equal(
mapped_array.describe(percentiles=[]),
mapped_array.to_pd().describe(percentiles=[])
)
pd.testing.assert_frame_equal(
mapped_array.describe(percentiles=np.arange(0, 1, 0.1)),
mapped_array.to_pd().describe(percentiles=np.arange(0, 1, 0.1))
)
pd.testing.assert_frame_equal(
mapped_array_grouped.describe(),
pd.DataFrame(
np.array([
[6., 3.],
[12.16666667, 11.],
[1.47196014, 1.],
[10., 10.],
[11.25, 10.5],
[12.5, 11.],
[13., 11.5],
[14., 12.]
]),
columns=pd.Index(['g1', 'g2'], dtype='object'),
index=mapped_array.describe().index
)
)
def test_value_counts(self):
pd.testing.assert_series_equal(
mapped_array['a'].value_counts(),
pd.Series(
np.array([1, 1, 1]),
index=pd.Float64Index([10.0, 11.0, 12.0], dtype='float64'),
name='a'
)
)
pd.testing.assert_series_equal(
mapped_array['a'].value_counts(mapping=mapping),
pd.Series(
np.array([1, 1, 1]),
index=pd.Index(['test_10.0', 'test_11.0', 'test_12.0'], dtype='object'),
name='a'
)
)
pd.testing.assert_frame_equal(
mapped_array.value_counts(),
pd.DataFrame(
np.array([
[1, 0, 1, 0],
[1, 0, 1, 0],
[1, 0, 1, 0],
[0, 2, 0, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([10.0, 11.0, 12.0, 13.0, 14.0], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array_grouped.value_counts(),
pd.DataFrame(
np.array([
[1, 1],
[1, 1],
[1, 1],
[2, 0],
[1, 0]
]),
index=pd.Float64Index([10.0, 11.0, 12.0, 13.0, 14.0], dtype='float64'),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
mapped_array2 = mapped_array.replace(mapped_arr=[4, 4, 3, 2, np.nan, 4, 3, 2, 1])
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort_uniques=False),
pd.DataFrame(
np.array([
[2, 1, 0, 0],
[1, 0, 1, 0],
[0, 1, 1, 0],
[0, 0, 1, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([4.0, 3.0, 2.0, 1.0, None], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort_uniques=True),
pd.DataFrame(
np.array([
[0, 0, 1, 0],
[0, 1, 1, 0],
[1, 0, 1, 0],
[2, 1, 0, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([1.0, 2.0, 3.0, 4.0, None], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True),
pd.DataFrame(
np.array([
[2, 1, 0, 0],
[0, 1, 1, 0],
[1, 0, 1, 0],
[0, 0, 1, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([4.0, 2.0, 3.0, 1.0, np.nan], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True, ascending=True),
pd.DataFrame(
np.array([
[0, 0, 1, 0],
[0, 1, 0, 0],
[0, 1, 1, 0],
[1, 0, 1, 0],
[2, 1, 0, 0]
]),
index=pd.Float64Index([1.0, np.nan, 2.0, 3.0, 4.0], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True, normalize=True),
pd.DataFrame(
np.array([
[0.2222222222222222, 0.1111111111111111, 0.0, 0.0],
[0.0, 0.1111111111111111, 0.1111111111111111, 0.0],
[0.1111111111111111, 0.0, 0.1111111111111111, 0.0],
[0.0, 0.0, 0.1111111111111111, 0.0],
[0.0, 0.1111111111111111, 0.0, 0.0]
]),
index=pd.Float64Index([4.0, 2.0, 3.0, 1.0, np.nan], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True, normalize=True, dropna=True),
pd.DataFrame(
np.array([
[0.25, 0.125, 0.0, 0.0],
[0.0, 0.125, 0.125, 0.0],
[0.125, 0.0, 0.125, 0.0],
[0.0, 0.0, 0.125, 0.0]
]),
index=pd.Float64Index([4.0, 2.0, 3.0, 1.0], dtype='float64'),
columns=wrapper.columns
)
)
@pytest.mark.parametrize(
"test_nosort",
[False, True],
)
def test_indexing(self, test_nosort):
if test_nosort:
ma = mapped_array_nosort
ma_grouped = mapped_array_nosort_grouped
else:
ma = mapped_array
ma_grouped = mapped_array_grouped
np.testing.assert_array_equal(
ma['a'].id_arr,
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
ma['a'].col_arr,
np.array([0, 0, 0])
)
pd.testing.assert_index_equal(
ma['a'].wrapper.columns,
pd.Index(['a'], dtype='object')
)
np.testing.assert_array_equal(
ma['b'].id_arr,
np.array([3, 4, 5])
)
np.testing.assert_array_equal(
ma['b'].col_arr,
np.array([0, 0, 0])
)
pd.testing.assert_index_equal(
ma['b'].wrapper.columns,
pd.Index(['b'], dtype='object')
)
np.testing.assert_array_equal(
ma[['a', 'a']].id_arr,
np.array([0, 1, 2, 0, 1, 2])
)
np.testing.assert_array_equal(
ma[['a', 'a']].col_arr,
np.array([0, 0, 0, 1, 1, 1])
)
pd.testing.assert_index_equal(
ma[['a', 'a']].wrapper.columns,
pd.Index(['a', 'a'], dtype='object')
)
np.testing.assert_array_equal(
ma[['a', 'b']].id_arr,
np.array([0, 1, 2, 3, 4, 5])
)
np.testing.assert_array_equal(
ma[['a', 'b']].col_arr,
np.array([0, 0, 0, 1, 1, 1])
)
pd.testing.assert_index_equal(
ma[['a', 'b']].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
with pytest.raises(Exception):
_ = ma.iloc[::2, :] # changing time not supported
pd.testing.assert_index_equal(
ma_grouped['g1'].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
assert ma_grouped['g1'].wrapper.ndim == 2
assert ma_grouped['g1'].wrapper.grouped_ndim == 1
pd.testing.assert_index_equal(
ma_grouped['g1'].wrapper.grouper.group_by,
pd.Index(['g1', 'g1'], dtype='object')
)
pd.testing.assert_index_equal(
ma_grouped['g2'].wrapper.columns,
pd.Index(['c', 'd'], dtype='object')
)
assert ma_grouped['g2'].wrapper.ndim == 2
assert ma_grouped['g2'].wrapper.grouped_ndim == 1
pd.testing.assert_index_equal(
ma_grouped['g2'].wrapper.grouper.group_by,
pd.Index(['g2', 'g2'], dtype='object')
)
pd.testing.assert_index_equal(
ma_grouped[['g1']].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
assert ma_grouped[['g1']].wrapper.ndim == 2
assert ma_grouped[['g1']].wrapper.grouped_ndim == 2
pd.testing.assert_index_equal(
ma_grouped[['g1']].wrapper.grouper.group_by,
pd.Index(['g1', 'g1'], dtype='object')
)
pd.testing.assert_index_equal(
ma_grouped[['g1', 'g2']].wrapper.columns,
pd.Index(['a', 'b', 'c', 'd'], dtype='object')
)
assert ma_grouped[['g1', 'g2']].wrapper.ndim == 2
assert ma_grouped[['g1', 'g2']].wrapper.grouped_ndim == 2
pd.testing.assert_index_equal(
ma_grouped[['g1', 'g2']].wrapper.grouper.group_by,
pd.Index(['g1', 'g1', 'g2', 'g2'], dtype='object')
)
def test_magic(self):
a = vbt.MappedArray(
wrapper,
records_arr['some_field1'],
records_arr['col'],
id_arr=records_arr['id'],
idx_arr=records_arr['idx']
)
a_inv = vbt.MappedArray(
wrapper,
records_arr['some_field1'][::-1],
records_arr['col'][::-1],
id_arr=records_arr['id'][::-1],
idx_arr=records_arr['idx'][::-1]
)
b = records_arr['some_field2']
a_bool = vbt.MappedArray(
wrapper,
records_arr['some_field1'] > np.mean(records_arr['some_field1']),
records_arr['col'],
id_arr=records_arr['id'],
idx_arr=records_arr['idx']
)
b_bool = records_arr['some_field2'] > np.mean(records_arr['some_field2'])
assert a ** a == a ** 2
with pytest.raises(Exception):
_ = a * a_inv
# binary ops
# comparison ops
np.testing.assert_array_equal((a == b).values, a.values == b)
np.testing.assert_array_equal((a != b).values, a.values != b)
np.testing.assert_array_equal((a < b).values, a.values < b)
np.testing.assert_array_equal((a > b).values, a.values > b)
np.testing.assert_array_equal((a <= b).values, a.values <= b)
np.testing.assert_array_equal((a >= b).values, a.values >= b)
# arithmetic ops
np.testing.assert_array_equal((a + b).values, a.values + b)
np.testing.assert_array_equal((a - b).values, a.values - b)
np.testing.assert_array_equal((a * b).values, a.values * b)
np.testing.assert_array_equal((a ** b).values, a.values ** b)
np.testing.assert_array_equal((a % b).values, a.values % b)
np.testing.assert_array_equal((a // b).values, a.values // b)
np.testing.assert_array_equal((a / b).values, a.values / b)
# __r*__ is only called if the left object does not have an __*__ method
np.testing.assert_array_equal((10 + a).values, 10 + a.values)
np.testing.assert_array_equal((10 - a).values, 10 - a.values)
np.testing.assert_array_equal((10 * a).values, 10 * a.values)
np.testing.assert_array_equal((10 ** a).values, 10 ** a.values)
np.testing.assert_array_equal((10 % a).values, 10 % a.values)
np.testing.assert_array_equal((10 // a).values, 10 // a.values)
np.testing.assert_array_equal((10 / a).values, 10 / a.values)
# mask ops
np.testing.assert_array_equal((a_bool & b_bool).values, a_bool.values & b_bool)
np.testing.assert_array_equal((a_bool | b_bool).values, a_bool.values | b_bool)
np.testing.assert_array_equal((a_bool ^ b_bool).values, a_bool.values ^ b_bool)
np.testing.assert_array_equal((True & a_bool).values, True & a_bool.values)
np.testing.assert_array_equal((True | a_bool).values, True | a_bool.values)
np.testing.assert_array_equal((True ^ a_bool).values, True ^ a_bool.values)
# unary ops
np.testing.assert_array_equal((-a).values, -a.values)
np.testing.assert_array_equal((+a).values, +a.values)
np.testing.assert_array_equal((abs(-a)).values, abs((-a.values)))
def test_stats(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Count', 'Mean', 'Std', 'Min', 'Median', 'Max', 'Min Index', 'Max Index'
], dtype='object')
pd.testing.assert_series_equal(
mapped_array.stats(),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'),
2.25, 11.777777777777779, 0.859116756396542, 11.0, 11.666666666666666, 12.666666666666666
],
index=stats_index[:-2],
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
mapped_array.stats(column='a'),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'),
3, 11.0, 1.0, 10.0, 11.0, 12.0, 'x', 'z'
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
mapped_array.stats(column='g1', group_by=group_by),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'),
6, 12.166666666666666, 1.4719601443879746, 10.0, 12.5, 14.0, 'x', 'y'
],
index=stats_index,
name='g1'
)
)
pd.testing.assert_series_equal(
mapped_array['c'].stats(),
mapped_array.stats(column='c')
)
pd.testing.assert_series_equal(
mapped_array['c'].stats(),
mapped_array.stats(column='c', group_by=False)
)
pd.testing.assert_series_equal(
mapped_array_grouped['g2'].stats(),
mapped_array_grouped.stats(column='g2')
)
pd.testing.assert_series_equal(
mapped_array_grouped['g2'].stats(),
mapped_array.stats(column='g2', group_by=group_by)
)
stats_df = mapped_array.stats(agg_func=None)
assert stats_df.shape == (4, 11)
pd.testing.assert_index_equal(stats_df.index, mapped_array.wrapper.columns)
pd.testing.assert_index_equal(stats_df.columns, stats_index)
def test_stats_mapping(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Count', 'Value Counts: test_10.0',
'Value Counts: test_11.0', 'Value Counts: test_12.0',
'Value Counts: test_13.0', 'Value Counts: test_14.0'
], dtype='object')
pd.testing.assert_series_equal(
mp_mapped_array.stats(),
pd.Series([
'x',
'z',
pd.Timedelta('3 days 00:00:00'),
2.25, 0.5, 0.5, 0.5, 0.5, 0.25
],
index=stats_index,
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
mp_mapped_array.stats(column='a'),
pd.Series([
'x',
'z',
pd.Timedelta('3 days 00:00:00'),
3, 1, 1, 1, 0, 0
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
mp_mapped_array.stats(column='g1', group_by=group_by),
pd.Series([
'x',
'z',
pd.Timedelta('3 days 00:00:00'),
6, 1, 1, 1, 2, 1
],
index=stats_index,
name='g1'
)
)
pd.testing.assert_series_equal(
mp_mapped_array.stats(),
mapped_array.stats(settings=dict(mapping=mapping))
)
pd.testing.assert_series_equal(
mp_mapped_array['c'].stats(settings=dict(incl_all_keys=True)),
mp_mapped_array.stats(column='c')
)
pd.testing.assert_series_equal(
mp_mapped_array['c'].stats(settings=dict(incl_all_keys=True)),
mp_mapped_array.stats(column='c', group_by=False)
)
pd.testing.assert_series_equal(
mp_mapped_array_grouped['g2'].stats(settings=dict(incl_all_keys=True)),
mp_mapped_array_grouped.stats(column='g2')
)
pd.testing.assert_series_equal(
mp_mapped_array_grouped['g2'].stats(settings=dict(incl_all_keys=True)),
mp_mapped_array.stats(column='g2', group_by=group_by)
)
stats_df = mp_mapped_array.stats(agg_func=None)
assert stats_df.shape == (4, 9)
pd.testing.assert_index_equal(stats_df.index, mp_mapped_array.wrapper.columns)
pd.testing.assert_index_equal(stats_df.columns, stats_index)
# ############# base.py ############# #
class TestRecords:
def test_config(self, tmp_path):
assert vbt.Records.loads(records['a'].dumps()) == records['a']
assert vbt.Records.loads(records.dumps()) == records
records.save(tmp_path / 'records')
assert vbt.Records.load(tmp_path / 'records') == records
def test_records(self):
pd.testing.assert_frame_equal(
records.records,
pd.DataFrame.from_records(records_arr)
)
def test_recarray(self):
np.testing.assert_array_equal(records['a'].recarray.some_field1, records['a'].values['some_field1'])
np.testing.assert_array_equal(records.recarray.some_field1, records.values['some_field1'])
def test_records_readable(self):
pd.testing.assert_frame_equal(
records.records_readable,
pd.DataFrame([
[0, 'a', 'x', 10.0, 21.0], [1, 'a', 'y', 11.0, 20.0], [2, 'a', 'z', 12.0, 19.0],
[3, 'b', 'x', 13.0, 18.0], [4, 'b', 'y', 14.0, 17.0], [5, 'b', 'z', 13.0, 18.0],
[6, 'c', 'x', 12.0, 19.0], [7, 'c', 'y', 11.0, 20.0], [8, 'c', 'z', 10.0, 21.0]
], columns=pd.Index(['Id', 'Column', 'Timestamp', 'some_field1', 'some_field2'], dtype='object'))
)
def test_is_sorted(self):
assert records.is_sorted()
assert records.is_sorted(incl_id=True)
assert not records_nosort.is_sorted()
assert not records_nosort.is_sorted(incl_id=True)
def test_sort(self):
assert records.sort().is_sorted()
assert records.sort().is_sorted(incl_id=True)
assert records.sort(incl_id=True).is_sorted(incl_id=True)
assert records_nosort.sort().is_sorted()
assert records_nosort.sort().is_sorted(incl_id=True)
assert records_nosort.sort(incl_id=True).is_sorted(incl_id=True)
def test_apply_mask(self):
mask_a = records['a'].values['some_field1'] >= records['a'].values['some_field1'].mean()
record_arrays_close(
records['a'].apply_mask(mask_a).values,
np.array([
(1, 0, 1, 11., 20.), (2, 0, 2, 12., 19.)
], dtype=example_dt)
)
mask = records.values['some_field1'] >= records.values['some_field1'].mean()
filtered = records.apply_mask(mask)
record_arrays_close(
filtered.values,
np.array([
(2, 0, 2, 12., 19.), (3, 1, 0, 13., 18.), (4, 1, 1, 14., 17.),
(5, 1, 2, 13., 18.), (6, 2, 0, 12., 19.)
], dtype=example_dt)
)
assert records_grouped.apply_mask(mask).wrapper == records_grouped.wrapper
def test_map_field(self):
np.testing.assert_array_equal(
records['a'].map_field('some_field1').values,
np.array([10., 11., 12.])
)
np.testing.assert_array_equal(
records.map_field('some_field1').values,
np.array([10., 11., 12., 13., 14., 13., 12., 11., 10.])
)
assert records_grouped.map_field('some_field1').wrapper == \
records.map_field('some_field1', group_by=group_by).wrapper
assert records_grouped.map_field('some_field1', group_by=False).wrapper.grouper.group_by is None
def test_map(self):
@njit
def map_func_nb(record):
return record['some_field1'] + record['some_field2']
np.testing.assert_array_equal(
records['a'].map(map_func_nb).values,
np.array([31., 31., 31.])
)
np.testing.assert_array_equal(
records.map(map_func_nb).values,
np.array([31., 31., 31., 31., 31., 31., 31., 31., 31.])
)
assert records_grouped.map(map_func_nb).wrapper == \
records.map(map_func_nb, group_by=group_by).wrapper
assert records_grouped.map(map_func_nb, group_by=False).wrapper.grouper.group_by is None
def test_map_array(self):
arr = records_arr['some_field1'] + records_arr['some_field2']
np.testing.assert_array_equal(
records['a'].map_array(arr[:3]).values,
np.array([31., 31., 31.])
)
np.testing.assert_array_equal(
records.map_array(arr).values,
np.array([31., 31., 31., 31., 31., 31., 31., 31., 31.])
)
assert records_grouped.map_array(arr).wrapper == \
records.map_array(arr, group_by=group_by).wrapper
assert records_grouped.map_array(arr, group_by=False).wrapper.grouper.group_by is None
def test_apply(self):
@njit
def cumsum_apply_nb(records):
return np.cumsum(records['some_field1'])
np.testing.assert_array_equal(
records['a'].apply(cumsum_apply_nb).values,
np.array([10., 21., 33.])
)
np.testing.assert_array_equal(
records.apply(cumsum_apply_nb).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
records_grouped.apply(cumsum_apply_nb, apply_per_group=False).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
records_grouped.apply(cumsum_apply_nb, apply_per_group=True).values,
np.array([10., 21., 33., 46., 60., 73., 12., 23., 33.])
)
assert records_grouped.apply(cumsum_apply_nb).wrapper == \
records.apply(cumsum_apply_nb, group_by=group_by).wrapper
assert records_grouped.apply(cumsum_apply_nb, group_by=False).wrapper.grouper.group_by is None
def test_count(self):
assert records['a'].count() == 3
pd.testing.assert_series_equal(
records.count(),
pd.Series(
np.array([3, 3, 3, 0]),
index=wrapper.columns
).rename('count')
)
assert records_grouped['g1'].count() == 6
pd.testing.assert_series_equal(
records_grouped.count(),
pd.Series(
np.array([6, 3]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('count')
)
@pytest.mark.parametrize(
"test_nosort",
[False, True],
)
def test_indexing(self, test_nosort):
if test_nosort:
r = records_nosort
r_grouped = records_nosort_grouped
else:
r = records
r_grouped = records_grouped
record_arrays_close(
r['a'].values,
np.array([
(0, 0, 0, 10., 21.), (1, 0, 1, 11., 20.), (2, 0, 2, 12., 19.)
], dtype=example_dt)
)
pd.testing.assert_index_equal(
r['a'].wrapper.columns,
pd.Index(['a'], dtype='object')
)
pd.testing.assert_index_equal(
r['b'].wrapper.columns,
pd.Index(['b'], dtype='object')
)
record_arrays_close(
r[['a', 'a']].values,
np.array([
(0, 0, 0, 10., 21.), (1, 0, 1, 11., 20.), (2, 0, 2, 12., 19.),
(0, 1, 0, 10., 21.), (1, 1, 1, 11., 20.), (2, 1, 2, 12., 19.)
], dtype=example_dt)
)
pd.testing.assert_index_equal(
r[['a', 'a']].wrapper.columns,
pd.Index(['a', 'a'], dtype='object')
)
record_arrays_close(
r[['a', 'b']].values,
np.array([
(0, 0, 0, 10., 21.), (1, 0, 1, 11., 20.), (2, 0, 2, 12., 19.),
(3, 1, 0, 13., 18.), (4, 1, 1, 14., 17.), (5, 1, 2, 13., 18.)
], dtype=example_dt)
)
pd.testing.assert_index_equal(
r[['a', 'b']].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
with pytest.raises(Exception):
_ = r.iloc[::2, :] # changing time not supported
pd.testing.assert_index_equal(
r_grouped['g1'].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
assert r_grouped['g1'].wrapper.ndim == 2
assert r_grouped['g1'].wrapper.grouped_ndim == 1
pd.testing.assert_index_equal(
r_grouped['g1'].wrapper.grouper.group_by,
pd.Index(['g1', 'g1'], dtype='object')
)
pd.testing.assert_index_equal(
r_grouped['g2'].wrapper.columns,
pd.Index(['c', 'd'], dtype='object')
)
assert r_grouped['g2'].wrapper.ndim == 2
assert r_grouped['g2'].wrapper.grouped_ndim == 1
pd.testing.assert_index_equal(
r_grouped['g2'].wrapper.grouper.group_by,
pd.Index(['g2', 'g2'], dtype='object')
)
pd.testing.assert_index_equal(
r_grouped[['g1']].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
assert r_grouped[['g1']].wrapper.ndim == 2
assert r_grouped[['g1']].wrapper.grouped_ndim == 2
pd.testing.assert_index_equal(
r_grouped[['g1']].wrapper.grouper.group_by,
pd.Index(['g1', 'g1'], dtype='object')
)
pd.testing.assert_index_equal(
r_grouped[['g1', 'g2']].wrapper.columns,
pd.Index(['a', 'b', 'c', 'd'], dtype='object')
)
assert r_grouped[['g1', 'g2']].wrapper.ndim == 2
assert r_grouped[['g1', 'g2']].wrapper.grouped_ndim == 2
pd.testing.assert_index_equal(
r_grouped[['g1', 'g2']].wrapper.grouper.group_by,
pd.Index(['g1', 'g1', 'g2', 'g2'], dtype='object')
)
def test_filtering(self):
filtered_records = vbt.Records(wrapper, records_arr[[0, -1]])
record_arrays_close(
filtered_records.values,
np.array([(0, 0, 0, 10., 21.), (8, 2, 2, 10., 21.)], dtype=example_dt)
)
# a
record_arrays_close(
filtered_records['a'].values,
np.array([(0, 0, 0, 10., 21.)], dtype=example_dt)
)
np.testing.assert_array_equal(
filtered_records['a'].map_field('some_field1').id_arr,
np.array([0])
)
assert filtered_records['a'].map_field('some_field1').min() == 10.
assert filtered_records['a'].count() == 1.
# b
record_arrays_close(
filtered_records['b'].values,
np.array([], dtype=example_dt)
)
np.testing.assert_array_equal(
filtered_records['b'].map_field('some_field1').id_arr,
np.array([])
)
assert np.isnan(filtered_records['b'].map_field('some_field1').min())
assert filtered_records['b'].count() == 0.
# c
record_arrays_close(
filtered_records['c'].values,
np.array([(8, 0, 2, 10., 21.)], dtype=example_dt)
)
np.testing.assert_array_equal(
filtered_records['c'].map_field('some_field1').id_arr,
np.array([8])
)
assert filtered_records['c'].map_field('some_field1').min() == 10.
assert filtered_records['c'].count() == 1.
# d
record_arrays_close(
filtered_records['d'].values,
np.array([], dtype=example_dt)
)
np.testing.assert_array_equal(
filtered_records['d'].map_field('some_field1').id_arr,
np.array([])
)
assert np.isnan(filtered_records['d'].map_field('some_field1').min())
assert filtered_records['d'].count() == 0.
def test_stats(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Count'
], dtype='object')
pd.testing.assert_series_equal(
records.stats(),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'), 2.25
],
index=stats_index,
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
records.stats(column='a'),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'), 3
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
records.stats(column='g1', group_by=group_by),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'), 6
],
index=stats_index,
name='g1'
)
)
pd.testing.assert_series_equal(
records['c'].stats(),
records.stats(column='c')
)
pd.testing.assert_series_equal(
records['c'].stats(),
records.stats(column='c', group_by=False)
)
pd.testing.assert_series_equal(
records_grouped['g2'].stats(),
records_grouped.stats(column='g2')
)
pd.testing.assert_series_equal(
records_grouped['g2'].stats(),
records.stats(column='g2', group_by=group_by)
)
stats_df = records.stats(agg_func=None)
assert stats_df.shape == (4, 4)
pd.testing.assert_index_equal(stats_df.index, records.wrapper.columns)
pd.testing.assert_index_equal(stats_df.columns, stats_index)
# ############# ranges.py ############# #
ts = pd.DataFrame({
'a': [1, -1, 3, -1, 5, -1],
'b': [-1, -1, -1, 4, 5, 6],
'c': [1, 2, 3, -1, -1, -1],
'd': [-1, -1, -1, -1, -1, -1]
}, index=[
datetime(2020, 1, 1),
datetime(2020, 1, 2),
datetime(2020, 1, 3),
datetime(2020, 1, 4),
datetime(2020, 1, 5),
datetime(2020, 1, 6)
])
ranges = vbt.Ranges.from_ts(ts, wrapper_kwargs=dict(freq='1 days'))
ranges_grouped = vbt.Ranges.from_ts(ts, wrapper_kwargs=dict(freq='1 days', group_by=group_by))
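# In `ts`, -1 marks a gap: each run of non-(-1) values in a column becomes one
# range record (test_to_mask below checks to_mask() against `ts != -1`).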
class TestRanges:
def test_mapped_fields(self):
for name in range_dt.names:
np.testing.assert_array_equal(
getattr(ranges, name).values,
ranges.values[name]
)
def test_from_ts(self):
record_arrays_close(
ranges.values,
np.array([
(0, 0, 0, 1, 1), (1, 0, 2, 3, 1), (2, 0, 4, 5, 1), (3, 1, 3, 5, 0), (4, 2, 0, 3, 1)
], dtype=range_dt)
)
assert ranges.wrapper.freq == day_dt
pd.testing.assert_index_equal(
ranges_grouped.wrapper.grouper.group_by,
group_by
)
def test_records_readable(self):
records_readable = ranges.records_readable
np.testing.assert_array_equal(
records_readable['Range Id'].values,
np.array([
0, 1, 2, 3, 4
])
)
np.testing.assert_array_equal(
records_readable['Column'].values,
np.array([
'a', 'a', 'a', 'b', 'c'
])
)
np.testing.assert_array_equal(
records_readable['Start Timestamp'].values,
np.array([
'2020-01-01T00:00:00.000000000', '2020-01-03T00:00:00.000000000',
'2020-01-05T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-01T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['End Timestamp'].values,
np.array([
'2020-01-02T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-06T00:00:00.000000000', '2020-01-06T00:00:00.000000000',
'2020-01-04T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['Status'].values,
np.array([
'Closed', 'Closed', 'Closed', 'Open', 'Closed'
])
)
def test_to_mask(self):
pd.testing.assert_series_equal(
ranges['a'].to_mask(),
ts['a'] != -1
)
pd.testing.assert_frame_equal(
ranges.to_mask(),
ts != -1
)
pd.testing.assert_frame_equal(
ranges_grouped.to_mask(),
pd.DataFrame(
[
[True, True],
[False, True],
[True, True],
[True, False],
[True, False],
[True, False]
],
index=ts.index,
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
def test_duration(self):
np.testing.assert_array_equal(
ranges['a'].duration.values,
np.array([1, 1, 1])
)
np.testing.assert_array_equal(
ranges.duration.values,
np.array([1, 1, 1, 3, 3])
)
def test_avg_duration(self):
assert ranges['a'].avg_duration() == pd.Timedelta('1 days 00:00:00')
pd.testing.assert_series_equal(
ranges.avg_duration(),
pd.Series(
np.array([86400000000000, 259200000000000, 259200000000000, 'NaT'], dtype='timedelta64[ns]'),
index=wrapper.columns
).rename('avg_duration')
)
pd.testing.assert_series_equal(
ranges_grouped.avg_duration(),
pd.Series(
np.array([129600000000000, 259200000000000], dtype='timedelta64[ns]'),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('avg_duration')
)
def test_max_duration(self):
assert ranges['a'].max_duration() == pd.Timedelta('1 days 00:00:00')
pd.testing.assert_series_equal(
ranges.max_duration(),
pd.Series(
np.array([86400000000000, 259200000000000, 259200000000000, 'NaT'], dtype='timedelta64[ns]'),
index=wrapper.columns
).rename('max_duration')
)
pd.testing.assert_series_equal(
ranges_grouped.max_duration(),
pd.Series(
np.array([259200000000000, 259200000000000], dtype='timedelta64[ns]'),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('max_duration')
)
def test_coverage(self):
assert ranges['a'].coverage() == 0.5
pd.testing.assert_series_equal(
ranges.coverage(),
pd.Series(
np.array([0.5, 0.5, 0.5, np.nan]),
index=ts2.columns
).rename('coverage')
)
pd.testing.assert_series_equal(
ranges.coverage(),
ranges.replace(records_arr=np.repeat(ranges.values, 2)).coverage()
)
pd.testing.assert_series_equal(
ranges.replace(records_arr=np.repeat(ranges.values, 2)).coverage(overlapping=True),
pd.Series(
np.array([1.0, 1.0, 1.0, np.nan]),
index=ts2.columns
).rename('coverage')
)
pd.testing.assert_series_equal(
ranges.coverage(normalize=False),
pd.Series(
np.array([3.0, 3.0, 3.0, np.nan]),
index=ts2.columns
).rename('coverage')
)
pd.testing.assert_series_equal(
ranges.replace(records_arr=np.repeat(ranges.values, 2)).coverage(overlapping=True, normalize=False),
pd.Series(
np.array([3.0, 3.0, 3.0, np.nan]),
index=ts2.columns
).rename('coverage')
)
pd.testing.assert_series_equal(
ranges_grouped.coverage(),
pd.Series(
np.array([0.4166666666666667, 0.25]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('coverage')
)
pd.testing.assert_series_equal(
ranges_grouped.coverage(),
ranges_grouped.replace(records_arr=np.repeat(ranges_grouped.values, 2)).coverage()
)
def test_stats(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Coverage', 'Overlap Coverage',
'Total Records', 'Duration: Min', 'Duration: Median', 'Duration: Max',
'Duration: Mean', 'Duration: Std'
], dtype='object')
pd.testing.assert_series_equal(
ranges.stats(),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-06 00:00:00'),
pd.Timedelta('6 days 00:00:00'), pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('0 days 00:00:00'), 1.25, pd.Timedelta('2 days 08:00:00'),
pd.Timedelta('2 days 08:00:00'), pd.Timedelta('2 days 08:00:00'),
pd.Timedelta('2 days 08:00:00'), pd.Timedelta('0 days 00:00:00')
],
index=stats_index,
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
ranges.stats(column='a'),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-06 00:00:00'),
pd.Timedelta('6 days 00:00:00'), pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('0 days 00:00:00'), 3, pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('1 days 00:00:00'), pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('1 days 00:00:00'), pd.Timedelta('0 days 00:00:00')
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
ranges.stats(column='g1', group_by=group_by),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-06 00:00:00'),
pd.Timedelta('6 days 00:00:00'), pd.Timedelta('5 days 00:00:00'),
pd.Timedelta('1 days 00:00:00'), 4, pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('1 days 00:00:00'), pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('1 days 12:00:00'), pd.Timedelta('1 days 00:00:00')
],
index=stats_index,
name='g1'
)
)
pd.testing.assert_series_equal(
ranges['c'].stats(),
ranges.stats(column='c')
)
pd.testing.assert_series_equal(
ranges['c'].stats(),
ranges.stats(column='c', group_by=False)
)
pd.testing.assert_series_equal(
ranges_grouped['g2'].stats(),
ranges_grouped.stats(column='g2')
)
pd.testing.assert_series_equal(
ranges_grouped['g2'].stats(),
ranges.stats(column='g2', group_by=group_by)
)
stats_df = ranges.stats(agg_func=None)
assert stats_df.shape == (4, 11)
pd.testing.assert_index_equal(stats_df.index, ranges.wrapper.columns)
pd.testing.assert_index_equal(stats_df.columns, stats_index)
# ############# drawdowns.py ############# #
ts2 = pd.DataFrame({
'a': [2, 1, 3, 1, 4, 1],
'b': [1, 2, 1, 3, 1, 4],
'c': [1, 2, 3, 2, 1, 2],
'd': [1, 2, 3, 4, 5, 6]
}, index=[
datetime(2020, 1, 1),
datetime(2020, 1, 2),
datetime(2020, 1, 3),
datetime(2020, 1, 4),
datetime(2020, 1, 5),
datetime(2020, 1, 6)
])
drawdowns = vbt.Drawdowns.from_ts(ts2, wrapper_kwargs=dict(freq='1 days'))
drawdowns_grouped = vbt.Drawdowns.from_ts(ts2, wrapper_kwargs=dict(freq='1 days', group_by=group_by))
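# Each drawdown record produced above captures one peak -> valley -> end episode
# per column (id, column, peak/start/valley/end indices, peak/valley/end values,
# status); test_records_readable below lists the human-readable field names.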
class TestDrawdowns:
def test_mapped_fields(self):
for name in drawdown_dt.names:
np.testing.assert_array_equal(
getattr(drawdowns, name).values,
drawdowns.values[name]
)
def test_ts(self):
pd.testing.assert_frame_equal(
drawdowns.ts,
ts2
)
pd.testing.assert_series_equal(
drawdowns['a'].ts,
ts2['a']
)
pd.testing.assert_frame_equal(
drawdowns_grouped['g1'].ts,
ts2[['a', 'b']]
)
assert drawdowns.replace(ts=None)['a'].ts is None
def test_from_ts(self):
record_arrays_close(
drawdowns.values,
np.array([
(0, 0, 0, 1, 1, 2, 2.0, 1.0, 3.0, 1), (1, 0, 2, 3, 3, 4, 3.0, 1.0, 4.0, 1),
(2, 0, 4, 5, 5, 5, 4.0, 1.0, 1.0, 0), (3, 1, 1, 2, 2, 3, 2.0, 1.0, 3.0, 1),
(4, 1, 3, 4, 4, 5, 3.0, 1.0, 4.0, 1), (5, 2, 2, 3, 4, 5, 3.0, 1.0, 2.0, 0)
], dtype=drawdown_dt)
)
assert drawdowns.wrapper.freq == day_dt
pd.testing.assert_index_equal(
drawdowns_grouped.wrapper.grouper.group_by,
group_by
)
def test_records_readable(self):
records_readable = drawdowns.records_readable
np.testing.assert_array_equal(
records_readable['Drawdown Id'].values,
np.array([
0, 1, 2, 3, 4, 5
])
)
np.testing.assert_array_equal(
records_readable['Column'].values,
np.array([
'a', 'a', 'a', 'b', 'b', 'c'
])
)
np.testing.assert_array_equal(
records_readable['Peak Timestamp'].values,
np.array([
'2020-01-01T00:00:00.000000000', '2020-01-03T00:00:00.000000000',
'2020-01-05T00:00:00.000000000', '2020-01-02T00:00:00.000000000',
'2020-01-04T00:00:00.000000000', '2020-01-03T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['Start Timestamp'].values,
np.array([
'2020-01-02T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-06T00:00:00.000000000', '2020-01-03T00:00:00.000000000',
'2020-01-05T00:00:00.000000000', '2020-01-04T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['Valley Timestamp'].values,
np.array([
'2020-01-02T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-06T00:00:00.000000000', '2020-01-03T00:00:00.000000000',
'2020-01-05T00:00:00.000000000', '2020-01-05T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['End Timestamp'].values,
np.array([
'2020-01-03T00:00:00.000000000', '2020-01-05T00:00:00.000000000',
'2020-01-06T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-06T00:00:00.000000000', '2020-01-06T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['Peak Value'].values,
np.array([
2., 3., 4., 2., 3., 3.
])
)
np.testing.assert_array_equal(
records_readable['Valley Value'].values,
np.array([
1., 1., 1., 1., 1., 1.
])
)
np.testing.assert_array_equal(
records_readable['End Value'].values,
np.array([
3., 4., 1., 3., 4., 2.
])
)
np.testing.assert_array_equal(
records_readable['Status'].values,
np.array([
'Recovered', 'Recovered', 'Active', 'Recovered', 'Recovered', 'Active'
])
)
def test_drawdown(self):
np.testing.assert_array_almost_equal(
drawdowns['a'].drawdown.values,
np.array([-0.5, -0.66666667, -0.75])
)
np.testing.assert_array_almost_equal(
drawdowns.drawdown.values,
np.array([-0.5, -0.66666667, -0.75, -0.5, -0.66666667, -0.66666667])
)
pd.testing.assert_frame_equal(
drawdowns.drawdown.to_pd(),
pd.DataFrame(
np.array([
[np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan],
[-0.5, np.nan, np.nan, np.nan],
[np.nan, -0.5, np.nan, np.nan],
[-0.66666669, np.nan, np.nan, np.nan],
[-0.75, -0.66666669, -0.66666669, np.nan]
]),
index=ts2.index,
columns=ts2.columns
)
)
def test_avg_drawdown(self):
assert drawdowns['a'].avg_drawdown() == -0.6388888888888888
pd.testing.assert_series_equal(
drawdowns.avg_drawdown(),
pd.Series(
np.array([-0.63888889, -0.58333333, -0.66666667, np.nan]),
index=wrapper.columns
).rename('avg_drawdown')
)
pd.testing.assert_series_equal(
drawdowns_grouped.avg_drawdown(),
pd.Series(
np.array([-0.6166666666666666, -0.6666666666666666]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('avg_drawdown')
)
def test_max_drawdown(self):
assert drawdowns['a'].max_drawdown() == -0.75
pd.testing.assert_series_equal(
drawdowns.max_drawdown(),
pd.Series(
np.array([-0.75, -0.66666667, -0.66666667, np.nan]),
index=wrapper.columns
).rename('max_drawdown')
)
pd.testing.assert_series_equal(
drawdowns_grouped.max_drawdown(),
pd.Series(
np.array([-0.75, -0.6666666666666666]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('max_drawdown')
)
def test_recovery_return(self):
np.testing.assert_array_almost_equal(
drawdowns['a'].recovery_return.values,
np.array([2., 3., 0.])
)
np.testing.assert_array_almost_equal(
drawdowns.recovery_return.values,
np.array([2., 3., 0., 2., 3., 1.])
)
pd.testing.assert_frame_equal(
drawdowns.recovery_return.to_pd(),
pd.DataFrame(
np.array([
[np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan],
[2.0, np.nan, np.nan, np.nan],
[np.nan, 2.0, np.nan, np.nan],
[3.0, np.nan, np.nan, np.nan],
[0.0, 3.0, 1.0, np.nan]
]),
index=ts2.index,
columns=ts2.columns
)
)
def test_avg_recovery_return(self):
assert drawdowns['a'].avg_recovery_return() == 1.6666666666666667
pd.testing.assert_series_equal(
drawdowns.avg_recovery_return(),
pd.Series(
np.array([1.6666666666666667, 2.5, 1.0, np.nan]),
index=wrapper.columns
).rename('avg_recovery_return')
)
pd.testing.assert_series_equal(
drawdowns_grouped.avg_recovery_return(),
pd.Series(
np.array([2.0, 1.0]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('avg_recovery_return')
)
def test_max_recovery_return(self):
assert drawdowns['a'].max_recovery_return() == 3.0
pd.testing.assert_series_equal(
drawdowns.max_recovery_return(),
pd.Series(
np.array([3.0, 3.0, 1.0, np.nan]),
index=wrapper.columns
).rename('max_recovery_return')
)
pd.testing.assert_series_equal(
drawdowns_grouped.max_recovery_return(),
pd.Series(
np.array([3.0, 1.0]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('max_recovery_return')
)
def test_duration(self):
np.testing.assert_array_almost_equal(
drawdowns['a'].duration.values,
np.array([1, 1, 1])
)
np.testing.assert_array_almost_equal(
drawdowns.duration.values,
np.array([1, 1, 1, 1, 1, 3])
)
def test_avg_duration(self):
assert drawdowns['a'].avg_duration() == pd.Timedelta('1 days 00:00:00')
pd.testing.assert_series_equal(
drawdowns.avg_duration(),
pd.Series(
np.array([86400000000000, 86400000000000, 259200000000000, 'NaT'], dtype='timedelta64[ns]'),
index=wrapper.columns
).rename('avg_duration')
)
pd.testing.assert_series_equal(
drawdowns_grouped.avg_duration(),
pd.Series(
np.array([86400000000000, 259200000000000], dtype='timedelta64[ns]'),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('avg_duration')
)
def test_max_duration(self):
assert drawdowns['a'].max_duration() == pd.Timedelta('1 days 00:00:00')
pd.testing.assert_series_equal(
drawdowns.max_duration(),
pd.Series(
np.array([86400000000000, 86400000000000, 259200000000000, 'NaT'], dtype='timedelta64[ns]'),
index=wrapper.columns
).rename('max_duration')
)
pd.testing.assert_series_equal(
drawdowns_grouped.max_duration(),
pd.Series(
np.array([86400000000000, 259200000000000], dtype='timedelta64[ns]'),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('max_duration')
)
def test_coverage(self):
assert drawdowns['a'].coverage() == 0.5
pd.testing.assert_series_equal(
drawdowns.coverage(),
pd.Series(
np.array([0.5, 0.3333333333333333, 0.5, np.nan]),
index=ts2.columns
).rename('coverage')
)
pd.testing.assert_series_equal(
drawdowns_grouped.coverage(),
pd.Series(
np.array([0.4166666666666667, 0.25]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('coverage')
)
def test_decline_duration(self):
np.testing.assert_array_almost_equal(
drawdowns['a'].decline_duration.values,
np.array([1., 1., 1.])
)
np.testing.assert_array_almost_equal(
drawdowns.decline_duration.values,
np.array([1., 1., 1., 1., 1., 2.])
)
def test_recovery_duration(self):
np.testing.assert_array_almost_equal(
drawdowns['a'].recovery_duration.values,
np.array([1, 1, 0])
)
np.testing.assert_array_almost_equal(
drawdowns.recovery_duration.values,
np.array([1, 1, 0, 1, 1, 1])
)
def test_recovery_duration_ratio(self):
np.testing.assert_array_almost_equal(
drawdowns['a'].recovery_duration_ratio.values,
np.array([1., 1., 0.])
)
np.testing.assert_array_almost_equal(
drawdowns.recovery_duration_ratio.values,
np.array([1., 1., 0., 1., 1., 0.5])
)
def test_active_records(self):
assert isinstance(drawdowns.active, vbt.Drawdowns)
assert drawdowns.active.wrapper == drawdowns.wrapper
record_arrays_close(
drawdowns['a'].active.values,
np.array([
(2, 0, 4, 5, 5, 5, 4., 1., 1., 0)
], dtype=drawdown_dt)
)
record_arrays_close(
drawdowns['a'].active.values,
drawdowns.active['a'].values
)
record_arrays_close(
drawdowns.active.values,
np.array([
(2, 0, 4, 5, 5, 5, 4.0, 1.0, 1.0, 0), (5, 2, 2, 3, 4, 5, 3.0, 1.0, 2.0, 0)
], dtype=drawdown_dt)
)
def test_recovered_records(self):
assert isinstance(drawdowns.recovered, vbt.Drawdowns)
assert drawdowns.recovered.wrapper == drawdowns.wrapper
record_arrays_close(
drawdowns['a'].recovered.values,
np.array([
(0, 0, 0, 1, 1, 2, 2.0, 1.0, 3.0, 1), (1, 0, 2, 3, 3, 4, 3.0, 1.0, 4.0, 1)
], dtype=drawdown_dt)
)
record_arrays_close(
drawdowns['a'].recovered.values,
drawdowns.recovered['a'].values
)
record_arrays_close(
drawdowns.recovered.values,
np.array([
(0, 0, 0, 1, 1, 2, 2.0, 1.0, 3.0, 1), (1, 0, 2, 3, 3, 4, 3.0, 1.0, 4.0, 1),
(3, 1, 1, 2, 2, 3, 2.0, 1.0, 3.0, 1), (4, 1, 3, 4, 4, 5, 3.0, 1.0, 4.0, 1)
], dtype=drawdown_dt)
)
def test_active_drawdown(self):
assert drawdowns['a'].active_drawdown() == -0.75
pd.testing.assert_series_equal(
drawdowns.active_drawdown(),
pd.Series(
np.array([-0.75, np.nan, -0.3333333333333333, np.nan]),
index=wrapper.columns
).rename('active_drawdown')
)
with pytest.raises(Exception):
drawdowns_grouped.active_drawdown()
def test_active_duration(self):
assert drawdowns['a'].active_duration() == np.timedelta64(86400000000000)
pd.testing.assert_series_equal(
drawdowns.active_duration(),
pd.Series(
np.array([86400000000000, 'NaT', 259200000000000, 'NaT'], dtype='timedelta64[ns]'),
index=wrapper.columns
).rename('active_duration')
)
with pytest.raises(Exception):
drawdowns_grouped.active_duration()
def test_active_recovery(self):
assert drawdowns['a'].active_recovery() == 0.
pd.testing.assert_series_equal(
drawdowns.active_recovery(),
pd.Series(
np.array([0., np.nan, 0.5, np.nan]),
index=wrapper.columns
).rename('active_recovery')
)
with pytest.raises(Exception):
drawdowns_grouped.active_recovery()
def test_active_recovery_return(self):
assert drawdowns['a'].active_recovery_return() == 0.
pd.testing.assert_series_equal(
drawdowns.active_recovery_return(),
pd.Series(
np.array([0., np.nan, 1., np.nan]),
index=wrapper.columns
).rename('active_recovery_return')
)
with pytest.raises(Exception):
drawdowns_grouped.active_recovery_return()
def test_active_recovery_duration(self):
assert drawdowns['a'].active_recovery_duration() == pd.Timedelta('0 days 00:00:00')
pd.testing.assert_series_equal(
drawdowns.active_recovery_duration(),
pd.Series(
np.array([0, 'NaT', 86400000000000, 'NaT'], dtype='timedelta64[ns]'),
index=wrapper.columns
).rename('active_recovery_duration')
)
with pytest.raises(Exception):
drawdowns_grouped.active_recovery_duration()
def test_stats(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Coverage [%]', 'Total Records',
'Total Recovered Drawdowns', 'Total Active Drawdowns',
'Active Drawdown [%]', 'Active Duration', 'Active Recovery [%]',
'Active Recovery Return [%]', 'Active Recovery Duration',
'Max Drawdown [%]', 'Avg Drawdown [%]', 'Max Drawdown Duration',
'Avg Drawdown Duration', 'Max Recovery Return [%]',
'Avg Recovery Return [%]', 'Max Recovery Duration',
'Avg Recovery Duration', 'Avg Recovery Duration Ratio'
], dtype='object')
pd.testing.assert_series_equal(
drawdowns.stats(),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-06 00:00:00'),
pd.Timedelta('6 days 00:00:00'), 44.444444444444436, 1.5, 1.0, 0.5,
54.166666666666664, pd.Timedelta('2 days 00:00:00'), 25.0, 50.0,
pd.Timedelta('0 days 12:00:00'), 66.66666666666666, 58.33333333333333,
pd.Timedelta('1 days 00:00:00'), pd.Timedelta('1 days 00:00:00'), 300.0, 250.0,
pd.Timedelta('1 days 00:00:00'), pd.Timedelta('1 days 00:00:00'), 1.0
],
index=stats_index,
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
drawdowns.stats(settings=dict(incl_active=True)),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-06 00:00:00'),
pd.Timedelta('6 days 00:00:00'), 44.444444444444436, 1.5, 1.0, 0.5,
54.166666666666664, pd.Timedelta('2 days 00:00:00'), 25.0, 50.0,
pd.Timedelta('0 days 12:00:00'), 69.44444444444444, 62.962962962962955,
pd.Timedelta('1 days 16:00:00'), pd.Timedelta('1 days 16:00:00'), 300.0, 250.0,
pd.Timedelta('1 days 00:00:00'), pd.Timedelta('1 days 00:00:00'), 1.0
],
index=stats_index,
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
drawdowns.stats(column='a'),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-06 00:00:00'),
pd.Timedelta('6 days 00:00:00'), 50.0, 3, 2, 1, 75.0, pd.Timedelta('1 days 00:00:00'),
0.0, 0.0, pd.Timedelta('0 days 00:00:00'), 66.66666666666666, 58.33333333333333,
pd.Timedelta('1 days 00:00:00'), pd.Timedelta('1 days 00:00:00'), 300.0, 250.0,
pd.Timedelta('1 days 00:00:00'), pd.Timedelta('1 days 00:00:00'), 1.0
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
drawdowns.stats(column='g1', group_by=group_by),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-06 00:00:00'),
pd.Timedelta('6 days 00:00:00'), 41.66666666666667, 5, 4, 1, 66.66666666666666,
58.33333333333333, pd.Timedelta('1 days 00:00:00'), pd.Timedelta('1 days 00:00:00')
import pandas as pd
from typing import Dict, Any, List
from .template import Processor, Settings
class ParseSnpEffVcf(Processor):
LOG_INTERVAL = 10000 # variants
vcf: str
vcf_header: str
info_id_to_description: Dict[str, str]
data: List[Dict[str, Any]] # each dict is a row (i.e. variant)
def __init__(self, settings: Settings):
super().__init__(settings)
self.vcf_line_to_row = SnpEffVcfLineToRow(self.settings).main
def main(self, vcf: str):
self.vcf = vcf
self.logger.info(msg='Start parsing annotated VCF')
self.set_vcf_header()
self.set_info_id_to_description()
self.process_vcf_data()
self.save_csv()
def set_vcf_header(self):
self.vcf_header = ''
with open(self.vcf) as fh:
for line in fh:
if not line.startswith('#'):
break
self.vcf_header += line
def set_info_id_to_description(self):
self.info_id_to_description = GetInfoIDToDescription(self.settings).main(
vcf_header=self.vcf_header)
def process_vcf_data(self):
n = 0
self.data = []
with open(self.vcf) as fh:
for line in fh:
if line.startswith('#'):
continue
row = self.vcf_line_to_row(
vcf_line=line,
info_id_to_description=self.info_id_to_description)
self.data.append(row)
n += 1
if n % self.LOG_INTERVAL == 0:
self.logger.debug(msg=f'{n} variants parsed')
def save_csv(self):
df = pd.DataFrame(self.data)
"""
preprocesslayer lambda function for IPAC-CLABSI
Purpose
-------
Read Excel file, separate it into individual patient files, generate 7-day
Infection Window plots and a total timeline plot per patient, and save each
patient csv into the s3 bucket.
Patient files are saved as {mrn}.csv in the source-csv folder, which triggers
the job-creation lambda, which in turn triggers the Sagemaker GroundTruth module.
-----------------
* csv - tabular data with the patient information
"""
import os
import json
from datetime import timedelta
import datetime
import io
from io import StringIO
from matplotlib.pylab import plt
import pandas as pd
import boto3
s3_path = os.environ.get('S3_raw')
patient_processed = os.environ.get('patient_bucket')
def write_dataframe_to_csv_on_s3(dataframe, filename, bucket):
"""
Write a dataframe to a CSV on S3
Parameters
----------
dataframe : pandas.DataFrame
Dataframe to write
filename : str
Destination key (path) within the bucket
bucket : str
Name of the target S3 bucket
"""
# Create buffer
csv_buffer = StringIO()
# Write dataframe to buffer
dataframe.to_csv(csv_buffer)
# Create S3 object
s3_resource = boto3.resource("s3")
# Write buffer to S3 object
s3_resource.Object(bucket, filename).put(
Body=csv_buffer.getvalue(),
ServerSideEncryption="aws:kms",
)
def relative_time_in_days(end_date, start_date):
"""
Returns the difference between dates in day unit.
"""
try:
difference = (end_date - start_date).days
except ValueError:
difference = 0
return difference
def plot_timeline(dataframe, patient):
"""
Generate the timeline plot for a patient
Columns
=======
['encntr_num', 'nursing_unit_short_desc',
'beg_effective_dt_tm','end_effective_dt_tm',
'facility_name_src', 'collection_dt_tm',
'mrn', 'encntr_type_desc_src_at_collection',
'admit_dt_tm', 'clinical_event_code_desc_src',
'collection_date_id', 'loc_room_desc_src_at_collection',
'loc_bed_desc_src_at_collection', 'disch_dt_tm',
'disch_disp_desc_src', 'lab_result',
'med_service_desc_src_at_collection',
'nursing_unit_desc_at_collection',
'nursing_unit_short_desc_at_collection',
'organism',
'result_interpretation_desc_src',
'specimen_type_desc_src', 'transfer_in_to_collect',
'transfer_out_to_collect','ce_dynamic_label_id',
'doc_set_name_result', 'encntr_id',
'first_activity_start_dt_tm',
'first_catheter_type_result',
'first_dressing_type_result',
'first_site_result',
'last_activity_end_dt_tm',
'line_tube_drain_insertion_seq',
'line_insert_to_collection',
'line_remove_to_collect',
'last_temperature_result_pre_collection',
'name_last','name_first',
'birth_date_id','gender_desc_src','bc_phn',
'home_addr_patient_postal_code_forward_sortation_area']
DataTime events
===============
- beg_effective_dt_tm = Nursing unit (ICU) admission date
- end_effective_dt_tm = Nursing unit (ICU) discharge date
- collection_dt_tm = Positive blood collection date
- admit_dt_tm = Admission date (begin of stay)
- disch_dt_tm = Discharge date (end of stay)
- first_activity_start_dt_tm = Catheter insertion
- last_activity_end_dt_tm = Catheter removal
"""
print('Generating timeline plot for {}'.format(patient))
# Convert all datetime values to datetime
datetime_column_names = [
'beg_effective_dt_tm',
'end_effective_dt_tm',
'collection_dt_tm',
'admit_dt_tm',
'disch_dt_tm',
'first_activity_start_dt_tm',
'last_activity_end_dt_tm',
]
# Convert all dates to datetime format; the input data is mm/dd/YYYY
for column_name in datetime_column_names:
dataframe[column_name] = pd.to_datetime(
dataframe[column_name], errors='coerce', format='%m/%d/%Y')
#
fig, axis = plt.subplots(figsize=(
12, 3 + len(dataframe['collection_dt_tm'].unique()) / 4), dpi=300)
collection_times = []
plotted_organisms = []
x_scale_label = {}
y_scale_label = []
dates = {}
# Generate a list of organisms,
# so that the same organism is always shown in the same color
unique_organisms = []
for index in dataframe.index:
organism = dataframe.loc[index, 'organism']
unique_organisms.append(organism)
# Iterate through all records and add them to the plot
for index in dataframe.index:
# Organism found for this record
organism = dataframe.loc[index, 'organism']
# Calcululate the relative date from admission
day = {
key: relative_time_in_days(
dataframe.loc[index, key], sorted(dataframe['admit_dt_tm'])[0])
for key in datetime_column_names
}
# 3 bar graph plots: patient visit, nursing unit, central line
bar_graphs = {
'Patient visit': {
'start': 'admit_dt_tm',
'stop': 'disch_dt_tm',
'y': 0,
'color': [0.8, 0.8, 0.8],
},
dataframe.loc[index, 'nursing_unit_short_desc']: {
'start': 'beg_effective_dt_tm',
'stop': 'end_effective_dt_tm',
'y': 1,
'color': [0.6, 0.6, 0.6],
},
'Central line': {
'start': 'first_activity_start_dt_tm',
'stop': 'last_activity_end_dt_tm',
'y': 2,
'color': [0.4, 0.4, 0.4],
},
}
# One type of markers for the positive blood collection dates
marker_graphs = {
'Blood collection': {
'start': 'collection_dt_tm',
'y': 0,
'color': [0.8, 0.2, 0.2],
},
}
# bar graphs: patient visit, nursing unit, central line
for label in bar_graphs:
period = (
dataframe.loc[index, bar_graphs[label]['start']],
dataframe.loc[index, bar_graphs[label]['stop']]
)
# Do not plot the same period twice
if label not in dates:
dates[label] = []
if period not in dates[label]:
# Bar plot for the period
axis.bar(
[day[bar_graphs[label]['start']]],
[0.8],
width=day[bar_graphs[label]['stop']] -
day[bar_graphs[label]['start']],
bottom=bar_graphs[label]['y'] + 0.1,
color=bar_graphs[label]['color'],
# edgecolor='w',
# linewidth=4,
align='edge',
)
# Put markers at the start and stop dates, so that if a date is
# missing it can still be seen.
axis.plot(
[day[bar_graphs[label]['start']]],
[bar_graphs[label]['y'] + 0.5],
'k>',
)
axis.plot(
[day[bar_graphs[label]['stop']]],
[bar_graphs[label]['y'] + 0.5],
'k<',
)
dates[label].append(period)
x_scale_label[day[bar_graphs[label]['start']]] = dataframe.loc[
index, bar_graphs[label]['start']]
x_scale_label[day[bar_graphs[label]['stop']]] = dataframe.loc[
index, bar_graphs[label]['stop']]
if label not in y_scale_label:
y_scale_label.append(label)
for label in marker_graphs:
# Blood collection
if float(
day[marker_graphs[
label]['start']]) not in collection_times:
if organism not in plotted_organisms:
axis.plot(
[day[marker_graphs[label]['start']]],
[marker_graphs[label]['y'] + 0.5],
marker='o',
markersize=14,
linestyle='',
color=plt.cm.tab10(unique_organisms.index(organism)),
label=organism.replace(', ',"\n"),
)
plotted_organisms.append(organism)
else:
axis.plot(
[day[marker_graphs[label]['start']]],
[marker_graphs[label]['y'] + 0.5],
marker='o',
markersize=14,
linestyle='',
color=plt.cm.tab10(unique_organisms.index(organism)),
)
axis.plot(
[day[marker_graphs[label]['start']]],
[marker_graphs[label]['y'] + 0.5],
'wo',
markersize=5,
color='0.8'
)
collection_times.append(
float(day[marker_graphs[label]['start']]))
x_scale_label[day[
marker_graphs[label]['start']]] = dataframe.loc[
index, marker_graphs[label]['start']]
if label not in dates:
dates[label] = []
dates[label].append(day[marker_graphs[label]['start']])
axis.set_yticks([value + 0.5 for value in range(len(y_scale_label))])
axis.set_yticklabels(y_scale_label)
axis.set_ylim(0, len(y_scale_label))
axis.set_xticks(list(x_scale_label.keys()))
axis.set_xticklabels([
str(value)[:10] for value in x_scale_label.values()], rotation=90)
axis.set_xlabel('Date')
axis.set_axisbelow(True)
plt.legend(
bbox_to_anchor=(1.04, 1), loc='upper left',
ncol=1, title='Positive blood sample')
plt.tight_layout()
buf = io.BytesIO()
fig.savefig(buf, format="png")
buf.seek(0)
image = buf.read()
s3_resource = boto3.resource("s3")
# saving the patient total timeline
# plots to processed/images/patient/timeline.png
filename = f'images/{patient}/timeline.png'
bucket = os.environ['patient_bucket']
print('Timeline plot path for patient {}: {}'.format(patient, filename))
s3_resource.Object(bucket, filename).put(
Body=image, ServerSideEncryption="aws:kms")
def get_start_end_time(dataframe):
"""
Create middle_time, start_time and end_time columns (collection date
+/- 3 days) used to build the Infection Window plot for each collection date.
Parameters
----------
dataframe : pandas.DataFrame
Patient records containing a collection_dt_tm column
Returns
--------
dataframe : pandas.DataFrame
The same dataframe with the added window columns
"""
for i in dataframe.index:
dataframe.loc[i, 'middle_time'] = dataframe.loc[
i, 'collection_dt_tm']
dataframe.loc[i, 'start_time'] = dataframe.loc[
i, 'collection_dt_tm'] - timedelta(days=3)
dataframe.loc[i, 'end_time'] = dataframe.loc[
i, 'collection_dt_tm'] + timedelta(days=3)
return dataframe
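# Illustrative example (hypothetical date): a collection_dt_tm of 2020-01-10
# gives start_time 2020-01-07, middle_time 2020-01-10 and end_time 2020-01-13,
# i.e. the 7-day infection window centred on the collection date.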
def estimate_text_size(text):
"""
Provide a text size estimate based on the length of the text
Parameters
----------
text: string
Text meant to print on the IWP plot
Returns
-------
fontsize: float
Estimated best fontsize
"""
fontsize = 12
if len(text) > 50:
fontsize -= (len(text) - 50) / 5
if fontsize < 5:
fontsize = 5
return fontsize
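# Illustrative examples: a 40-character title keeps the default 12 pt,
# a 60-character title gets 12 - (60 - 50) / 5 = 10 pt, and very long
# titles are clamped at the 5 pt minimum.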
def generate_iwp_plot(dataframe, temperature, plot_index, patient):
"""
Generate individual IWP plot for each positive blood collection.
"""
dataframe = dataframe.copy()
# Convert all datetime values to datetime
datetime_column_names = [
'beg_effective_dt_tm',
'end_effective_dt_tm',
'collection_dt_tm',
'admit_dt_tm',
'disch_dt_tm',
'first_activity_start_dt_tm',
'last_activity_end_dt_tm',
]
# Convert all dates to datetime format; the input data is mm/dd/YYYY
for column_name in datetime_column_names:
dataframe[column_name] = pd.to_datetime(
dataframe[column_name], errors='coerce',
# format='%m/%d/%Y',
)
collection_date = dataframe.loc[plot_index, 'collection_dt_tm']
day3 = pd.Timedelta(days=3)
fig, axis = plt.subplots(
2, 1, True, False,
figsize=(7, 7), dpi=150,
gridspec_kw={'height_ratios': [1, 2.5]},
)
# Generate the temperature plot - top portion
# Fever limit; temperatures at or above this value are marked in red
temperature_limit = 38.0
# Mark the temperature limit (38 C) with a solid line
axis[0].plot_date(
[collection_date - day3, collection_date + day3],
[temperature_limit, temperature_limit],
'k-',
color='0.4',
)
# Plot all temperature information
for temperature_index in temperature.index:
temp_date = temperature.loc[temperature_index, 'event_end_dt_tm']
value = temperature.loc[temperature_index, 'result_val']
try:
# Above limit the marker is red
if value < temperature_limit:
markercolor = '0.4'
else:
markercolor = [0.8, 0.2, 0.2]
# Plot the dates - temperature information
axis[0].plot_date(
temp_date, value, 'wo',
markeredgecolor=markercolor,
markersize=6,
markeredgewidth=4,
)
except ValueError:
print('failure in plotting temperature')
# Plot catheter start and end
if not (pd.isnull(dataframe.loc[plot_index, 'first_activity_start_dt_tm'])
import torch
import hamiltorch
# import URSABench as mctestbed
import argparse
import inference
from util import *
import torch.nn as nn
import torch.nn.functional as F
print('####################')
print('Version: ',hamiltorch.__version__)
print('####################')
'''set up hyperparameters of the experiments'''
parser = argparse.ArgumentParser(description='Logistic regression')
parser.add_argument('--num_samples', type=int, default=100000) #100
parser.add_argument('--device', type=int, default=0) #100
parser.add_argument('--step_size', type=float, default=0.0004)
parser.add_argument('--thinning', type=int, default=10)
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--MH_burn_in', type=int, default=1000)
parser.add_argument('--prior_prec', type=float, default=1.)
parser.add_argument('--milestones_con', nargs='+', default = [1000, 2000, 3000])
parser.add_argument('--milestones_track', nargs='+', default = [1000, 2000, 3000])
parser.add_argument('--steps_per_milestone_con', nargs='+', default = [1, 5, 10])
parser.add_argument('--steps_per_milestone_track', nargs='+', default = [1, 5, 10])
parser.add_argument('--save', type=str, help='Save name', default = '')
parser.add_argument('--inference', type=str, default='HMC')
# Namespace(MH_burn_in=1000, device=0, inference='DULA', milestones_con=[1000, 2000, 3000], milestones_track=[1000, 2000, 3000], num_samples=4000, prior_prec=1.0, save='MH', seed=0, step_size=1e-07, steps_per_milestone_con=[1, 5, 10], steps_per_milestone_track=[1, 5, 10], thinning=10)
args = parser.parse_args()
print (args)
### Import data sets
# from URSABench import datasets, models
hamiltorch.set_random_seed(args.seed)
from sklearn.datasets import load_boston
import pandas as pd
bos = load_boston()
bos.keys()
df = pd.DataFrame(bos.data)
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 25 16:59:42 2017
@author: Flamingo
"""
import numpy as np
import pandas as pd
import math
import sys
sys.path.append('../TOOLS')
from IJCAI2017_TOOL import *
def SSD(Temp,Velo,Humi):
score = (1.818*Temp+18.18) * (0.88+0.002*Humi) + 1.0*(Temp -32)/(45-Temp) - 3.2*Velo + 18.2
return score
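# Illustrative example: SSD(30, 2, 70) = (1.818*30 + 18.18) * (0.88 + 0.002*70)
# + (30 - 32) / (45 - 30) - 3.2*2 + 18.2, which is about 85.8 -- a hot, humid,
# low-wind reading maps to a high discomfort score.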
WEATHER_raw = pd.read_csv('../additional/WEATHER_raw.csv',encoding = 'gbk',low_memory=False)
#%%
def AMPM2decimal(ser):
tt = ser.replace(' ',':').split(':')
tt[0] = np.int(tt[0])%12
if (tt[2] == 'AM'):
return np.float(tt[0]) + np.float(tt[1])/60.
if (tt[2] == 'PM'):
return np.float(tt[0]) + np.float(tt[1])/60. + 12.
def Eventclean(ser):
try:
if (math.isnan(ser)):
return 'None'
except:
tt = ser.replace('\n','\r').replace('\t','\r').split('\r')
tt2 = ''.join(tt)
return tt2
#%% clean the raw data
WEATHER_raw = WEATHER_raw[['DATE','Time','Temp','Visibility','Wind_speed','Humidity','Event','Condition','CITY_EN']]
WEATHER_raw['Time'] = [(lambda x:AMPM2decimal(x) ) (x) for x in WEATHER_raw['Time']]
WEATHER_raw['Event'] = [(lambda x:Eventclean(x) ) (x) for x in WEATHER_raw['Event']]
WEATHER_raw['Visibility'] = WEATHER_raw['Visibility'].replace('-',np.nan).fillna(method='ffill')
WEATHER_raw['Visibility'] = pd.to_numeric(WEATHER_raw['Visibility'], errors='ignore')
WEATHER_raw['Temp'] = WEATHER_raw['Temp'].replace('-',0.0)
WEATHER_raw['Temp'] = pd.to_numeric(WEATHER_raw['Temp'], errors='ignore')
WEATHER_raw.loc[ WEATHER_raw['Wind_speed'] == 'Calm','Wind_speed']= 0.0
WEATHER_raw['Wind_speed'] = WEATHER_raw['Wind_speed'].replace('-','3.6')
WEATHER_raw['Wind_speed'] = pd.to_numeric(WEATHER_raw['Wind_speed'], errors='ignore')
WEATHER_raw['Wind_speed'] = WEATHER_raw['Wind_speed']/3.6
WEATHER_raw['Humidity'] = WEATHER_raw['Humidity'].replace('N/A%','5%')
WEATHER_raw.loc[ WEATHER_raw['Humidity'] == '%','Humidity']= '5%'
WEATHER_raw['Humidity'] = [(lambda x: (np.int(x.split('%')[0]) ) ) (x) for x in WEATHER_raw['Humidity']]
WEATHER_raw['SSD'] = SSD(WEATHER_raw['Temp'] ,WEATHER_raw['Wind_speed'],WEATHER_raw['Humidity'])
WEATHER_raw.loc[ WEATHER_raw['Condition'] == 'Unknown','Condition']= np.nan
WEATHER_raw['Condition'] = WEATHER_raw['Condition'].fillna(method='ffill')
WEATHER_CON_LEVEL = pd.read_csv('WEATHER_CON_LEVEL.csv')
""" HYPSLIT MODEL READER """
import sys
import datetime
import pandas as pd
import xarray as xr
import numpy as np
from numpy import fromfile, arange
"""
This code developed at the NOAA Air Resources Laboratory.
<NAME>
<NAME>
-------------
Functions:
-------------
open_dataset :
combine_dataset :
get_latlongrid :
hysp_heights: determines ash top height from HYSPLIT
hysp_massload: determines total mass loading from HYSPLIT
calc_aml: determines ash mass loading for each altitude layer from HYSPLIT
hysp_thresh: calculates mask array for ash mass loading threshold from HYSPLIT
add_species(dset): adds concentrations due to different species.
--------
Classes
--------
ModelBin
"""
# def _hysplit_latlon_grid_from_dataset(ds):
# pargs = dict()
# pargs["lat_0"] = ds.latitude.mean()
# pargs["lon_0"] = ds.longitude.mean()
#
# p4 = (
# "+proj=eqc +lat_ts={lat_0} +lat_0={lat_0} +lon_0={lon_0} "
# "+ellps=WGS84 +datum=WGS84 +units=m +no_defs".format(**pargs)
# )
# return p4
# def get_hysplit_latlon_pyresample_area_def(ds, proj4_srs):
# from pyresample import geometry
#
# return geometry.SwathDefinition(lons=ds.longitude.values, lats=ds.latitude.values)
def open_dataset(fname, drange=None, verbose=False):
"""Short summary.
Parameters
----------
fname : string
Name of "cdump" file. Binary HYSPLIT concentration output file.
drange : list of two datetime objects
cdump file contains concentration as function of time. The drange
specifies what times should be loaded from the file. A value of None
will result in all times being loaded.
verbose : boolean
If True will print out extra messages
addgrid : boolean
assigns an area attribute to each variable (not currently used; the
corresponding grid/area code below is commented out)
Returns
-------
dset : xarray DataSet
CHANGES for PYTHON 3
For python 3 the numpy char4 are read in as a numpy.bytes_ class and need to
be converted to a python
string by using decode('UTF-8').
"""
# open the dataset using xarray
binfile = ModelBin(fname, drange=drange, verbose=verbose, readwrite="r")
dset = binfile.dset
# return dset
# get the grid information
# May not need the proj4 definitions now that lat lon defined properly.
# if addarea:
# p4 = _hysplit_latlon_grid_from_dataset(dset)
# swath = get_hysplit_latlon_pyresample_area_def(dset, p4)
# now assign this to the dataset and each dataarray
# dset = dset.assign_attrs({"proj4_srs": p4})
# for iii in dset.variables:
# dset[iii] = dset[iii].assign_attrs({"proj4_srs": p4})
# for jjj in dset[iii].attrs:
# dset[iii].attrs[jjj] = dset[iii].attrs[jjj].strip()
# dset[iii] = dset[iii].assign_attrs({"area": swath})
# dset = dset.assign_attrs(area=swath)
return dset
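# Minimal usage sketch (the cdump file name and date range are hypothetical):
#
#     import datetime
#     dset = open_dataset(
#         "cdump",
#         drange=[datetime.datetime(2019, 1, 1), datetime.datetime(2019, 1, 2)],
#         verbose=True,
#     )
#     print(dset.data_vars)
#
# The returned object is the xarray Dataset built by ModelBin from the binary
# concentration records described in the class below.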
def check_drange(drange, pdate1, pdate2):
"""
drange : list of two datetimes
pdate1 : datetime
pdate2 : datetime
Returns
savedata : boolean
returns True if drange is between pdate1 and pdate2
"""
savedata = True
testf = True
# if pdate1 is within drange then save the data.
# AND if pdate2 is within drange then save the data.
# if drange[0] > pdate1 then stop looping to look for more data
# this block sets savedata to true if data within specified time
# range or time range not specified
if drange is None:
savedata = True
elif pdate1 >= drange[0] and pdate1 <= drange[1] and pdate2 <= drange[1]:
savedata = True
elif pdate1 > drange[1] or pdate2 > drange[1]:
testf = False
savedata = False
else:
savedata = False
# END block
# if verbose:
# print(savedata, 'DATES :', pdate1, pdate2)
return testf, savedata
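# Illustrative example: with drange = [Jan 1, Jan 3] a sampling period of
# Jan 2 - Jan 2 returns (True, True), while a period starting after Jan 3
# returns (False, False), telling the caller to stop reading further periods.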
class ModelBin:
"""
represents a binary cdump (concentration) output file from HYSPLIT
methods:
readfile - opens and reads contents of cdump file into an xarray
self.dset
"""
def __init__(
self, filename, drange=None, century=None, verbose=True, readwrite="r"
):
"""
drange : list of two datetime objects.
The read method will store data from the cdump file for which the
sample start is greater than drange[0] and less than drange[1], and
for which the sample stop is less than drange[1].
century : integer
verbose : boolean
read
"""
self.drange = drange
self.filename = filename
self.century = century
self.verbose = verbose
# list of tuples (date1, date2) of averaging periods with zero
# concentrations
self.zeroconcdates = []
# list of tuples of averaging periods with nonzero concentrtations]
self.nonzeroconcdates = []
self.sourcedate = []
self.slat = []
self.slon = []
self.sht = []
self.atthash = {}
self.atthash["Starting Locations"] = []
self.atthash["Source Date"] = []
self.llcrnr_lon = None
self.llcrnr_lat = None
self.nlat = None
self.nlon = None
self.dlat = None
self.dlon = None
self.levels = None
if readwrite == "r":
self.dataflag = self.readfile(
filename, drange, verbose=verbose, century=century
)
@staticmethod
def define_struct():
"""Each record in the fortran binary begins and ends with 4 bytes which
specify the length of the record. These bytes are called pad below.
They are not used here, but are thrown out. The following block defines
a numpy dtype object for each record in the binary file. """
from numpy import dtype
real4 = ">f"
int4 = ">i"
int2 = ">i2"
char4 = ">a4"
rec1 = dtype(
[
("pad1", int4),
("model_id", char4), # meteorological model id
("met_year", int4), # meteorological model starting time
("met_month", int4),
("met_day", int4),
("met_hr", int4),
("met_fhr", int4), # forecast hour
("start_loc", int4), # number of starting locations
("conc_pack", int4), # concentration packing flag (0=no, 1=yes)
("pad2", int4),
]
)
# start_loc in rec1 tell how many rec there are.
rec2 = dtype(
[
("pad1", int4),
("r_year", int4), # release starting time
("r_month", int4),
("r_day", int4),
("r_hr", int4),
("s_lat", real4), # Release location
("s_lon", real4),
("s_ht", real4),
("r_min", int4), # release startime time (minutes)
("pad2", int4),
]
)
rec3 = dtype(
[
("pad1", int4),
("nlat", int4),
("nlon", int4),
("dlat", real4),
("dlon", real4),
("llcrnr_lat", real4),
("llcrnr_lon", real4),
("pad2", int4),
]
)
rec4a = dtype(
[
("pad1", int4),
("nlev", int4), # number of vertical levels in concentration grid
]
)
rec4b = dtype([("levht", int4)]) # height of each level (meters above ground)
rec5a = dtype(
[
("pad1", int4),
("pad2", int4),
("pollnum", int4), # number of different pollutants
]
)
rec5b = dtype([("pname", char4)]) # identification string for each pollutant
rec5c = dtype([("pad2", int4)])
rec6 = dtype(
[
("pad1", int4),
("oyear", int4), # sample start time.
("omonth", int4),
("oday", int4),
("ohr", int4),
("omin", int4),
("oforecast", int4),
("pad3", int4),
]
)
# rec7 has same form as rec6. #sample stop time.
# record 8 is pollutant type identification string, output level.
rec8a = dtype(
[
("pad1", int4),
("poll", char4), # pollutant identification string
("lev", int4),
("ne", int4), # number of elements
]
)
rec8b = dtype(
[
("indx", int2), # longitude index
("jndx", int2), # latitude index
("conc", real4),
]
)
rec8c = dtype([("pad2", int4)])
recs = (
rec1,
rec2,
rec3,
rec4a,
rec4b,
rec5a,
rec5b,
rec5c,
rec6,
rec8a,
rec8b,
rec8c,
)
return recs
def parse_header(self, hdata1):
"""
hdata1 : dtype
Returns
nstartloc : int
number of starting locations in file.
"""
if len(hdata1["start_loc"]) != 1:
print(
"WARNING in ModelBin _readfile - number of starting locations "
"incorrect"
)
print(hdata1["start_loc"])
# in python 3 np.fromfile reads the record into a list even if it is
# just one number.
# so if the length of this record is greater than one something is
# wrong.
nstartloc = hdata1["start_loc"][0]
self.atthash["Meteorological Model ID"] = hdata1["model_id"][0].decode("UTF-8")
self.atthash["Number Start Locations"] = nstartloc
return nstartloc
def parse_hdata2(self, hdata2, nstartloc, century):
# Loop through starting locations
for nnn in range(0, nstartloc):
# create list of starting latitudes, longitudes and heights.
self.slat.append(hdata2["s_lat"][nnn])
self.slon.append(hdata2["s_lon"][nnn])
self.sht.append(hdata2["s_ht"][nnn])
self.atthash["Starting Locations"].append(
(hdata2["s_lat"][nnn], hdata2["s_lon"][nnn])
)
# try to guess century if century not given
if century is None:
if hdata2["r_year"][0] < 50:
century = 2000
else:
century = 1900
print(
"WARNING: Guessing Century for HYSPLIT concentration file", century
)
# add sourcedate which is datetime.datetime object
sourcedate = datetime.datetime(
century + hdata2["r_year"][nnn],
hdata2["r_month"][nnn],
hdata2["r_day"][nnn],
hdata2["r_hr"][nnn],
hdata2["r_min"][nnn],
)
self.sourcedate.append(sourcedate)
self.atthash["Source Date"].append(sourcedate)
return century
def parse_hdata3(self, hdata3, ahash):
# Description of concentration grid
ahash["Number Lat Points"] = hdata3["nlat"][0]
ahash["Number Lon Points"] = hdata3["nlon"][0]
ahash["Latitude Spacing"] = hdata3["dlat"][0]
ahash["Longitude Spacing"] = hdata3["dlon"][0]
ahash["llcrnr longitude"] = hdata3["llcrnr_lon"][0]
ahash["llcrnr latitude"] = hdata3["llcrnr_lat"][0]
self.llcrnr_lon = hdata3["llcrnr_lon"][0]
self.llcrnr_lat = hdata3["llcrnr_lat"][0]
self.nlat = hdata3["nlat"][0]
self.nlon = hdata3["nlon"][0]
self.dlat = hdata3["dlat"][0]
self.dlon = hdata3["dlon"][0]
return ahash
def parse_hdata4(self, hdata4a, hdata4b):
self.levels = hdata4b["levht"]
self.atthash["Number of Levels"] = hdata4a["nlev"][0]
self.atthash["Level top heights (m)"] = hdata4b["levht"]
def parse_hdata6and7(self, hdata6, hdata7, century):
# if no data read then break out of the while loop.
if not hdata6:
return False, None, None
pdate1 = datetime.datetime(
century + hdata6["oyear"], hdata6["omonth"], hdata6["oday"], hdata6["ohr"]
)
pdate2 = datetime.datetime(
century + hdata7["oyear"], hdata7["omonth"], hdata7["oday"], hdata7["ohr"]
)
dt = pdate2 - pdate1
sample_dt = dt.days * 24 + dt.seconds / 3600.0
self.atthash["Sampling Time"] = pdate2 - pdate1
self.atthash["sample time hours"] = sample_dt
return True, pdate1, pdate2
@staticmethod
def parse_hdata8(hdata8a, hdata8b, pdate1):
"""
hdata8a : dtype
hdata8b : dtype
pdate1 : datetime
Returns:
concframe : DataFrame
"""
lev_name = hdata8a["lev"][0]
col_name = hdata8a["poll"][0].decode("UTF-8")
edata = hdata8b.byteswap().newbyteorder() # otherwise get endian error.
concframe = pd.DataFrame.from_records(edata)
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 7 09:31:55 2017
@author: matthew.goodwin
"""
import datetime
import sqlite3
import pandas as pd
import numpy as np
import os
import xlwt
sqlite_file="reservations.db"
# Set path for output based on relative path and location of script
FileDir = os.path.dirname(__file__)
print (FileDir)
OUTDIR = os.path.join(FileDir, 'output')
#Variable set up
#==============================================================================
# These variables will not change through the years. I initialize them as None
# so that in years beyond the first year analyzed, these values do not have to be calculated again
#==============================================================================
FACILITYID_filtered = None
campsite_count = None
# Set Agency IDs that are present in the Reservation database
##Note: There is a '' agency for some reservations. This appears to be related to Reserve America
AgencyIDs = ['USFS'] #['NPS', 'USFS','USACE','Reserve America','NARA','BLM','FWS','BOR']
#Adjust YEARS list for each year you want analysis for
#YEAR_TABLE will be automatically updated to have the Table names for the necessary sheets based on YEARS
##Note: Make sure the years your trying to have been loaded into the datbase in loading.py
#YEARS = [2015] #All years [2015, 2014, 2013, 2012, 2011, 2010, 2009, 2008, 2007, 2006]
YEARS = [2015, 2014, 2013, 2012, 2011, 2010]
#No need to modify once YEARS is set
YEAR_TABLE = []
#Initialize DB connections
recreation_cnxn = sqlite3.connect(sqlite_file)
recreation_cursor = recreation_cnxn.cursor()
for yr in YEARS:
YEAR_TABLE.append("Recreation_"+str(yr))
#crete folder for facilities
new_folder = os.path.join(OUTDIR, "Agency")
if not os.path.exists(new_folder):
os.makedirs(new_folder)
#loop through years. "Enumerate" also provides access to index
for agency in AgencyIDs:
#loop through RecAreas if more than one
for index, years in enumerate(YEARS):
print("Running Analysis for " + agency + " in " + str(years))
# These tasks (done using PANDAS) are set up to run at the recArea level
#get facility IDs in rec area using Data/RecAreaFacilities_API_v1.csv
print (datetime.datetime.now().time())
#Pull in RecArea/Facility information
RecArea_query='''
select *
from RecAreaFacilities
'''
RecArea_Fac = pd.read_sql_query(RecArea_query,recreation_cnxn)
#This pulls all reservation data belonging to and agency from the given years
#reservation data
#setup SQL query
fac_target_query= '''
SELECT ___RESYEAR___.CustomerZIP, ___RESYEAR___.FacilityID , RecAreaFacilities.FACILITYID,RecAreaFacilities.RECAREAID,
___RESYEAR___.EndDate,___RESYEAR___.StartDate,___RESYEAR___.OrderDate,___RESYEAR___.CustomerCountry,___RESYEAR___.CustomerState,___RESYEAR___.FacilityState,
___RESYEAR___.FacilityZIP,___RESYEAR___.EntityType,___RESYEAR___.OrgID,___RESYEAR___.NumberOfPeople
FROM ___RESYEAR___ LEFT JOIN RecAreaFacilities
ON ___RESYEAR___.FacilityID = RecAreaFacilities.FACILITYID
WHERE AGENCY = ___AGIDS___;
'''
# fac_target_query = '''
# select *
# from ___RESYEAR___
# where AGENCY = ___AGIDS___
# '''
temp_fac_target_query = fac_target_query.replace("___RESYEAR___", YEAR_TABLE[index])
temp_fac_target_query = temp_fac_target_query.replace("___AGIDS___", "'"+agency+"'")
#Make SQL query
print('Gathering '+agency+' Reservation Data for '+str(years))
target_fac = pd.read_sql_query(temp_fac_target_query, recreation_cnxn)
target_fac = target_fac.reset_index()
#Run Analysis on collected facility data for RecArea
#Convert EndDate, StateDate and OrderDate to datetime format
target_fac['EndDate'] = pd.to_datetime(target_fac['EndDate'])
target_fac['StartDate'] = pd.to_datetime(target_fac['StartDate'])
from libsvm.svmutil import *
from libsvm.svm import *
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib as mpl
import seaborn as sns
mpl.rcParams['font.sans-serif'] = ['Microsoft YaHei'] # set the default font
mpl.rcParams['axes.unicode_minus'] = False # keep the minus sign '-' from rendering as a box in saved figures
def list_of_dict_to_list_of_list(list_of_dict: list) -> list:
"""
Convert a list of dicts into a list of lists, keeping only the values.
Args:
list_of_dict (list): list whose elements are dicts
Returns:
list: list whose elements are lists of the dict values
"""
list_of_list = [[v for v in d.values()] for d in list_of_dict]
return list_of_list
def get_dividing_point(y: list):
"""
Find the index at which the label switches from one class to the other.
Args:
y (list): data labels
Returns:
int: -1 if all labels are identical, otherwise the index of the dividing point
"""
last = y[0]
for i, yi in enumerate(y):
if yi != last:
return i
else:
last = yi
return -1
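# Illustrative example: get_dividing_point([1, 1, 1, -1, -1]) returns 3, the
# index where the label switches; it assumes the labels are already grouped
# by class, which scatter_training_set below relies on.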
def scatter_training_set(x: list, y: list, axes):
"""
Draw a scatter plot of the training set.
Args:
x (list): data features
y (list): data labels
axes (matplotlib.axes._base._AxesBase): Axes instance to draw on
Returns:
None
"""
x_array = np.array(list_of_dict_to_list_of_list(x))
x1 = x_array[:, 0]
x2 = x_array[:, 1]
dividing_point = get_dividing_point(y)
axes.scatter(x1[:dividing_point], x2[:dividing_point])
axes.scatter(x1[dividing_point:], x2[dividing_point:])
def leave_one_out(x: list, y: list, param_str: str):
"""
Run leave-one-out cross-validation.
Args:
x (list): data features
y (list): data labels
param_str (str): SVM parameter string
Returns:
leave-one-out cross-validation accuracy
"""
param_str += " -v " + str(len(y))
accuracy = svm_train(y, x, param_str)
return accuracy
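# Illustrative call (parameter values are hypothetical): for an RBF kernel
# with C = 1 and gamma = 0.5, param_str would be '-q -t 2 -c 1 -g 0.5';
# leave_one_out appends '-v <n_samples>' so that svm_train returns the
# cross-validation accuracy instead of a model object.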
def solve_predict(x: list, y: list, param_str: str):
"""
Train an SVM model and use it for classification.
Args:
x (list): data features
y (list): data labels
param_str (str): SVM parameter string
Returns:
p_label, p_acc, p_val, model
"""
prob = svm_problem(y, x)
param = svm_parameter(param_str)
model = svm_train(prob, param)
p_label, p_acc, p_val = svm_predict(y, x, model)
return p_label, p_acc, p_val, model
def tuning_gauss(x: list, y: list, c_range: np.ndarray, g_range: np.ndarray):
"""
Tune the c and g (gamma) parameters of an SVM with a Gaussian (RBF) kernel.
Args:
x (list): data features
y (list): data labels
c_range (np.ndarray): candidate values for the c parameter
g_range (np.ndarray): candidate values for the g parameter
Returns:
best_result (dict): best tuning result, holding the accuracy and the chosen c and g
result_frame (pd.DataFrame): every (c, g) pair tried and its accuracy
"""
best_result = {"Accuracy": -1, "c": -1, "g": -1}
result_file_name = "best_result.txt"
result_array = []
clear_file(result_file_name)
for c in c_range:
for g in g_range:
param_str = '-q -t 2 -c ' + str(c) + ' -g ' + str(g)
accuracy = leave_one_out(x, y, param_str)
result_array.append([float(format(c, '.6f')), float(format(g, '.6f')), accuracy])
if accuracy >= best_result["Accuracy"]:
best_result["Accuracy"] = accuracy
best_result["c"] = c
best_result["g"] = g
append_dict_to_file(result_file_name, best_result)
result_frame = pd.DataFrame(result_array, columns=['c', 'g', 'Accuracy'])
import streamlit as st
import pandas as pd
import datetime
now = datetime.datetime.now()
def obtener_datos():
url = "https://raw.githubusercontent.com/MinCiencia/Datos-COVID19/master/output/producto19/CasosActivosPorComuna_std.csv"
return pd.read_csv(url,header=0)
@st.cache
def obtener_fecha_inicio():
df = obtener_datos()
fechas = []
for i in df['Fecha']:
if i not in fechas:
fechas.append(i)
return fechas[0]
@st.cache
def obtener_regiones():
df = obtener_datos()
regiones = []
for i in df['Region']:
if i not in regiones:
regiones.append(i)
return regiones
@st.cache
def obtener_comunas(region):
df = obtener_datos()
comunas = []
for i in range(len(df)):
if df['Region'][i]==region and (df['Comuna'][i]!='Desconocido '+region and df['Comuna'][i]!='Total'):
if df['Comuna'][i] not in comunas:
comunas.append(df['Comuna'][i])
return comunas
@st.cache(allow_output_mutation=True)
def filtrar_datos_region_fecha(region,fecha_inicio,fecha_termino):
df = obtener_datos()
df = pd.DataFrame(data=df)
data = pd.DataFrame()
for i in range(len(df)):
if df.loc[i,'Region']==region and df.loc[i,'Comuna']=='Total':
data[i] = df.loc[i]
data = data.T
data = data.drop(columns=['Codigo region','Comuna','Codigo comuna','Region','Poblacion'])
data = data.groupby(['Fecha']).sum()
data = data.loc[str(fecha_inicio):str(fecha_termino)]
data.rename(columns={'Casos activos':str(region)},inplace=True)
return data
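# Minimal usage sketch (region name and dates are illustrative):
#
#     datos = filtrar_datos_region_fecha(
#         'Metropolitana', datetime.date(2021, 1, 1), datetime.date(2021, 3, 1))
#
# The result is a single-column DataFrame of active cases indexed by 'Fecha',
# with the column renamed to the region, ready to be plotted in Streamlit.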
@st.cache(allow_output_mutation=True)
def filtrar_datos_comuna_fecha(comuna,fecha_inicio,fecha_termino):
df = obtener_datos()
df = pd.DataFrame(data=df)
data = pd.DataFrame()
# Exporters
from nyoka import skl_to_pmml, KerasToPmml, ArimaToPMML, ExponentialSmoothingToPMML
# Nyoka preprocessings
from nyoka.preprocessing import Lag
# Pipeline/ DataFrameMapper
from sklearn.pipeline import Pipeline
from sklearn_pandas import DataFrameMapper
# Sklearn Preprocessing
from sklearn.preprocessing import StandardScaler, MinMaxScaler, MaxAbsScaler, RobustScaler, LabelEncoder, Imputer,\
Binarizer, PolynomialFeatures, LabelBinarizer
from sklearn.decomposition import PCA
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
## Sklearn models
# Linear models
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.linear_model import LinearRegression, LogisticRegression, SGDClassifier
# Tree models
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
# SVM
from sklearn.svm import SVC, SVR, LinearSVC, LinearSVR, OneClassSVM
# Ensemble
from sklearn.ensemble import GradientBoostingClassifier, GradientBoostingRegressor, RandomForestClassifier,\
RandomForestRegressor, IsolationForest
# Clustering
from sklearn.cluster import KMeans
# Naive Bayes
from sklearn.naive_bayes import GaussianNB
#Neighbors
from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor
# keras models
from keras.applications import MobileNet, ResNet50, VGG16, Xception, InceptionV3, DenseNet121
from keras.layers import Input
# statsmodels models
from statsmodels.tsa.arima_model import ARIMA
from statsmodels.tsa.statespace.sarimax import SARIMAX
from statsmodels.tsa.holtwinters import ExponentialSmoothing
import unittest
from sklearn import datasets
from sklearn.datasets import fetch_20newsgroups
import xmlschema
import pandas as pd
class StatsmodelsDataHelper:
def getData1(self):
# data with trend and seasonality present
# no of international visitors in Australia
data = [41.7275, 24.0418, 32.3281, 37.3287, 46.2132, 29.3463, 36.4829, 42.9777, 48.9015, 31.1802, 37.7179,
40.4202, 51.2069, 31.8872, 40.9783, 43.7725, 55.5586, 33.8509, 42.0764, 45.6423, 59.7668, 35.1919,
44.3197, 47.9137]
index = pd.DatetimeIndex(start='2005', end='2010-Q4', freq='QS')
ts_data = pd.Series(data, index)
ts_data.index.name = 'datetime_index'
ts_data.name = 'n_visitors'
return ts_data
def getData2(self):
# data with trend but no seasonality
# no. of annual passengers of air carriers registered in Australia
data = [17.5534, 21.86, 23.8866, 26.9293, 26.8885, 28.8314, 30.0751, 30.9535, 30.1857, 31.5797, 32.5776,
33.4774, 39.0216, 41.3864, 41.5966]
index = pd.DatetimeIndex(start='1990', end='2005', freq='A')
ts_data = pd.Series(data, index)
ts_data.index.name = 'datetime_index'
ts_data.name = 'n_passengers'
return ts_data
def getData3(self):
# data with no trend and no seasonality
# Oil production in Saudi Arabia
data = [446.6565, 454.4733, 455.663, 423.6322, 456.2713, 440.5881, 425.3325, 485.1494, 506.0482, 526.792,
514.2689, 494.211]
index = pd.DatetimeIndex(start='1996', end='2008', freq='A')
ts_data = pd.Series(data, index)
ts_data.index.name = 'datetime_index'
ts_data.name = 'oil_production'
return ts_data
def getData4(self):
# Non Seasonal Data
data = [266,146,183,119,180,169,232,225,193,123,337,186,194,150,210,273,191,287,
226,304,290,422,265,342,340,440,316,439,401,390,490,408,490,420,520,480]
index = pd.DatetimeIndex(start='2016-01-01', end='2018-12-01', freq='MS')
ts_data = pd.Series(data, index)
ts_data.index.name = 'date_index'
ts_data.name = 'cars_sold'
return ts_data
def getData5(self):
# Seasonal Data
data = [112, 118, 132, 129, 121, 135, 148, 148, 136, 119, 104, 118, 115, 126, 141, 135, 125, 149, 170, 170, 158, 133, 114, 140, 145, 150,
178, 163, 172, 178, 199, 199, 184, 162, 146, 166, 171, 180, 193, 181, 183, 218, 230, 242, 209, 191, 172, 194, 196, 196, 236, 235,
229, 243, 264, 272, 237, 211, 180, 201, 204, 188, 235, 227, 234, 264, 302, 293, 259, 229, 203, 229, 242, 233, 267, 269, 270, 315,
364, 347, 312, 274, 237, 278, 284, 277, 317, 313, 318, 374, 413, 405, 355, 306, 271, 306, 315, 301, 356, 348, 355, 422, 465, 467,
404, 347, 305, 336, 340, 318, 362, 348, 363, 435, 491, 505, 404, 359, 310, 337, 360, 342, 406, 396, 420, 472, 548, 559, 463, 407,
362, 405, 417, 391, 419, 461, 472, 535, 622, 606, 508, 461, 390, 432]
        index = pd.DatetimeIndex(start='1949-01-01', end='1960-12-01', freq='MS')
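# Illustrative sketch (an addition, not part of the truncated dataset row above): fitting the
# quarterly series from getData1() with statsmodels' ExponentialSmoothing produces the kind of
# results object that the ExponentialSmoothingToPMML exporter imported above consumes; the
# additive trend/seasonal settings below are assumptions, not taken from the original tests.
if __name__ == '__main__':
    helper = StatsmodelsDataHelper()
    ts = helper.getData1()  # quarterly visitor counts with trend and seasonality
    model = ExponentialSmoothing(ts, trend='add', seasonal='add', seasonal_periods=4)
    results = model.fit()
    print(results.fittedvalues.head())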
# -*- coding: utf-8 -*-
"""ecommerece analysis.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1zCEdCdcE2Iwg30UpGFp4Il5zrriNBp2y
"""
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder , StandardScaler, MinMaxScaler
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn import svm
from sklearn.ensemble import RandomForestClassifier
import xgboost as xgb
from keras.models import Sequential
from keras.layers import Dense
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
from sklearn.feature_selection import chi2
from scipy.stats import chi2_contingency
dataset = pd.read_csv('/content/Train.csv')
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.7.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # QA queries on new CDR row suppression
#
# Verify all rows identified for suppression in the deid dataset have been set to null.
#
# (Query results: This query returned no results.)
import urllib
import pandas as pd
pd.options.display.max_rows = 120
# + tags=["parameters"]
# Parameters
project_id = ""
deid_cdr = ""
com_cdr =""
# -
# df will have a summary in the end
df = pd.DataFrame(columns = ['query', 'result'])
# # 1 Verify all fields identified for suppression in the OBSERVATION table have been removed from the table in the deid dataset.
query = f'''
WITH df1 AS (
SELECT observation_id
FROM `{project_id}.{com_cdr}.observation`
WHERE observation_source_value LIKE '%SitePairing%'
OR observation_source_value LIKE '%ArizonaSpecific%'
OR observation_source_value LIKE 'EastSoutheastMichigan%'
OR observation_source_value LIKE 'SitePairing_%'
)
SELECT
SUM(CASE WHEN value_as_string IS NOT NULL THEN 1 ELSE 0 END) AS n_value_as_string_not_null,
SUM(CASE WHEN value_source_value IS NOT NULL THEN 1 ELSE 0 END) AS n_value_source_value_not_null,
SUM(CASE WHEN observation_source_value IS NOT NULL THEN 1 ELSE 0 END) AS n_observation_source_value_not_null
FROM `{project_id}.{deid_cdr}.observation`
WHERE observation_id IN (SELECT observation_id FROM df1)
AND ((observation_source_value IS NOT NULL)
OR (value_source_value IS NOT NULL)
OR (value_as_string IS NOT NULL))
'''
df1 = pd.read_gbq(query, dialect='standard')
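# Illustrative continuation (the notebook row is truncated above). Following the convention set
# up earlier ("df will have a summary in the end"), each check usually appends a pass/fail row
# to df; the label strings below are assumptions.
if df1.sum().sum() == 0:
    df = df.append({'query': 'Query1 OBSERVATION suppressed fields', 'result': 'PASS'},
                   ignore_index=True)
else:
    df = df.append({'query': 'Query1 OBSERVATION suppressed fields', 'result': 'Failure'},
                   ignore_index=True)
df1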
import pandas as pd
import numpy as np
import warnings
warnings.filterwarnings('ignore')
from sklearn.model_selection import train_test_split
import torch
from torch.utils.data import Dataset
from transformers import Trainer,TrainingArguments
from transformers import BertTokenizer
from transformers import BertForSequenceClassification
from sklearn.metrics import accuracy_score, f1_score
from transformers import Trainer
class SarcasmDataset(torch.utils.data.Dataset):
def __init__(self, encodings, labels):
self.encodings = encodings
self.labels = labels
def __getitem__(self, idx):
item = {key: torch.tensor(val[idx]) for key, val in self.encodings.items()}
item['labels'] = torch.tensor(self.labels[idx])
return item
def __len__(self):
return len(self.labels)
## Test Dataset
class SarcasmTestDataset(torch.utils.data.Dataset):
def __init__(self, encodings):
self.encodings = encodings
def __getitem__(self, idx):
item = {key: torch.tensor(val[idx]) for key, val in self.encodings.items()}
return item
def __len__(self):
return len(self.encodings)
def compute_metrics(p):
pred, labels = p
pred = np.argmax(pred, axis=1)
accuracy = accuracy_score(y_true=labels, y_pred=pred)
f1 = f1_score(labels, pred)
return {"accuracy": accuracy,"f1_score":f1}
def labels(x):
if x == 0:
return 0
else:
return 1
if __name__ == '__main__':
path = '../../Data/Train_Dataset.csv'
path_test = '../../Data/Test_Dataset.csv'
    df = pd.read_csv(path)
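    # Illustrative sketch of the remaining steps (the row above is truncated). The column names
    # 'tweet' and 'sarcastic' and the training hyper-parameters are assumptions, not taken from
    # the original script; the pieces defined above (SarcasmDataset, compute_metrics, labels)
    # are wired together the way the imports suggest.
    df['label'] = df['sarcastic'].apply(labels)
    train_texts, val_texts, train_labels, val_labels = train_test_split(
        df['tweet'].tolist(), df['label'].tolist(), test_size=0.2, random_state=42)
    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
    train_enc = tokenizer(train_texts, truncation=True, padding=True, max_length=128)
    val_enc = tokenizer(val_texts, truncation=True, padding=True, max_length=128)
    train_ds = SarcasmDataset(train_enc, train_labels)
    val_ds = SarcasmDataset(val_enc, val_labels)
    model = BertForSequenceClassification.from_pretrained('bert-base-uncased', num_labels=2)
    args = TrainingArguments(output_dir='./results', num_train_epochs=2,
                             per_device_train_batch_size=16, evaluation_strategy='epoch')
    trainer = Trainer(model=model, args=args, train_dataset=train_ds,
                      eval_dataset=val_ds, compute_metrics=compute_metrics)
    trainer.train()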
import logging
import numpy as np
import pandas as pd
import pandas.testing as pdt
import pytest
import sentry_sdk
from solarforecastarbiter import utils
def _make_aggobs(obsid, ef=pd.Timestamp('20191001T1100Z'),
eu=None, oda=None):
return {
'observation_id': obsid,
'effective_from': ef,
'effective_until': eu,
'observation_deleted_at': oda
}
nindex = pd.date_range(start='20191004T0000Z',
freq='1h', periods=10)
@pytest.fixture()
def ids():
return ['f2844284-ea0a-11e9-a7da-f4939feddd82',
'f3e310ba-ea0a-11e9-a7da-f4939feddd82',
'09ed7cf6-ea0b-11e9-a7da-f4939feddd82',
'0fe9f2ba-ea0b-11e9-a7da-f4939feddd82',
'67ea9200-ea0e-11e9-832b-f4939feddd82']
@pytest.fixture()
def aggobs(ids):
return tuple([
_make_aggobs(ids[0]),
_make_aggobs(ids[1], pd.Timestamp('20191004T0501Z')),
_make_aggobs(ids[2], eu=pd.Timestamp('20191004T0400Z')),
_make_aggobs(ids[2], pd.Timestamp('20191004T0700Z'),
eu=pd.Timestamp('20191004T0800Z')),
_make_aggobs(ids[2], pd.Timestamp('20191004T0801Z')),
_make_aggobs(ids[3], oda=pd.Timestamp('20191005T0000Z')),
_make_aggobs(ids[4], oda=pd.Timestamp('20191009T0000Z'),
eu=pd.Timestamp('20191003T0000Z'))
])
def test_compute_aggregate(aggobs, ids):
data = {id_: pd.DataFrame({'value': [1] * 10, 'quality_flag': [0] * 10},
index=nindex)
for id_ in ids[:3]}
agg = utils.compute_aggregate(data, '1h', 'ending',
'UTC', 'sum', aggobs[:-2])
pdt.assert_frame_equal(agg, pd.DataFrame(
{'value': pd.Series([2.0, 2.0, 2.0, 2.0, 2.0, 1.0, 2.0, 3.0, 3.0, 3.0],
index=nindex),
'quality_flag': pd.Series([0]*10, index=nindex)})
)
def test_compute_aggregate_missing_from_data(aggobs, ids):
data = {id_: pd.DataFrame({'value': [1] * 10, 'quality_flag': [0] * 10},
index=nindex)
for id_ in ids[:3]}
aggobs = list(aggobs[:-2]) + [
_make_aggobs('09ed7cf6-ea0b-11e9-a7da-f4939fed889')]
with pytest.raises(KeyError):
utils.compute_aggregate(data, '1h', 'ending',
'UTC', 'sum', aggobs)
def test_compute_aggregate_empty_data(aggobs, ids):
data = {}
with pytest.raises(KeyError):
utils.compute_aggregate(data, '1h', 'ending',
'UTC', 'sum', aggobs[:2], nindex)
@pytest.mark.filterwarnings('ignore::UserWarning')
def test_compute_aggregate_missing_data(aggobs, ids):
data = {id_: pd.DataFrame({'value': [1] * 10, 'quality_flag': [0] * 10},
index=nindex)
for id_ in ids[:3]}
data[ids[-1]] = pd.DataFrame({'value': [1] * 8, 'quality_flag': [0] * 8},
index=nindex[:-2])
aggobs = list(aggobs[:-2]) + [_make_aggobs(ids[-1])]
agg = utils.compute_aggregate(data, '1h', 'ending',
'UTC', 'sum', aggobs)
pdt.assert_frame_equal(agg, pd.DataFrame(
{'value': pd.Series(
[3.0, 3.0, 3.0, 3.0, 3.0, 2.0, 3.0, 4.0, None, None],
index=nindex),
'quality_flag': pd.Series([0]*10, index=nindex)})
)
def test_compute_aggregate_deleted_not_removed(aggobs, ids):
data = {id_: pd.DataFrame({'value': [1] * 10, 'quality_flag': [0] * 10},
index=nindex)
for id_ in ids}
with pytest.raises(ValueError):
utils.compute_aggregate(data, '1h', 'ending',
'UTC', 'sum', aggobs)
def test_compute_aggregate_deleted_not_removed_yet(aggobs, ids):
data = {id_: pd.DataFrame({'value': [1] * 10, 'quality_flag': [0] * 10},
index=nindex)
for id_ in ids[:3]}
# with last aggobs, would try and get data before effective_until,
# but was deleted, so raise error
aggobs = list(aggobs[:-2]) + [
_make_aggobs(ids[4], oda=pd.Timestamp('20191009T0000Z'),
eu=pd.Timestamp('20191004T0700Z'))]
with pytest.raises(ValueError):
utils.compute_aggregate(data, '1h', 'ending',
'UTC', 'sum', aggobs)
def test_compute_aggregate_deleted_but_removed_before(aggobs, ids):
data = {id_: pd.DataFrame({'value': [1] * 10, 'quality_flag': [0] * 10},
index=nindex)
for id_ in ids[:3]}
# aggobs[-1] properly removed
aggobs = list(aggobs[:-2]) + [aggobs[-1]]
agg = utils.compute_aggregate(data, '1h', 'ending',
'UTC', 'sum', aggobs)
pdt.assert_frame_equal(agg, pd.DataFrame(
{'value': pd.Series([2.0, 2.0, 2.0, 2.0, 2.0, 1.0, 2.0, 3.0, 3.0, 3.0],
index=nindex),
'quality_flag': pd.Series([0]*10, index=nindex)}))
def test_compute_aggregate_mean(aggobs, ids):
data = {id_: pd.DataFrame({'value': [1] * 10, 'quality_flag': [0] * 10},
index=nindex)
for id_ in ids[:3]}
agg = utils.compute_aggregate(data, '1h', 'ending',
'UTC', 'mean', aggobs[:-2])
pdt.assert_frame_equal(agg, pd.DataFrame(
{'value': pd.Series([1.0] * 10, index=nindex),
'quality_flag': pd.Series([0]*10, index=nindex)})
)
@pytest.mark.filterwarnings('ignore::UserWarning')
def test_compute_aggregate_no_overlap(ids):
data = {ids[0]: pd.DataFrame(
{'value': [1, 2, 3], 'quality_flag': [2, 10, 338]},
index=pd.DatetimeIndex([
'20191002T0100Z', '20191002T0130Z', '20191002T0230Z'])),
ids[1]: pd.DataFrame(
{'value': [3, 2, 1], 'quality_flag': [9, 880, 10]},
index=pd.DatetimeIndex([
'20191002T0200Z', '20191002T0230Z', '20191002T0300Z']))}
aggobs = [_make_aggobs(ids[0]),
_make_aggobs(ids[1], pd.Timestamp('20191002T0200Z'))]
agg = utils.compute_aggregate(data, '30min', 'ending',
'UTC', 'median', aggobs)
expected = pd.DataFrame(
{'value': [1.0, 2.0, None, 2.5, None],
'quality_flag': [2, 10, 9, 338 | 880, 10]},
index=pd.DatetimeIndex([
'20191002T0100Z', '20191002T0130Z', '20191002T0200Z',
'20191002T0230Z', '20191002T0300Z']))
pdt.assert_frame_equal(agg, expected)
def test_compute_aggregate_missing_before_effective(ids):
data = {ids[0]: pd.DataFrame(
{'value': [1, 2, 3, 0, 0], 'quality_flag': [2, 10, 338, 0, 0]},
index=pd.DatetimeIndex([
'20191002T0100Z', '20191002T0130Z', '20191002T0200Z',
'20191002T0230Z', '20191002T0300Z'])),
ids[1]: pd.DataFrame(
{'value': [None, 2.0, 1.0], 'quality_flag': [0, 880, 10]},
index=pd.DatetimeIndex([
'20191002T0200Z', '20191002T0230Z', '20191002T0300Z']))}
aggobs = [_make_aggobs(ids[0]),
_make_aggobs(ids[1], pd.Timestamp('20191002T0201Z'))]
agg = utils.compute_aggregate(data, '30min', 'ending',
'UTC', 'max', aggobs)
expected = pd.DataFrame(
{'value': [1.0, 2.0, 3.0, 2.0, 1.0],
'quality_flag': [2, 10, 338, 880, 10]},
index=pd.DatetimeIndex([
'20191002T0100Z', '20191002T0130Z', '20191002T0200Z',
'20191002T0230Z', '20191002T0300Z']))
pdt.assert_frame_equal(agg, expected)
def test_compute_aggregate_bad_cols():
data = {'a': pd.DataFrame([0], index=pd.DatetimeIndex(
['20191001T1200Z']))}
with pytest.raises(KeyError):
utils.compute_aggregate(data, '1h', 'ending', 'UTC',
'mean', [_make_aggobs('a')])
def test_compute_aggregate_index_provided(aggobs, ids):
data = {id_: pd.DataFrame({'value': [1] * 10, 'quality_flag': [0] * 10},
index=nindex)
for id_ in ids[:3]}
the_index = nindex.copy()[::2]
agg = utils.compute_aggregate(data, '1h', 'ending',
'UTC', 'sum', aggobs[:-2], the_index)
pdt.assert_frame_equal(agg, pd.DataFrame(
{'value': pd.Series([2.0, 2.0, 2.0, 2.0, 3.0],
index=the_index),
'quality_flag': pd.Series([0]*5, index=the_index)})
)
@pytest.mark.parametrize('dfindex,missing_idx', [
(pd.date_range(start='20191004T0000Z', freq='1h', periods=11), -1),
(pd.date_range(start='20191003T2300Z', freq='1h', periods=11), 0),
])
def test_compute_aggregate_missing_values_with_index(
aggobs, ids, dfindex, missing_idx):
data = {id_: pd.DataFrame({'value': [1] * 10, 'quality_flag': [0] * 10},
index=nindex)
for id_ in ids[:3]}
agg = utils.compute_aggregate(data, '1h', 'ending',
'UTC', 'sum', aggobs[:-2], dfindex)
assert pd.isnull(agg['value'][missing_idx])
def test_compute_aggregate_partial_missing_values_with_index(aggobs, ids):
data = {id_: pd.DataFrame({'value': [1] * 10, 'quality_flag': [0] * 10},
index=nindex)
for id_ in ids[:2]}
data[ids[2]] = pd.DataFrame({'value': [1] * 5, 'quality_flag': [0] * 5},
index=nindex[5:])
agg = utils.compute_aggregate(data, '1h', 'ending',
'UTC', 'sum', aggobs[:-2], nindex)
expected = pd.DataFrame(
{'value': pd.Series(
[np.nan, np.nan, np.nan, np.nan, np.nan, 1.0, 2.0, 3.0, 3.0, 3.0],
index=nindex),
'quality_flag': pd.Series([0]*10, index=nindex)}
)
pdt.assert_frame_equal(agg, expected)
def test_compute_aggregate_missing_obs_with_index(aggobs, ids):
data = {id_: pd.DataFrame({'value': [1] * 10, 'quality_flag': [0] * 10},
index=nindex)
for id_ in ids[:2]}
with pytest.raises(KeyError):
utils.compute_aggregate(data, '1h', 'ending', 'UTC', 'sum',
aggobs[:-2], nindex)
def test_compute_aggregate_out_of_effective(aggobs, ids):
limited_aggobs = [aggob
for aggob in aggobs
if aggob['effective_until'] is not None]
data = {id_: pd.DataFrame({'value': [1] * 10, 'quality_flag': [0] * 10},
index=nindex)
for id_ in ids[:3]}
max_time = pd.Series([o['effective_until'] for o in limited_aggobs]).max()
ooe_index = pd.date_range(
max_time + pd.Timedelta('1H'),
max_time + pd.Timedelta('25H'),
freq='60min'
)
with pytest.raises(ValueError) as e:
utils.compute_aggregate(data, '1h', 'ending', 'UTC', 'sum',
limited_aggobs, ooe_index)
assert str(e.value) == 'No effective observations in data'
def test__observation_valid(aggobs):
out = utils._observation_valid(
nindex, 'f2844284-ea0a-11e9-a7da-f4939feddd82', aggobs)
pdt.assert_series_equal(out, pd.Series(True, index=nindex))
def test__observation_valid_ended(aggobs):
out = utils._observation_valid(
nindex, 'f3e310ba-ea0a-11e9-a7da-f4939feddd82', aggobs)
pdt.assert_series_equal(out, pd.Series([False] * 6 + [True] * 4,
index=nindex))
def test__observation_valid_many(aggobs):
out = utils._observation_valid(
nindex, '09ed7cf6-ea0b-11e9-a7da-f4939feddd82', aggobs)
pdt.assert_series_equal(out, pd.Series(
[True, True, True, True, True, False, False, True, True, True],
index=nindex))
def test__observation_valid_deleted(aggobs):
with pytest.raises(ValueError):
utils._observation_valid(
nindex, '0fe9f2ba-ea0b-11e9-a7da-f4939feddd82', aggobs)
def test__observation_valid_deleted_before(aggobs):
out = utils._observation_valid(
nindex, '67ea9200-ea0e-11e9-832b-f4939feddd82', aggobs)
pdt.assert_series_equal(out, pd.Series(False, index=nindex))
@pytest.mark.parametrize('length,label,expected', [
('15min', 'ending', pd.date_range(start='20191004T0700Z',
end='20191004T0745Z',
freq='15min')),
('15min', 'beginning', pd.date_range(
start='20191004T0700Z', end='20191004T0745Z',
freq='15min')),
('1h', 'ending', pd.DatetimeIndex(['20191004T0700Z', '20191004T0800Z'])),
('1h', 'beginning', pd.DatetimeIndex(['20191004T0700Z'])),
('20min', 'ending', pd.DatetimeIndex([
'20191004T0700Z', '20191004T0720Z', '20191004T0740Z',
'20191004T0800Z'])),
('20min', 'beginning', pd.DatetimeIndex([
'20191004T0700Z', '20191004T0720Z', '20191004T0740Z'])),
])
def test__make_aggregate_index(length, label, expected):
test_data = {
0: pd.DataFrame(range(5), index=pd.date_range(
'20191004T0700Z', freq='7min', periods=5)), # end 35
1: pd.DataFrame(range(4), index=pd.date_range(
'20191004T0015-0700', freq='10min', periods=4))} # end 45
out = utils._make_aggregate_index(test_data, length, label, 'UTC')
pdt.assert_index_equal(out, expected)
@pytest.mark.parametrize('length,label,expected', [
('15min', 'ending', pd.date_range(start='20191004T0715Z',
end='20191004T0745Z',
freq='15min')),
('15min', 'beginning', pd.date_range(
start='20191004T0700Z', end='20191004T0730Z',
freq='15min')),
('1h', 'ending', pd.DatetimeIndex(['20191004T0800Z'])),
('1h', 'beginning', pd.DatetimeIndex(['20191004T0700Z'])),
('20min', 'ending', pd.DatetimeIndex([
'20191004T0720Z', '20191004T0740Z'])),
('20min', 'beginning', pd.DatetimeIndex([
'20191004T0700Z', '20191004T0720Z'])),
])
def test__make_aggregate_index_offset_right(length, label, expected):
test_data = {
0: pd.DataFrame(range(6), index=pd.date_range(
'20191004T0701Z', freq='7min', periods=6)) # end 35
}
out = utils._make_aggregate_index(test_data, length, label, 'UTC')
pdt.assert_index_equal(out, expected)
@pytest.mark.parametrize('length,label,expected', [
('15min', 'ending', pd.date_range(start='20191004T0700Z',
end='20191004T0745Z',
freq='15min')),
('15min', 'beginning', pd.date_range(
start='20191004T0645Z', end='20191004T0730Z',
freq='15min')),
('1h', 'ending', pd.DatetimeIndex(['20191004T0700Z', '20191004T0800Z'])),
('1h', 'beginning', pd.DatetimeIndex(['20191004T0600Z',
'20191004T0700Z'])),
('20min', 'ending', pd.DatetimeIndex([
'20191004T0700Z', '20191004T0720Z', '20191004T0740Z'])),
('20min', 'beginning', pd.DatetimeIndex([
'20191004T0640Z', '20191004T0700Z', '20191004T0720Z'])),
('36min', 'ending', pd.DatetimeIndex(['20191004T0712Z',
'20191004T0748Z'])),
('36min', 'beginning', pd.DatetimeIndex(['20191004T0636Z',
'20191004T0712Z'])),
])
def test__make_aggregate_index_offset_left(length, label, expected):
test_data = {
0: pd.DataFrame(range(6), index=pd.date_range(
'20191004T0658Z', freq='7min', periods=6)) # end 32
}
out = utils._make_aggregate_index(test_data, length, label, 'UTC')
pdt.assert_index_equal(out, expected)
def test__make_aggregate_index_tz():
length = '30min'
label = 'beginning'
test_data = {
0: pd.DataFrame(range(6), index=pd.date_range(
'20190101T1600Z', freq='5min', periods=6)) # end 30
}
expected = pd.DatetimeIndex(['20190101T0900'],
tz='America/Denver')
out = utils._make_aggregate_index(test_data, length, label,
'America/Denver')
pdt.assert_index_equal(out, expected)
def test__make_aggregate_index_invalid_length():
length = '33min'
label = 'beginning'
test_data = {
0: pd.DataFrame(range(6), index=pd.date_range(
'20190101T0158Z', freq='7min', periods=6)) # end 32
}
with pytest.raises(ValueError):
utils._make_aggregate_index(test_data, length, label, 'UTC')
def test__make_aggregate_index_instant():
length = '30min'
label = 'instant'
test_data = {
0: pd.DataFrame(range(6), index=pd.date_range(
'20190101T0100Z', freq='10min', periods=6)) # end 32
}
with pytest.raises(ValueError):
utils._make_aggregate_index(test_data, length, label, 'UTC')
@pytest.mark.parametrize('start,end', [
(pd.Timestamp('20190101T0000Z'), pd.Timestamp('20190102T0000')),
(pd.Timestamp('20190101T0000'), pd.Timestamp('20190102T0000Z')),
(pd.Timestamp('20190101T0000'), pd.Timestamp('20190102T0000')),
])
def test__make_aggregate_index_localization(start, end):
length = '30min'
label = 'ending'
test_data = {
0: pd.DataFrame(range(1), index=pd.DatetimeIndex([start])),
1: pd.DataFrame(range(1), index=pd.DatetimeIndex([end])),
}
with pytest.raises(TypeError):
utils._make_aggregate_index(test_data, length, label, 'UTC')
@pytest.mark.parametrize('inp,oup', [
(pd.DataFrame(dtype=float), pd.Series(dtype=float)),
(pd.DataFrame(index=pd.DatetimeIndex([]), dtype=float),
pd.DataFrame(dtype=float)),
(pd.Series([0, 1]), pd.Series([0, 1])),
(pd.DataFrame([[0, 1], [1, 2]]), pd.DataFrame([[0, 1], [1, 2]])),
pytest.param(
        pd.Series([0, 1]),
import pandas as pd
import numpy as np
import re
from law.utils import *
import jieba.posseg as pseg
import datetime
import mysql.connector
class case_reader:
def __init__(self, user, password, n=1000, preprocessing=False):
'''
n is total types,
preprocessing: whether needs preprocessing
'''
# old version: use file_path
# self.file_path = file_path
# self.data = pd.read_csv(self.file_path, encoding='utf-8', engine='python')
# new version: directly reading data
# connect database
self.n = n
self.preprocessing = preprocessing
print("Connecting to Server...")
cnx = mysql.connector.connect(user=user, password=password,
host="cdb-74dx1ytr.gz.tencentcdb.com",
port=10008,
database='law')
cursor = cnx.cursor(buffered=True)
print("Server Connected.")
# read database
if n>=0:
query = 'SELECT * FROM Civil LIMIT ' + str(self.n) + ';'
else:
query = 'SELECT * FROM Civil;'
print("Start Reading Data...")
self.data = pd.read_sql(query,con=cnx)
print("Read Data Successful...")
self.data_len = len(self.data)
print("This dataset has ", self.data_len, "rows of data.")
# np.nan replace missing value
self.data = self.data.fillna(np.nan)
def return_data(self):
if self.preprocessing:
self.preprocess()
return self.data
def number2(self):
'''
        This function changes '庭审程序' (trial procedure) into one-hot encodings
-- Klaus
'''
xingfabiangeng = np.zeros(self.data_len)
yishen = np.zeros(self.data_len)
ershen = np.zeros(self.data_len)
fushen = np.zeros(self.data_len)
qita = np.zeros(self.data_len)
for i in range(self.data_len):
if self.data['proc'][i] == "刑罚变更":
xingfabiangeng[i] += 1
if self.data['proc'][i] == "一审":
yishen[i] += 1
if self.data['proc'][i] == "二审":
ershen[i] += 1
if self.data['proc'][i] == "复核":
fushen[i] += 1
if self.data['proc'][i] == "其他" :
qita[i] += 1
self.data['proc_是否_刑罚变更'] = xingfabiangeng
self.data['proc_是否_一审'] = yishen
self.data['proc_是否_二审'] = ershen
self.data['proc_是否_复核'] = fushen
self.data['proc_是否_其他'] = qita
#print(xingfabiangeng)
#print(yishen)
#print(ershen)
#print(qita)
del xingfabiangeng, yishen, ershen, fushen, qita
def number3(self):
'''
        This function maps '案由' (cause of action) to an integer index stored in 'class_index'
'''
reasons = ['机动车交通事故责任纠纷' ,'物件损害责任纠纷' ,'侵权责任纠纷', '产品责任纠纷', '提供劳务者受害责任纠纷' ,'医疗损害责任纠纷',
'地面施工、地下设施损害责任纠纷', '饲养动物损害责任纠纷' ,'产品销售者责任纠纷', '因申请诉中财产保全损害责任纠纷', '教育机构责任纠纷',
'违反安全保障义务责任纠纷' , '网络侵权责任纠纷' ,'因申请诉前财产保全损害责任纠纷' ,'物件脱落、坠落损害责任纠纷',
'因申请诉中证据保全损害责任纠纷' ,'建筑物、构筑物倒塌损害责任纠纷' ,'提供劳务者致害责任纠纷' ,'产品生产者责任纠纷',
'公共场所管理人责任纠纷', '公证损害责任纠纷', '用人单位责任纠纷' ,'触电人身损害责任纠纷', '义务帮工人受害责任纠纷',
'高度危险活动损害责任纠纷', '噪声污染责任纠纷' ,'堆放物倒塌致害责任纠纷', '公共道路妨碍通行损害责任纠纷' ,'见义勇为人受害责任纠纷',
'医疗产品责任纠纷' ,'监护人责任纠纷', '水上运输人身损害责任纠纷', '环境污染责任纠纷', '因申请先予执行损害责任纠纷',
'铁路运输人身损害责任纠纷' ,'水污染责任纠纷', '林木折断损害责任纠纷', '侵害患者知情同意权责任纠纷' ,'群众性活动组织者责任纠纷',
'土壤污染责任纠纷']
mreason = np.zeros(self.data_len)
for i in range(self.data_len):
for j,reason in enumerate(reasons):
if self.data['class'][i] == reasons[j]:
mreason[i] +=j+1
self.data['class_index'] = mreason
del mreason
def number4(self):
'''
        This function changes '文书类型' (document type) into one-hot encodings
'''
panjueshu = np.zeros(self.data_len)
caidingshu = np.zeros(self.data_len)
for i in range(self.data_len):
if self.data['doc_type'][i] == "判决书":
panjueshu[i] += 1
if self.data['doc_type'][i] == "裁定书":
caidingshu[i] += 1
        self.data['doc_type_是否_判决书'] = panjueshu
        self.data['doc_type_是否_裁定书'] = caidingshu
del panjueshu, caidingshu
def number5(self):
'''
        Split court_name into province, city, and court level
-- <NAME>
'''
level = [] # court level
distinct = [] # province
block = [] # city
for x in self.data['court_name']:
if pd.isna(x):#if empty
level.append(None)
distinct.append(None)
block.append(None)
else:
# search “省”(province)
a = re.compile(r'.*省')
b = a.search(x)
if b == None:
distinct.append(None)
else:
distinct.append(b.group(0))
x = re.sub(b.group(0), '', x)
# search "市"(city)
a = re.compile(r'.*市')
b = a.search(x)
if b == None:
block.append(None)
else:
block.append(b.group(0))
# search“级”(level)
a = re.compile(r'.级')
b = a.search(x)
if b == None:
level.append(None)
else:
level.append(b.group(0))
newdict = {
'法院所在省': distinct,
'法院所在市': block,
'法院等级': level
}
# DataFrame
newdata = pd.DataFrame(newdict)
self.data = pd.concat([self.data, newdata], axis=1)
del newdata, level, distinct, block
def number6(self):
'''
        Split the judgment date into year, month, and day
:return:
'''
year = []
month = []
day = []
for x in self.data['date']:
# year
a = re.compile(r'.*年')
b = a.search(str(x))
if b == None:
year.append(None)
else:
year.append(b.group(0))
x = re.sub(b.group(0), '', x)
# month
a1 = re.compile(r'.*月')
b1 = a1.search(str(x))
if b1 == None:
month.append(None)
else:
month.append(b1.group(0))
x = re.sub(b1.group(0), '', x)
# day
a2 = re.compile(r'.*日')
b2 = a2.search(str(x))
if b2 == None:
day.append(None)
else:
day.append(b2.group(0))
newdict = {
'判决年份': year,
'判决月份': month,
'判决日期': day
}
# DataFrame
newdata = pd.DataFrame(newdict)
self.data = pd.concat([self.data, newdata], axis=1)
del year, month, day
    def number7(self): # four one-hot columns: procuratorate, legal person, natural person, other
'''
--<NAME>
'''
self.data['原告_是否_检察院'] = 0
self.data['原告_是否_法人'] = 0
self.data['原告_是否_自然人'] = 0
self.data['原告_是否_其他'] = 0
pattern = r'(?::|:|。|、|\s|,|,)\s*'
jcy_pattern = re.compile(r'.*检察院')
gs_pattern = re.compile(r'.*公司')
for i in range(len(self.data['plantiff'])):
if pd.isna(self.data['plantiff'][i]):
continue
self.data['plantiff'][i] = re.sub(' ', '', self.data['plantiff'][i])
result_list = re.split(pattern, self.data['plantiff'][i])
for x in result_list:
temp1 = jcy_pattern.findall(x)
temp2 = gs_pattern.findall(x)
if len(temp1) != 0:
self.data['原告_是否_检察院'][i] = 1
if (0 < len(x) <= 4):
self.data['原告_是否_自然人'][i] = 1
if ((len(temp1) != 0) or len(temp2) != 0):
self.data['原告_是否_法人'][i] = 1
if (len(x) > 4 and len(temp1) == 0 and len(temp2) == 0):
self.data['原告_是否_其他'][i] = 1
def number8(self):
# http://www.sohu.com/a/249531167_656612
company = re.compile(r'.*?公司')
natural_person = np.zeros(self.data_len)
legal_person = np.zeros(self.data_len)
other_person = np.zeros(self.data_len)
for i in range(self.data_len):
            # show progress
#if i % 100 == 0:
# print(i)
            if pd.isna(self.data['defendant'][i]):
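# Illustrative usage sketch (an addition; the class above is cut off inside number8). The
# credentials are placeholders, and preprocessing=False is used because preprocess() is not
# visible in this excerpt.
if __name__ == '__main__':
    reader = case_reader(user='<db_user>', password='<db_password>', n=1000, preprocessing=False)
    civil_df = reader.return_data()
    print(civil_df.shape)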
#!/usr/bin/python
# encoding: utf-8
"""
@author: Ian
@file: stock_wrapper.py
@time: 2019-08-11 22:57
"""
from datetime import datetime, timedelta
import pandas as pd
import tushare as ts
def getColnames(s):
"""
    Helper for processing a column table copied from a web page.
s = \"""ts_code str TS股票代码
trade_date str 交易日期
close float 当日收盘价
turnover_rate float 换手率(%)
turnover_rate_f float 换手率(自由流通股)
circ_mv float 流通市值(万元)\"""
:param s:
:return:
'ts_code,trade_date...,total_mv,circ_mv'
['TS股票代码', ...'流通市值(万元)']
"""
return ','.join([i.split()[0] for i in s.split('\n')]), [i.split()[2] for i in s.split('\n')]
def df2dicts(df):
"""
df to dicts list
"""
dicts = []
for line in df.itertuples():
ll = list(df.columns)
dicts.append(dict(zip(ll, list(line)[1:])))
return dicts
def df2dicts_stock(df):
"""
df to dicts list
"""
dicts = []
for line in df.itertuples():
ll = ['trade_date'] + list(df.columns)
dicts.append(dict(zip(ll, [line[0]]+list(line)[1:])))
return dicts
class TushareWrapper:
def __init__(self):
TS_TOKEN = '5fd1639100f8a22b7f86e882e03192009faa72bae1ae93803e1172d5'
self._pro = ts.pro_api(TS_TOKEN)
def get_tushare_pro(self):
return self._pro
def get_stocks_info(self):
"""
        Query the list of all stocks currently listed and trading normally
:return:
"""
data = self._pro.stock_basic(exchange='', list_status='L', fields='ts_code,symbol,name,area,industry,list_date')
return data
def daily(self, trade_date):
"""
        Get daily quotes for all stocks on a given trade date
:param trade_date:
:return:
"""
return self._pro.daily(trade_date=trade_date)
def history(self, ts_code, start_date, end_date, asset='E', adj='qfq'):
"""
        Get historical quotes for a single security.
        The tushare interface returns at most 1800 trading days per call (about 250 trading
        days per year, i.e. roughly 7 years), so longer ranges are fetched in 6-year chunks.
        :param ts_code:
        :param start_date: str
        :param end_date: str
        :param asset: asset class: E stock, I SSE/SZSE index, C crypto, FT futures, FD fund, O option, CB convertible bond (v1.2.39); default E
        :param adj: price adjustment (stocks only): None unadjusted, qfq forward-adjusted, hfq backward-adjusted; default None
:return:
"""
pro = self._pro
startdate = datetime.strptime(start_date, '%Y%m%d')
enddate = datetime.strptime(end_date, '%Y%m%d')
df = pd.DataFrame()
while enddate.year - startdate.year > 6:
print(startdate.strftime('%Y%m%d'),
(startdate.replace(year=(startdate.year + 6)) - timedelta(days=1)).strftime('%Y%m%d'))
params = {'ts_code': ts_code,
'start_date': startdate.strftime('%Y%m%d'),
'asset': asset,
'api': self._pro,
'end_date': (startdate.replace(year=(startdate.year + 6)) - timedelta(days=1)).strftime('%Y%m%d'),
'adj': adj}
# if mode == 'index':
# t = pro.index_daily(**params)
# elif mode == 'stock':
# t = pro.daily(**params)
#
# elif mode == 'fund':
# t = pro.fund_daily(**params)
t = ts.pro_bar(**params)
if not df.empty:
df = pd.concat([df, t], axis=0, ignore_index=True)
else:
df = t
startdate = startdate.replace(year=(startdate.year + 6))
else:
print(startdate.strftime('%Y%m%d'), end_date)
params = {'ts_code': ts_code,
'start_date': startdate.strftime('%Y%m%d'),
'asset': asset,
'api': self._pro,
'end_date': end_date,
'adj': adj}
# if mode == 'index':
# t = pro.index_daily(**params)
# elif mode == 'stock':
# t = pro.daily(**params)
# elif mode == 'fund':
# t = pro.fund_daily(**params)
t = ts.pro_bar(**params)
if not df.empty:
df = pd.concat([df, t], axis=0, ignore_index=True)
else:
df = t
df = df.sort_values('trade_date')
df['trade_date'] = pd.to_datetime(df['trade_date'])
df.set_index('trade_date', inplace=True)
return df
def get_daily_basic(self, t_date):
f = 'ts_code,trade_date,close,turnover_rate,turnover_rate_f,volume_ratio,pe,pe_ttm,pb,ps,ps_ttm,total_share,float_share,free_share,total_mv,circ_mv'
df = self._pro.daily_basic(ts_code='', trade_date=t_date, fields=f)
df.columns = [
'ts_code',
'trade_date',
'close',
'换手率(%)',
'换手率(自由流通股)',
'量比',
'市盈率(总市值/净利润)',
'市盈率(TTM)',
'市净率(总市值/净资产)',
'市销率',
'市销率(TTM)',
'总股本',
'流通股本',
'自由流通股本',
'总市值',
'流通市值(万元)'
]
if not df.empty:
            print(f'Fetched {len(df)} rows of data!')
            df.trade_date = pd.to_datetime(df.trade_date)
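# Illustrative usage sketch (an addition; get_daily_basic above is cut off). The ticker and
# date range are placeholders, and a valid tushare token is required for real requests.
if __name__ == '__main__':
    tw = TushareWrapper()
    hist = tw.history(ts_code='600519.SH', start_date='20150101', end_date='20211231')
    print(hist[['open', 'close', 'vol']].tail())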
"""
2018 <NAME>
9.tcga-classify/classify-with-raw-expression.py
Predict if specific genes are mutated across TCGA tumors based on raw RNAseq gene
expression features. Also make predictions on cancer types using raw gene expression.
Usage:
python classify-with-raw-expression.py
Output:
Gene specific DataFrames storing ROC, precision-recall, and classifier coefficients
for raw gene expression models trained in their ability to predict mutations. The genes
used are the top 50 most mutated genes in TCGA PanCanAtlas. A gene was considered
mutated if a non-silent mutation was observed by the MC3 mutation calling effort. An
additional metrics file that stores all gene AUROC and AUPR is also saved. We also save
predictions and results for cancer-types.
"""
import os
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from scripts.tcga_util import (
get_threshold_metrics,
summarize_results,
extract_coefficients,
align_matrices,
process_y_matrix,
train_model,
process_y_matrix_cancertype,
check_status,
)
np.random.seed(123)
# Load constants
filter_prop = 0.05
filter_count = 15
folds = 5
num_features = 8000
max_iter = 100
seed = "123"
algorithm = "raw"
alphas = [0.1, 0.13, 0.15, 0.2, 0.25, 0.3]
l1_ratios = [0.15, 0.16, 0.2, 0.25, 0.3, 0.4]
# Load genes
file = os.path.join("data", "top50_mutated_genes.tsv")
genes_df = pd.read_table(file)
# Load data to build x and y matrices
base_url = "https://github.com/greenelab/pancancer/raw"
commit = "2a0683b68017fb226f4053e63415e4356191734f"
file = "{}/{}/data/sample_freeze.tsv".format(base_url, commit)
sample_freeze_df = pd.read_table(file, index_col=0)
file = "{}/{}/data/pancan_mutation_freeze.tsv.gz".format(base_url, commit)
mutation_df = pd.read_table(file, index_col=0)
file = "{}/{}/data/copy_number_loss_status.tsv.gz".format(base_url, commit)
copy_loss_df = pd.read_table(file, index_col=0)
file = "{}/{}/data/copy_number_gain_status.tsv.gz".format(base_url, commit)
copy_gain_df = pd.read_table(file, index_col=0)
file = "{}/{}/data/mutation_burden_freeze.tsv".format(base_url, commit)
mut_burden_df = pd.read_table(file, index_col=0)
# Load and process X matrix
base_dir = os.path.join("..", "0.expression-download", "data")
x_train_file = os.path.join(base_dir, "train_tcga_expression_matrix_processed.tsv.gz")
x_test_file = os.path.join(base_dir, "test_tcga_expression_matrix_processed.tsv.gz")
rnaseq_train_df = pd.read_table(x_train_file, index_col=0)
rnaseq_test_df = pd.read_table(x_test_file, index_col=0)
# Determine most variably expressed genes and subset X matrix
mad_file = os.path.join(base_dir, "tcga_mad_genes.tsv")
mad_genes_df = pd.read_table(mad_file)
mad_genes = mad_genes_df.iloc[0:num_features, ].gene_id.astype(str)
rnaseq_train_df = rnaseq_train_df.reindex(mad_genes, axis="columns")
rnaseq_test_df = rnaseq_test_df.reindex(mad_genes, axis="columns")
# Scale RNAseq matrix the same way RNAseq was scaled for compression algorithms
train_fitted_scaler = MinMaxScaler().fit(rnaseq_train_df)
rnaseq_train_df = pd.DataFrame(
train_fitted_scaler.transform(rnaseq_train_df),
columns=rnaseq_train_df.columns,
index=rnaseq_train_df.index,
)
test_fitted_scaler = MinMaxScaler().fit(rnaseq_test_df)
rnaseq_test_df = pd.DataFrame(
test_fitted_scaler.transform(rnaseq_test_df),
columns=rnaseq_test_df.columns,
index=rnaseq_test_df.index,
)
# Track total metrics for each gene in one file
metric_cols = [
"auroc",
"aupr",
"gene_or_cancertype",
"signal",
"z_dim",
"seed",
"algorithm",
"data_type",
]
for gene_idx, gene_series in genes_df.iterrows():
gene_name = gene_series.gene
classification = gene_series.classification
# Create list to store gene specific results
gene_auc_list = []
gene_aupr_list = []
gene_coef_list = []
gene_metrics_list = []
# Create directory for the gene
gene_dir = os.path.join("results", "mutation", gene_name)
os.makedirs(gene_dir, exist_ok=True)
# Check if gene has been processed already
check_file = os.path.join(gene_dir, "{}_raw_coefficients.tsv.gz".format(gene_name))
if check_status(check_file):
continue
# Process the y matrix for the given gene or pathway
y_mutation_df = mutation_df.loc[:, gene_name]
# Include copy number gains for oncogenes
# and copy number loss for tumor suppressor genes (TSG)
include_copy = True
if classification == "Oncogene":
y_copy_number_df = copy_gain_df.loc[:, gene_name]
elif classification == "TSG":
y_copy_number_df = copy_loss_df.loc[:, gene_name]
else:
y_copy_number_df = pd.DataFrame()
include_copy = False
y_df = process_y_matrix(
y_mutation=y_mutation_df,
y_copy=y_copy_number_df,
include_copy=include_copy,
gene=gene_name,
sample_freeze=sample_freeze_df,
mutation_burden=mut_burden_df,
filter_count=filter_count,
filter_prop=filter_prop,
output_directory=gene_dir,
hyper_filter=5,
)
for signal in ["signal", "shuffled"]:
if signal == "shuffled":
# Shuffle training data
x_train_raw_df = rnaseq_train_df.apply(
lambda x: np.random.permutation(x.tolist()),
axis=1,
result_type="expand",
)
x_train_raw_df.columns = rnaseq_train_df.columns
x_train_raw_df.index = rnaseq_train_df.index
# Shuffle testing data
x_test_raw_df = rnaseq_test_df.apply(
lambda x: np.random.permutation(x.tolist()),
axis=1,
result_type="expand",
)
x_test_raw_df.columns = rnaseq_test_df.columns
x_test_raw_df.index = rnaseq_test_df.index
else:
x_train_raw_df = rnaseq_train_df
x_test_raw_df = rnaseq_test_df
# Now, perform all the analyses for each X matrix
train_samples, x_train_df, y_train_df = align_matrices(
x_file_or_df=x_train_raw_df, y=y_df
)
test_samples, x_test_df, y_test_df = align_matrices(
x_file_or_df=x_test_raw_df, y=y_df
)
# Train the model
print(
"Training model... gene: {}, for raw {} features".format(gene_name, signal)
)
# Fit the model
cv_pipeline, y_pred_train_df, y_pred_test_df, y_cv_df = train_model(
x_train=x_train_df,
x_test=x_test_df,
y_train=y_train_df,
alphas=alphas,
l1_ratios=l1_ratios,
n_folds=folds,
max_iter=max_iter,
)
# Get metric predictions
y_train_results = get_threshold_metrics(
y_train_df.status, y_pred_train_df, drop=False
)
y_test_results = get_threshold_metrics(
y_test_df.status, y_pred_test_df, drop=False
)
y_cv_results = get_threshold_metrics(y_train_df.status, y_cv_df, drop=False)
# Get coefficients
coef_df = extract_coefficients(
cv_pipeline=cv_pipeline,
feature_names=x_train_df.columns,
signal=signal,
z_dim=num_features,
seed=seed,
algorithm=algorithm,
)
coef_df = coef_df.assign(gene=gene_name)
# Store all results
train_metrics_, train_roc_df, train_pr_df = summarize_results(
y_train_results, gene_name, signal, num_features, seed, algorithm, "train"
)
test_metrics_, test_roc_df, test_pr_df = summarize_results(
y_test_results, gene_name, signal, num_features, seed, algorithm, "test"
)
cv_metrics_, cv_roc_df, cv_pr_df = summarize_results(
y_cv_results, gene_name, signal, num_features, seed, algorithm, "cv"
)
# Compile summary metrics
metrics_ = [train_metrics_, test_metrics_, cv_metrics_]
metric_df_ = pd.DataFrame(metrics_, columns=metric_cols)
gene_metrics_list.append(metric_df_)
gene_auc_df = pd.concat([train_roc_df, test_roc_df, cv_roc_df])
gene_auc_list.append(gene_auc_df)
gene_aupr_df = pd.concat([train_pr_df, test_pr_df, cv_pr_df])
gene_aupr_list.append(gene_aupr_df)
gene_coef_list.append(coef_df)
gene_auc_df = pd.concat(gene_auc_list)
gene_aupr_df = pd.concat(gene_aupr_list)
gene_coef_df = pd.concat(gene_coef_list)
gene_metrics_df = pd.concat(gene_metrics_list)
file = os.path.join(
gene_dir, "{}_raw_auc_threshold_metrics.tsv.gz".format(gene_name)
)
gene_auc_df.to_csv(
file, sep="\t", index=False, compression="gzip", float_format="%.5g"
)
file = os.path.join(
gene_dir, "{}_raw_aupr_threshold_metrics.tsv.gz".format(gene_name)
)
gene_aupr_df.to_csv(
file, sep="\t", index=False, compression="gzip", float_format="%.5g"
)
gene_coef_df.to_csv(
check_file, sep="\t", index=False, compression="gzip", float_format="%.5g"
)
file = os.path.join(gene_dir, "{}_raw_classify_metrics.tsv.gz".format(gene_name))
gene_metrics_df.to_csv(
file, sep="\t", index=False, compression="gzip", float_format="%.5g"
)
# Provide one vs all classifications for all 33 different cancertypes in TCGA
# Track total metrics for each cancer-type in one file
count_list = []
for acronym in sample_freeze_df.DISEASE.unique():
# Create list to store cancer-type specific results
cancertype_auc_list = []
cancertype_aupr_list = []
cancertype_coef_list = []
cancertype_metrics_list = []
# Create directory for the cancer-type
cancertype_dir = os.path.join("results", "cancer-type", acronym)
os.makedirs(cancertype_dir, exist_ok=True)
y_df, count_df = process_y_matrix_cancertype(
acronym=acronym,
sample_freeze=sample_freeze_df,
mutation_burden=mut_burden_df,
hyper_filter=5,
)
# Track the status counts of all classifiers
count_list.append(count_df)
# Check if cancer-type has been processed already
check_file = (
os.path.join(cancertype_dir, "{}_raw_coefficients.tsv.gz".format(acronym))
)
if check_status(check_file):
continue
# Now, perform all the analyses for each X matrix
for signal in ["signal", "shuffled"]:
if signal == "shuffled":
# Shuffle training data
x_train_raw_df = rnaseq_train_df.apply(
lambda x: np.random.permutation(x.tolist()),
axis=1,
result_type="expand",
)
x_train_raw_df.columns = rnaseq_train_df.columns
x_train_raw_df.index = rnaseq_train_df.index
# Shuffle testing data
x_test_raw_df = rnaseq_test_df.apply(
lambda x: np.random.permutation(x.tolist()),
axis=1,
result_type="expand",
)
x_test_raw_df.columns = rnaseq_test_df.columns
x_test_raw_df.index = rnaseq_test_df.index
else:
x_train_raw_df = rnaseq_train_df
x_test_raw_df = rnaseq_test_df
# Now, perform all the analyses for each X matrix
train_samples, x_train_df, y_train_df = align_matrices(
x_file_or_df=x_train_raw_df,
y=y_df,
add_cancertype_covariate=False
)
test_samples, x_test_df, y_test_df = align_matrices(
x_file_or_df=x_test_raw_df,
y=y_df,
add_cancertype_covariate=False
)
# Train the model
print(
"Training model... cancertype: {}, for raw {} features".format(acronym,
signal)
)
# Fit the model
cv_pipeline, y_pred_train_df, y_pred_test_df, y_cv_df = train_model(
x_train=x_train_df,
x_test=x_test_df,
y_train=y_train_df,
alphas=alphas,
l1_ratios=l1_ratios,
n_folds=folds,
max_iter=max_iter,
)
# Get metric predictions
y_train_results = get_threshold_metrics(
y_train_df.status, y_pred_train_df, drop=False
)
y_test_results = get_threshold_metrics(
y_test_df.status, y_pred_test_df, drop=False
)
y_cv_results = get_threshold_metrics(y_train_df.status, y_cv_df, drop=False)
# Get coefficients
coef_df = extract_coefficients(
cv_pipeline=cv_pipeline,
feature_names=x_train_df.columns,
signal=signal,
z_dim=num_features,
seed=seed,
algorithm=algorithm,
)
coef_df = coef_df.assign(acronym=acronym)
# Store all results
train_metrics_, train_roc_df, train_pr_df = summarize_results(
results=y_train_results,
gene_or_cancertype=acronym,
signal=signal,
z_dim=num_features,
seed=seed,
algorithm=algorithm,
data_type="train",
)
test_metrics_, test_roc_df, test_pr_df = summarize_results(
results=y_test_results,
gene_or_cancertype=acronym,
signal=signal,
z_dim=num_features,
seed=seed,
algorithm=algorithm,
data_type="test",
)
cv_metrics_, cv_roc_df, cv_pr_df = summarize_results(
results=y_cv_results,
gene_or_cancertype=acronym,
signal=signal,
z_dim=num_features,
seed=seed,
algorithm=algorithm,
data_type="cv"
)
# Compile summary metrics
metrics_ = [train_metrics_, test_metrics_, cv_metrics_]
metric_df_ = pd.DataFrame(metrics_, columns=metric_cols)
cancertype_metrics_list.append(metric_df_)
        auc_df = pd.concat([train_roc_df, test_roc_df, cv_roc_df])
from typing import Optional, Union, List, Tuple, Dict, Any
from pandas.core.common import apply_if_callable
from pandas.core.construction import extract_array
import pandas_flavor as pf
import pandas as pd
import functools
from pandas.api.types import is_list_like, is_scalar, is_categorical_dtype
from janitor.utils import check, check_column
from janitor.functions.utils import _computations_expand_grid
@pf.register_dataframe_method
def complete(
df: pd.DataFrame,
*columns,
sort: bool = False,
by: Optional[Union[list, str]] = None,
fill_value: Optional[Union[Dict, Any]] = None,
explicit: bool = True,
) -> pd.DataFrame:
"""
It is modeled after tidyr's `complete` function, and is a wrapper around
[`expand_grid`][janitor.functions.expand_grid.expand_grid], `pd.merge`
and `pd.fillna`. In a way, it is the inverse of `pd.dropna`, as it exposes
implicitly missing rows.
Combinations of column names or a list/tuple of column names, or even a
dictionary of column names and new values are possible.
MultiIndex columns are not supported.
Example:
>>> import pandas as pd
>>> import janitor
>>> import numpy as np
>>> df = pd.DataFrame(
... {
... "Year": [1999, 2000, 2004, 1999, 2004],
... "Taxon": [
... "Saccharina",
... "Saccharina",
... "Saccharina",
... "Agarum",
... "Agarum",
... ],
... "Abundance": [4, 5, 2, 1, 8],
... }
... )
>>> df
Year Taxon Abundance
0 1999 Saccharina 4
1 2000 Saccharina 5
2 2004 Saccharina 2
3 1999 Agarum 1
4 2004 Agarum 8
Expose missing pairings of `Year` and `Taxon`:
>>> df.complete("Year", "Taxon", sort=True)
Year Taxon Abundance
0 1999 Agarum 1.0
1 1999 Saccharina 4.0
2 2000 Agarum NaN
3 2000 Saccharina 5.0
4 2004 Agarum 8.0
5 2004 Saccharina 2.0
Expose missing years from 1999 to 2004 :
>>> df.complete(
... {"Year": range(df.Year.min(), df.Year.max() + 1)},
... "Taxon",
... sort=True
... )
Year Taxon Abundance
0 1999 Agarum 1.0
1 1999 Saccharina 4.0
2 2000 Agarum NaN
3 2000 Saccharina 5.0
4 2001 Agarum NaN
5 2001 Saccharina NaN
6 2002 Agarum NaN
7 2002 Saccharina NaN
8 2003 Agarum NaN
9 2003 Saccharina NaN
10 2004 Agarum 8.0
11 2004 Saccharina 2.0
Fill missing values:
>>> df = pd.DataFrame(
... dict(
... group=(1, 2, 1, 2),
... item_id=(1, 2, 2, 3),
... item_name=("a", "a", "b", "b"),
... value1=(1, np.nan, 3, 4),
... value2=range(4, 8),
... )
... )
>>> df
group item_id item_name value1 value2
0 1 1 a 1.0 4
1 2 2 a NaN 5
2 1 2 b 3.0 6
3 2 3 b 4.0 7
>>> df.complete(
... "group",
... ("item_id", "item_name"),
... fill_value={"value1": 0, "value2": 99},
... sort=True
... )
group item_id item_name value1 value2
0 1 1 a 1 4
1 1 2 a 0 99
2 1 2 b 3 6
3 1 3 b 0 99
4 2 1 a 0 99
5 2 2 a 0 5
6 2 2 b 0 99
7 2 3 b 4 7
Limit the fill to only implicit missing values
by setting explicit to `False`:
>>> df.complete(
... "group",
... ("item_id", "item_name"),
... fill_value={"value1": 0, "value2": 99},
... explicit=False,
... sort=True
... )
group item_id item_name value1 value2
0 1 1 a 1.0 4.0
1 1 2 a 0.0 99.0
2 1 2 b 3.0 6.0
3 1 3 b 0.0 99.0
4 2 1 a 0.0 99.0
5 2 2 a NaN 5.0
6 2 2 b 0.0 99.0
7 2 3 b 4.0 7.0
:param df: A pandas DataFrame.
:param *columns: This refers to the columns to be
completed. It could be column labels (string type),
a list/tuple of column labels, or a dictionary that pairs
column labels with new values.
:param sort: Sort DataFrame based on *columns. Default is `False`.
:param by: label or list of labels to group by.
The explicit missing rows are returned per group.
:param fill_value: Scalar value to use instead of NaN
for missing combinations. A dictionary, mapping columns names
to a scalar value is also accepted.
:param explicit: Determines if only implicitly missing values
should be filled (`False`), or all nulls existing in the dataframe
(`True`). Default is `True`. `explicit` is applicable only
if `fill_value` is not `None`.
:returns: A pandas DataFrame with explicit missing rows, if any.
"""
if not columns:
return df
df = df.copy()
return _computations_complete(df, columns, sort, by, fill_value, explicit)
def _computations_complete(
df: pd.DataFrame,
columns: List[Union[List, Tuple, Dict, str]],
sort: bool,
by: Optional[Union[list, str]],
fill_value: Optional[Union[Dict, Any]],
explicit: bool,
) -> pd.DataFrame:
"""
This function computes the final output for the `complete` function.
If `by` is present, then `groupby().apply()` is used.
A DataFrame, with rows of missing values, if any, is returned.
"""
(
columns,
column_checker,
sort,
by,
fill_value,
explicit,
) = _data_checks_complete(df, columns, sort, by, fill_value, explicit)
all_strings = True
for column in columns:
if not isinstance(column, str):
all_strings = False
break
# nothing to 'complete' here
if (all_strings and len(columns) == 1) or df.empty:
return df
# under the right conditions, stack/unstack can be faster
# plus it always returns a sorted DataFrame
# which does help in viewing the missing rows
# however, using a merge keeps things simple
# with a stack/unstack,
# the relevant columns combination should be unique
# and there should be no nulls
# trade-off for the simplicity of merge is not so bad
# of course there could be a better way ...
if by is None:
uniques = _generic_complete(df, columns, all_strings, sort)
else:
uniques = df.groupby(by)
uniques = uniques.apply(_generic_complete, columns, all_strings, sort)
uniques = uniques.droplevel(-1)
column_checker = by + column_checker
columns = df.columns
indicator = False
if fill_value is not None and not explicit:
# to get a name that does not exist in the columns
indicator = "".join(columns)
df = pd.merge(
uniques,
df,
how="outer",
on=column_checker,
copy=False,
sort=False,
indicator=indicator,
)
if fill_value is not None:
if is_scalar(fill_value):
# faster when fillna operates on a Series basis
fill_value = {
col: fill_value for col in columns if df[col].hasnans
}
if explicit:
df = df.fillna(fill_value, downcast="infer")
else:
# keep only columns that are not part of column_checker
# IOW, we are excluding columns that were not used
# to generate the combinations
fill_value = {
col: value
for col, value in fill_value.items()
if col not in column_checker
}
if fill_value:
# when explicit is False
# use the indicator parameter to identify rows
# for `left_only`, and fill the relevant columns in fill_value
# with the associated value.
boolean_filter = df.loc[:, indicator] == "left_only"
df = df.drop(columns=indicator)
# iteration used here,
# instead of assign (which is also a for loop),
# to cater for scenarios where the column_name is not a string
# assign only works with keys that are strings
# Also, the output wil be floats (for numeric types),
# even if all the columns could be integers
# user can always convert to int if required
for column_name, value in fill_value.items():
# for categorical dtypes, set the categories first
if is_categorical_dtype(df[column_name]):
df[column_name] = df[column_name].cat.add_categories(
[value]
)
df.loc[boolean_filter, column_name] = value
if not df.columns.equals(columns):
return df.reindex(columns=columns)
return df
def _generic_complete(
df: pd.DataFrame, columns: list, all_strings: bool, sort: bool
):
"""
Generate cartesian product for `_computations_complete`.
Returns a DataFrame, with no duplicates.
"""
if all_strings:
if sort:
uniques = {}
for col in columns:
                column = extract_array(df[col], extract_numpy=True)
# pylint: disable-msg=E1101,W0612
from datetime import datetime, time, timedelta, date
import sys
import os
import operator
from distutils.version import LooseVersion
import nose
import numpy as np
randn = np.random.randn
from pandas import (Index, Series, TimeSeries, DataFrame,
isnull, date_range, Timestamp, Period, DatetimeIndex,
Int64Index, to_datetime, bdate_range, Float64Index)
import pandas.core.datetools as datetools
import pandas.tseries.offsets as offsets
import pandas.tseries.tools as tools
import pandas.tseries.frequencies as fmod
import pandas as pd
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from pandas.tslib import NaT, iNaT
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.index as _index
from pandas.compat import range, long, StringIO, lrange, lmap, zip, product
import pandas.core.datetools as dt
from numpy.random import rand
from numpy.testing import assert_array_equal
from pandas.util.testing import assert_frame_equal
import pandas.compat as compat
import pandas.core.common as com
from pandas import concat
from pandas import _np_version_under1p7
from numpy.testing.decorators import slow
def _skip_if_no_pytz():
try:
import pytz
except ImportError:
raise nose.SkipTest("pytz not installed")
def _skip_if_has_locale():
import locale
lang, _ = locale.getlocale()
if lang is not None:
raise nose.SkipTest("Specific locale is set {0}".format(lang))
class TestTimeSeriesDuplicates(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
dates = [datetime(2000, 1, 2), datetime(2000, 1, 2),
datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 3), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 4),
datetime(2000, 1, 4), datetime(2000, 1, 5)]
self.dups = Series(np.random.randn(len(dates)), index=dates)
def test_constructor(self):
tm.assert_isinstance(self.dups, TimeSeries)
tm.assert_isinstance(self.dups.index, DatetimeIndex)
def test_is_unique_monotonic(self):
self.assertFalse(self.dups.index.is_unique)
def test_index_unique(self):
uniques = self.dups.index.unique()
expected = DatetimeIndex([datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 5)])
self.assertEqual(uniques.dtype, 'M8[ns]') # sanity
self.assertTrue(uniques.equals(expected))
self.assertEqual(self.dups.index.nunique(), 4)
# #2563
self.assertTrue(isinstance(uniques, DatetimeIndex))
dups_local = self.dups.index.tz_localize('US/Eastern')
dups_local.name = 'foo'
result = dups_local.unique()
expected = DatetimeIndex(expected, tz='US/Eastern')
self.assertTrue(result.tz is not None)
self.assertEqual(result.name, 'foo')
self.assertTrue(result.equals(expected))
# NaT
arr = [ 1370745748 + t for t in range(20) ] + [iNaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
arr = [ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
def test_index_dupes_contains(self):
d = datetime(2011, 12, 5, 20, 30)
ix = DatetimeIndex([d, d])
self.assertTrue(d in ix)
def test_duplicate_dates_indexing(self):
ts = self.dups
uniques = ts.index.unique()
for date in uniques:
result = ts[date]
mask = ts.index == date
total = (ts.index == date).sum()
expected = ts[mask]
if total > 1:
assert_series_equal(result, expected)
else:
assert_almost_equal(result, expected[0])
cp = ts.copy()
cp[date] = 0
expected = Series(np.where(mask, 0, ts), index=ts.index)
assert_series_equal(cp, expected)
self.assertRaises(KeyError, ts.__getitem__, datetime(2000, 1, 6))
# new index
ts[datetime(2000,1,6)] = 0
self.assertEqual(ts[datetime(2000,1,6)], 0)
def test_range_slice(self):
idx = DatetimeIndex(['1/1/2000', '1/2/2000', '1/2/2000', '1/3/2000',
'1/4/2000'])
ts = Series(np.random.randn(len(idx)), index=idx)
result = ts['1/2/2000':]
expected = ts[1:]
assert_series_equal(result, expected)
result = ts['1/2/2000':'1/3/2000']
expected = ts[1:4]
assert_series_equal(result, expected)
def test_groupby_average_dup_values(self):
result = self.dups.groupby(level=0).mean()
expected = self.dups.groupby(self.dups.index).mean()
assert_series_equal(result, expected)
def test_indexing_over_size_cutoff(self):
import datetime
# #1821
old_cutoff = _index._SIZE_CUTOFF
try:
_index._SIZE_CUTOFF = 1000
# create large list of non periodic datetime
dates = []
sec = datetime.timedelta(seconds=1)
half_sec = datetime.timedelta(microseconds=500000)
d = datetime.datetime(2011, 12, 5, 20, 30)
n = 1100
for i in range(n):
dates.append(d)
dates.append(d + sec)
dates.append(d + sec + half_sec)
dates.append(d + sec + sec + half_sec)
d += 3 * sec
# duplicate some values in the list
duplicate_positions = np.random.randint(0, len(dates) - 1, 20)
for p in duplicate_positions:
dates[p + 1] = dates[p]
df = DataFrame(np.random.randn(len(dates), 4),
index=dates,
columns=list('ABCD'))
pos = n * 3
timestamp = df.index[pos]
self.assertIn(timestamp, df.index)
# it works!
df.ix[timestamp]
self.assertTrue(len(df.ix[[timestamp]]) > 0)
finally:
_index._SIZE_CUTOFF = old_cutoff
def test_indexing_unordered(self):
# GH 2437
rng = date_range(start='2011-01-01', end='2011-01-15')
ts = Series(randn(len(rng)), index=rng)
ts2 = concat([ts[0:4],ts[-4:],ts[4:-4]])
for t in ts.index:
s = str(t)
expected = ts[t]
result = ts2[t]
self.assertTrue(expected == result)
# GH 3448 (ranges)
def compare(slobj):
result = ts2[slobj].copy()
result = result.sort_index()
expected = ts[slobj]
assert_series_equal(result,expected)
compare(slice('2011-01-01','2011-01-15'))
compare(slice('2010-12-30','2011-01-15'))
compare(slice('2011-01-01','2011-01-16'))
# partial ranges
compare(slice('2011-01-01','2011-01-6'))
compare(slice('2011-01-06','2011-01-8'))
compare(slice('2011-01-06','2011-01-12'))
# single values
result = ts2['2011'].sort_index()
expected = ts['2011']
assert_series_equal(result,expected)
# diff freq
rng = date_range(datetime(2005, 1, 1), periods=20, freq='M')
ts = Series(np.arange(len(rng)), index=rng)
ts = ts.take(np.random.permutation(20))
result = ts['2005']
for t in result.index:
self.assertTrue(t.year == 2005)
def test_indexing(self):
idx = date_range("2001-1-1", periods=20, freq='M')
ts = Series(np.random.rand(len(idx)),index=idx)
# getting
# GH 3070, make sure semantics work on Series/Frame
expected = ts['2001']
df = DataFrame(dict(A = ts))
result = df['2001']['A']
assert_series_equal(expected,result)
# setting
ts['2001'] = 1
expected = ts['2001']
df.loc['2001','A'] = 1
result = df['2001']['A']
assert_series_equal(expected,result)
# GH3546 (not including times on the last day)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:00', freq='H')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:59', freq='S')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = [ Timestamp('2013-05-31 00:00'), Timestamp(datetime(2013,5,31,23,59,59,999999))]
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013']
assert_series_equal(expected,ts)
# GH 3925, indexing with a seconds resolution string / datetime object
df = DataFrame(randn(5,5),columns=['open','high','low','close','volume'],index=date_range('2012-01-02 18:01:00',periods=5,tz='US/Central',freq='s'))
expected = df.loc[[df.index[2]]]
result = df['2012-01-02 18:01:02']
assert_frame_equal(result,expected)
# this is a single date, so will raise
self.assertRaises(KeyError, df.__getitem__, df.index[2],)
def test_recreate_from_data(self):
if _np_version_under1p7:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N', 'C']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, periods=1)
idx = DatetimeIndex(org, freq=f)
self.assertTrue(idx.equals(org))
        # unable to create tz-aware 'A' and 'C' freq
if _np_version_under1p7:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, tz='US/Pacific', periods=1)
idx = DatetimeIndex(org, freq=f, tz='US/Pacific')
self.assertTrue(idx.equals(org))
def assert_range_equal(left, right):
assert(left.equals(right))
assert(left.freq == right.freq)
assert(left.tz == right.tz)
class TestTimeSeries(tm.TestCase):
_multiprocess_can_split_ = True
def test_is_(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
self.assertTrue(dti.is_(dti))
self.assertTrue(dti.is_(dti.view()))
self.assertFalse(dti.is_(dti.copy()))
def test_dti_slicing(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
dti2 = dti[[1, 3, 5]]
v1 = dti2[0]
v2 = dti2[1]
v3 = dti2[2]
self.assertEqual(v1, Timestamp('2/28/2005'))
self.assertEqual(v2, Timestamp('4/30/2005'))
self.assertEqual(v3, Timestamp('6/30/2005'))
# don't carry freq through irregular slicing
self.assertIsNone(dti2.freq)
def test_pass_datetimeindex_to_index(self):
# Bugs in #1396
rng = date_range('1/1/2000', '3/1/2000')
idx = Index(rng, dtype=object)
expected = Index(rng.to_pydatetime(), dtype=object)
self.assert_numpy_array_equal(idx.values, expected.values)
def test_contiguous_boolean_preserve_freq(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
mask = np.zeros(len(rng), dtype=bool)
mask[10:20] = True
masked = rng[mask]
expected = rng[10:20]
self.assertIsNotNone(expected.freq)
assert_range_equal(masked, expected)
mask[22] = True
masked = rng[mask]
self.assertIsNone(masked.freq)
def test_getitem_median_slice_bug(self):
index = date_range('20090415', '20090519', freq='2B')
s = Series(np.random.randn(13), index=index)
indexer = [slice(6, 7, None)]
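# indexing with a one-element list containing a slice should behave the
# same as indexing with the bare slice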
result = s[indexer]
expected = s[indexer[0]]
assert_series_equal(result, expected)
def test_series_box_timestamp(self):
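# scalar access on a datetime64 series should box values as Timestamp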
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng)
tm.assert_isinstance(s[5], Timestamp)
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng, index=rng)
tm.assert_isinstance(s[5], Timestamp)
tm.assert_isinstance(s.iget_value(5), Timestamp)
def test_date_range_ambiguous_arguments(self):
# #2538
start = datetime(2011, 1, 1, 5, 3, 40)
end = datetime(2011, 1, 1, 8, 9, 40)
self.assertRaises(ValueError, date_range, start, end,
freq='s', periods=10)
def test_timestamp_to_datetime(self):
_skip_if_no_pytz()
rng = date_range('20090415', '20090519',
tz='US/Eastern')
stamp = rng[0]
dtval = stamp.to_pydatetime()
self.assertEqual(stamp, dtval)
self.assertEqual(stamp.tzinfo, dtval.tzinfo)
def test_index_convert_to_datetime_array(self):
_skip_if_no_pytz()
def _check_rng(rng):
converted = rng.to_pydatetime()
tm.assert_isinstance(converted, np.ndarray)
for x, stamp in zip(converted, rng):
tm.assert_isinstance(x, datetime)
self.assertEqual(x, stamp.to_pydatetime())
self.assertEqual(x.tzinfo, stamp.tzinfo)
rng = date_range('20090415', '20090519')
rng_eastern = date_range('20090415', '20090519', tz='US/Eastern')
rng_utc = date_range('20090415', '20090519', tz='utc')
_check_rng(rng)
_check_rng(rng_eastern)
_check_rng(rng_utc)
def test_ctor_str_intraday(self):
rng = DatetimeIndex(['1-1-2000 00:00:01'])
self.assertEqual(rng[0].second, 1)
def test_series_ctor_plus_datetimeindex(self):
rng = date_range('20090415', '20090519', freq='B')
data = dict((k, 1) for k in rng)
result = Series(data, index=rng)
self.assertIs(result.index, rng)
def test_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index)
result = result.fillna(method='bfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index, method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index, method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_setitem_timestamp(self):
# 2155
columns = DatetimeIndex(start='1/1/2012', end='2/1/2012',
freq=datetools.bday)
index = lrange(10)
data = DataFrame(columns=columns, index=index)
t = datetime(2012, 11, 1)
ts = Timestamp(t)
data[ts] = np.nan # works
def test_sparse_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
ss = s[:2].reindex(index).to_sparse()
result = ss.fillna(method='pad', limit=5)
expected = ss.fillna(method='pad', limit=5)
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
ss = s[-2:].reindex(index).to_sparse()
result = ss.fillna(method='backfill', limit=5)
expected = ss.fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
s = s.to_sparse()
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index, method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index, method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_sparse_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_pad_require_monotonicity(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
rng2 = rng[::2][::-1]
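# get_indexer with method='pad' requires a monotonic index; rng2 is
# reversed, so this should raise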
self.assertRaises(ValueError, rng2.get_indexer, rng,
method='pad')
def test_frame_ctor_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
dates = np.asarray(rng)
df = DataFrame({'A': np.random.randn(len(rng)), 'B': dates})
self.assertTrue(np.issubdtype(df['B'].dtype, np.dtype('M8[ns]')))
def test_frame_add_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
df = DataFrame(index=np.arange(len(rng)))
df['A'] = rng
self.assertTrue(np.issubdtype(df['A'].dtype, np.dtype('M8[ns]')))
def test_frame_datetime64_pre1900_repr(self):
df = DataFrame({'year': date_range('1/1/1700', periods=50,
freq='A-DEC')})
# it works!
repr(df)
def test_frame_add_datetime64_col_other_units(self):
n = 100
units = ['h', 'm', 's', 'ms', 'D', 'M', 'Y']
ns_dtype = np.dtype('M8[ns]')
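# values assigned in any datetime64 unit should be cast to datetime64[ns]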
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df[unit] = vals
ex_vals = to_datetime(vals.astype('O'))
self.assertEqual(df[unit].dtype, ns_dtype)
self.assertTrue((df[unit].values == ex_vals).all())
# Test insertion into existing datetime64 column
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df['dates'] = np.arange(n, dtype=np.int64).view(ns_dtype)
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
tmp = df.copy()
tmp['dates'] = vals
ex_vals = to_datetime(vals.astype('O'))
self.assertTrue((tmp['dates'].values == ex_vals).all())
def test_to_datetime_unit(self):
epoch = 1370745748
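# 1370745748 seconds since the Unix epoch is 2013-06-09 02:42:28 UTC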
s = Series([ epoch + t for t in range(20) ])
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ]).astype(float)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ] + [iNaT])
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ] + [iNaT]).astype(float)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
s = concat([Series([ epoch + t for t in range(20) ]).astype(float),Series([np.nan])],ignore_index=True)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
def test_series_ctor_datetime64(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
dates = np.asarray(rng)
series = Series(dates)
self.assertTrue(np.issubdtype(series.dtype, np.dtype('M8[ns]')))
def test_index_cast_datetime64_other_units(self):
arr = np.arange(0, 100, 10, dtype=np.int64).view('M8[D]')
idx = Index(arr)
self.assertTrue((idx.values == tslib.cast_to_nanoseconds(arr)).all())
def test_index_astype_datetime64(self):
idx = Index([datetime(2012, 1, 1)], dtype=object)
if not _np_version_under1p7:
raise nose.SkipTest("test only valid in numpy < 1.7")
casted = idx.astype(np.dtype('M8[D]'))
expected = DatetimeIndex(idx.values)
tm.assert_isinstance(casted, DatetimeIndex)
self.assertTrue(casted.equals(expected))
def test_reindex_series_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
series = Series(rng)
result = series.reindex(lrange(15))
self.assertTrue(np.issubdtype(result.dtype, np.dtype('M8[ns]')))
mask = result.isnull()
self.assertTrue(mask[-5:].all())
self.assertFalse(mask[:-5].any())
def test_reindex_frame_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
df = DataFrame({'A': np.random.randn(len(rng)), 'B': rng})
result = df.reindex(lrange(15))
self.assertTrue(np.issubdtype(result['B'].dtype, np.dtype('M8[ns]')))
mask = com.isnull(result)['B']
self.assertTrue(mask[-5:].all())
self.assertFalse(mask[:-5].any())
def test_series_repr_nat(self):
series = Series([0, 1000, 2000, iNaT], dtype='M8[ns]')
result = repr(series)
expected = ('0 1970-01-01 00:00:00\n'
'1 1970-01-01 00:00:00.000001\n'
'2 1970-01-01 00:00:00.000002\n'
'3 NaT\n'
'dtype: datetime64[ns]')
self.assertEqual(result, expected)
def test_fillna_nat(self):
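# iNaT is the int64 sentinel backing NaT in datetime64[ns] data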
series = Series([0, 1, 2, iNaT], dtype='M8[ns]')
filled = series.fillna(method='pad')
filled2 = series.fillna(value=series.values[2])
expected = series.copy()
expected.values[3] = expected.values[2]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='pad')
filled2 = df.fillna(value=series.values[2])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
series = Series([iNaT, 0, 1, 2], dtype='M8[ns]')
filled = series.fillna(method='bfill')
filled2 = series.fillna(value=series[1])
expected = series.copy()
expected[0] = expected[1]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='bfill')
filled2 = df.fillna(value=series[1])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
def test_string_na_nat_conversion(self):
# GH #999, #858
from pandas.compat import parse_date
strings = np.array(['1/1/2000', '1/2/2000', np.nan,
'1/4/2000, 12:34:56'], dtype=object)
expected = np.empty(4, dtype='M8[ns]')
for i, val in enumerate(strings):
if com.isnull(val):
expected[i] = iNaT
else:
expected[i] = parse_date(val)
result = tslib.array_to_datetime(strings)
assert_almost_equal(result, expected)
result2 = to_datetime(strings)
tm.assert_isinstance(result2, DatetimeIndex)
assert_almost_equal(result, result2)
malformed = np.array(['1/100/2000', np.nan], dtype=object)
result = to_datetime(malformed)
assert_almost_equal(result, malformed)
self.assertRaises(ValueError, to_datetime, malformed,
errors='raise')
idx = ['a', 'b', 'c', 'd', 'e']
series = Series(['1/1/2000', np.nan, '1/3/2000', np.nan,
'1/5/2000'], index=idx, name='foo')
dseries = Series([to_datetime('1/1/2000'), np.nan,
to_datetime('1/3/2000'), np.nan,
to_datetime('1/5/2000')], index=idx, name='foo')
result = to_datetime(series)
dresult = to_datetime(dseries)
expected = Series(np.empty(5, dtype='M8[ns]'), index=idx)
for i in range(5):
x = series[i]
if isnull(x):
expected[i] = iNaT
else:
expected[i] = to_datetime(x)
assert_series_equal(result, expected)
self.assertEqual(result.name, 'foo')
assert_series_equal(dresult, expected)
self.assertEqual(dresult.name, 'foo')
def test_to_datetime_iso8601(self):
result = to_datetime(["2012-01-01 00:00:00"])
exp = Timestamp("2012-01-01 00:00:00")
self.assertEqual(result[0], exp)
result = to_datetime(['20121001']) # bad iso 8601
exp = Timestamp('2012-10-01')
self.assertEqual(result[0], exp)
def test_to_datetime_default(self):
rs = to_datetime('2001')
xp = datetime(2001, 1, 1)
self.assertEqual(rs, xp)
#### dayfirst is essentially broken
#### to_datetime('01-13-2012', dayfirst=True)
#### self.assertRaises(ValueError, to_datetime('01-13-2012', dayfirst=True))
def test_to_datetime_on_datetime64_series(self):
# #2699
s = Series(date_range('1/1/2000', periods=10))
result = to_datetime(s)
self.assertEqual(result[0], s[0])
def test_to_datetime_with_apply(self):
# this is only tested with the US/None locales
_skip_if_has_locale()
# GH 5195
# with a format and coerce a single item to_datetime fails
td = Series(['May 04', 'Jun 02', 'Dec 11'], index=[1,2,3])
expected = pd.to_datetime(td, format='%b %y')
result = td.apply(pd.to_datetime, format='%b %y')
assert_series_equal(result, expected)
td = pd.Series(['May 04', 'Jun 02', ''], index=[1,2,3])
self.assertRaises(ValueError, lambda : pd.to_datetime(td,format='%b %y'))
self.assertRaises(ValueError, lambda : td.apply(pd.to_datetime, format='%b %y'))
expected = pd.to_datetime(td, format='%b %y', coerce=True)
result = td.apply(lambda x: pd.to_datetime(x, format='%b %y', coerce=True))
assert_series_equal(result, expected)
def test_nat_vector_field_access(self):
idx = DatetimeIndex(['1/1/2000', None, None, '1/4/2000'])
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'microsecond', 'nanosecond',
'week', 'dayofyear']
for field in fields:
result = getattr(idx, field)
expected = [getattr(x, field) if x is not NaT else -1
for x in idx]
self.assert_numpy_array_equal(result, expected)
def test_nat_scalar_field_access(self):
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'microsecond', 'nanosecond',
'week', 'dayofyear']
for field in fields:
result = getattr(NaT, field)
self.assertEqual(result, -1)
self.assertEqual(NaT.weekday(), -1)
def test_to_datetime_types(self):
# empty string
result = to_datetime('')
self.assertIs(result, NaT)
result = to_datetime(['', ''])
self.assertTrue(isnull(result).all())
# ints
result = Timestamp(0)
expected = to_datetime(0)
self.assertEqual(result, expected)
# GH 3888 (strings)
expected = to_datetime(['2012'])[0]
result = to_datetime('2012')
self.assertEqual(result, expected)
### array = ['2012','20120101','20120101 12:01:01']
array = ['20120101','20120101 12:01:01']
expected = list(to_datetime(array))
result = lmap(Timestamp,array)
tm.assert_almost_equal(result,expected)
### currently fails ###
### result = Timestamp('2012')
### expected = to_datetime('2012')
### self.assertEqual(result, expected)
def test_to_datetime_unprocessable_input(self):
# GH 4928
self.assert_numpy_array_equal(
to_datetime([1, '1']),
np.array([1, '1'], dtype='O')
)
self.assertRaises(TypeError, to_datetime, [1, '1'], errors='raise')
def test_to_datetime_other_datetime64_units(self):
# 5/25/2012
scalar = np.int64(1337904000000000).view('M8[us]')
as_obj = scalar.astype('O')
index = DatetimeIndex([scalar])
self.assertEqual(index[0], scalar.astype('O'))
value = Timestamp(scalar)
self.assertEqual(value, as_obj)
def test_to_datetime_list_of_integers(self):
rng = date_range('1/1/2000', periods=20)
rng = DatetimeIndex(rng.values)
ints = list(rng.asi8)
result = DatetimeIndex(ints)
self.assertTrue(rng.equals(result))
def test_to_datetime_dt64s(self):
in_bound_dts = [
np.datetime64('2000-01-01'),
np.datetime64('2000-01-02'),
]
for dt in in_bound_dts:
self.assertEqual(
pd.to_datetime(dt),
Timestamp(dt)
)
oob_dts = [
np.datetime64('1000-01-01'),
np.datetime64('5000-01-02'),
]
for dt in oob_dts:
self.assertRaises(ValueError, pd.to_datetime, dt, errors='raise')
self.assertRaises(ValueError, tslib.Timestamp, dt)
self.assertIs(pd.to_datetime(dt, coerce=True), NaT)
def test_to_datetime_array_of_dt64s(self):
dts = [
np.datetime64('2000-01-01'),
np.datetime64('2000-01-02'),
]
# Assuming all datetimes are in bounds, to_datetime() returns
# an array that is equal to Timestamp() parsing
self.assert_numpy_array_equal(
pd.to_datetime(dts, box=False),
np.array([Timestamp(x).asm8 for x in dts])
)
# A list of datetimes where the last one is out of bounds
dts_with_oob = dts + [np.datetime64('9999-01-01')]
self.assertRaises(
ValueError,
pd.to_datetime,
dts_with_oob,
coerce=False,
errors='raise'
)
self.assert_numpy_array_equal(
pd.to_datetime(dts_with_oob, box=False, coerce=True),
np.array(
[
Timestamp(dts_with_oob[0]).asm8,
Timestamp(dts_with_oob[1]).asm8,
iNaT,
],
dtype='M8'
)
)
# With coerce=False and errors='ignore', out of bounds datetime64s
# are converted to their .item(), which depending on the version of
# numpy is either a python datetime.datetime or datetime.date
self.assert_numpy_array_equal(
pd.to_datetime(dts_with_oob, box=False, coerce=False),
np.array(
[dt.item() for dt in dts_with_oob],
dtype='O'
)
)
def test_index_to_datetime(self):
idx = Index(['1/1/2000', '1/2/2000', '1/3/2000'])
result = idx.to_datetime()
expected = DatetimeIndex(datetools.to_datetime(idx.values))
self.assertTrue(result.equals(expected))
today = datetime.today()
idx = Index([today], dtype=object)
result = idx.to_datetime()
expected = DatetimeIndex([today])
self.assertTrue(result.equals(expected))
def test_to_datetime_freq(self):
xp = bdate_range('2000-1-1', periods=10, tz='UTC')
rs = xp.to_datetime()
self.assertEqual(xp.freq, rs.freq)
self.assertEqual(xp.tzinfo, rs.tzinfo)
def test_range_misspecified(self):
# GH #1095
self.assertRaises(ValueError, date_range, '1/1/2000')
self.assertRaises(ValueError, date_range, end='1/1/2000')
self.assertRaises(ValueError, date_range, periods=10)
self.assertRaises(ValueError, date_range, '1/1/2000', freq='H')
self.assertRaises(ValueError, date_range, end='1/1/2000', freq='H')
self.assertRaises(ValueError, date_range, periods=10, freq='H')
def test_reasonable_keyerror(self):
# GH #1062
index = DatetimeIndex(['1/3/2000'])
try:
index.get_loc('1/1/2000')
except KeyError as e:
self.assertIn('2000', str(e))
def test_reindex_with_datetimes(self):
rng = date_range('1/1/2000', periods=20)
ts = Series(np.random.randn(20), index=rng)
result = ts.reindex(list(ts.index[5:10]))
expected = ts[5:10]
tm.assert_series_equal(result, expected)
result = ts[list(ts.index[5:10])]
tm.assert_series_equal(result, expected)
def test_promote_datetime_date(self):
rng = date_range('1/1/2000', periods=20)
ts = Series(np.random.randn(20), index=rng)
ts_slice = ts[5:]
ts2 = ts_slice.copy()
ts2.index = [x.date() for x in ts2.index]
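# the datetime.date index should be promoted to a DatetimeIndex so the
# two series align for arithmetic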
result = ts + ts2
result2 = ts2 + ts
expected = ts + ts[5:]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# test asfreq
result = ts2.asfreq('4H', method='ffill')
expected = ts[5:].asfreq('4H', method='ffill')
assert_series_equal(result, expected)
result = rng.get_indexer(ts2.index)
expected = rng.get_indexer(ts_slice.index)
self.assert_numpy_array_equal(result, expected)
def test_asfreq_normalize(self):
rng = date_range('1/1/2000 09:30', periods=20)
norm = date_range('1/1/2000', periods=20)
vals = np.random.randn(20)
ts = Series(vals, index=rng)
result = ts.asfreq('D', normalize=True)
norm = date_range('1/1/2000', periods=20)
expected = Series(vals, index=norm)
assert_series_equal(result, expected)
vals = np.random.randn(20, 3)
ts = DataFrame(vals, index=rng)
result = ts.asfreq('D', normalize=True)
expected = DataFrame(vals, index=norm)
assert_frame_equal(result, expected)
def test_date_range_gen_error(self):
rng = date_range('1/1/2000 00:00', '1/1/2000 00:18', freq='5min')
self.assertEqual(len(rng), 4)
def test_first_subset(self):
ts = _simple_ts('1/1/2000', '1/1/2010', freq='12h')
result = ts.first('10d')
self.assertEqual(len(result), 20)
ts = _simple_ts('1/1/2000', '1/1/2010')
result = ts.first('10d')
self.assertEqual(len(result), 10)
result = ts.first('3M')
expected = ts[:'3/31/2000']
assert_series_equal(result, expected)
result = ts.first('21D')
expected = ts[:21]
assert_series_equal(result, expected)
result = ts[:0].first('3M')
assert_series_equal(result, ts[:0])
def test_last_subset(self):
ts = _simple_ts('1/1/2000', '1/1/2010', freq='12h')
result = ts.last('10d')
self.assertEqual(len(result), 20)
ts = _simple_ts('1/1/2000', '1/1/2010')
result = ts.last('10d')
self.assertEqual(len(result), 10)
result = ts.last('21D')
expected = ts['12/12/2009':]
assert_series_equal(result, expected)
result = ts.last('21D')
expected = ts[-21:]
assert_series_equal(result, expected)
result = ts[:0].last('3M')
assert_series_equal(result, ts[:0])
def test_add_offset(self):
rng = date_range('1/1/2000', '2/1/2000')
result = rng + offsets.Hour(2)
expected = date_range('1/1/2000 02:00', '2/1/2000 02:00')
self.assertTrue(result.equals(expected))
def test_format_pre_1900_dates(self):
rng = date_range('1/1/1850', '1/1/1950', freq='A-DEC')
rng.format()
ts = Series(1, index=rng)
repr(ts)
def test_repeat(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
self.assertIsNone(result.freq)
self.assertEqual(len(result), 5 * len(rng))
def test_at_time(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
rs = ts.at_time(rng[1])
self.assertTrue((rs.index.hour == rng[1].hour).all())
self.assertTrue((rs.index.minute == rng[1].minute).all())
self.assertTrue((rs.index.second == rng[1].second).all())
result = ts.at_time('9:30')
expected = ts.at_time(time(9, 30))
assert_series_equal(result, expected)
df = DataFrame(np.random.randn(len(rng), 3), index=rng)
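# indexing with a datetime.time selects all rows at that time of day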
result = ts[time(9, 30)]
result_df = df.ix[time(9, 30)]
expected = ts[(rng.hour == 9) & (rng.minute == 30)]
exp_df = df[(rng.hour == 9) & (rng.minute == 30)]
# expected.index = date_range('1/1/2000', '1/4/2000')
assert_series_equal(result, expected)
tm.assert_frame_equal(result_df, exp_df)
chunk = df.ix['1/4/2000':]
result = chunk.ix[time(9, 30)]
expected = result_df[-1:]
tm.assert_frame_equal(result, expected)
# midnight, everything
rng = date_range('1/1/2000', '1/31/2000')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.at_time(time(0, 0))
assert_series_equal(result, ts)
# time doesn't exist
rng = date_range('1/1/2012', freq='23Min', periods=384)
ts = Series(np.random.randn(len(rng)), rng)
rs = ts.at_time('16:00')
self.assertEqual(len(rs), 0)
def test_at_time_frame(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
rs = ts.at_time(rng[1])
self.assertTrue((rs.index.hour == rng[1].hour).all())
self.assertTrue((rs.index.minute == rng[1].minute).all())
self.assertTrue((rs.index.second == rng[1].second).all())
result = ts.at_time('9:30')
expected = ts.at_time(time(9, 30))
assert_frame_equal(result, expected)
result = ts.ix[time(9, 30)]
expected = ts.ix[(rng.hour == 9) & (rng.minute == 30)]
assert_frame_equal(result, expected)
# midnight, everything
rng = date_range('1/1/2000', '1/31/2000')
ts = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts.at_time(time(0, 0))
assert_frame_equal(result, ts)
# time doesn't exist
rng = date_range('1/1/2012', freq='23Min', periods=384)
ts = DataFrame(np.random.randn(len(rng), 2), rng)
rs = ts.at_time('16:00')
self.assertEqual(len(rs), 0)
def test_between_time(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
stime = time(0, 0)
etime = time(1, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = 13 * 4 + 1
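# 00:00-01:00 inclusive at 5min freq is 13 stamps on each of the 4 full
# days plus the final 1/5 00:00 endpoint; excluding the start drops 00:00
# on all 5 days, excluding the end drops 01:00 on the 4 full days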
if not inc_start:
exp_len -= 5
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue(t >= stime)
else:
self.assertTrue(t > stime)
if inc_end:
self.assertTrue(t <= etime)
else:
self.assertTrue(t < etime)
result = ts.between_time('00:00', '01:00')
expected = ts.between_time(stime, etime)
assert_series_equal(result, expected)
# across midnight
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
stime = time(22, 0)
etime = time(9, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = (12 * 11 + 1) * 4 + 1
if not inc_start:
exp_len -= 4
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue((t >= stime) or (t <= etime))
else:
self.assertTrue((t > stime) or (t <= etime))
if inc_end:
self.assertTrue((t <= etime) or (t >= stime))
else:
self.assertTrue((t < etime) or (t >= stime))
def test_between_time_frame(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(0, 0)
etime = time(1, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = 13 * 4 + 1
if not inc_start:
exp_len -= 5
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue(t >= stime)
else:
self.assertTrue(t > stime)
if inc_end:
self.assertTrue(t <= etime)
else:
self.assertTrue(t < etime)
result = ts.between_time('00:00', '01:00')
expected = ts.between_time(stime, etime)
assert_frame_equal(result, expected)
# across midnight
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(22, 0)
etime = time(9, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = (12 * 11 + 1) * 4 + 1
if not inc_start:
exp_len -= 4
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue((t >= stime) or (t <= etime))
else:
self.assertTrue((t > stime) or (t <= etime))
if inc_end:
self.assertTrue((t <= etime) or (t >= stime))
else:
self.assertTrue((t < etime) or (t >= stime))
def test_dti_constructor_preserve_dti_freq(self):
rng = date_range('1/1/2000', '1/2/2000', freq='5min')
rng2 = DatetimeIndex(rng)
self.assertEqual(rng.freq, rng2.freq)
def test_normalize(self):
rng = date_range('1/1/2000 9:30', periods=10, freq='D')
result = rng.normalize()
expected = date_range('1/1/2000', periods=10, freq='D')
self.assertTrue(result.equals(expected))
rng_ns = pd.DatetimeIndex(np.array([1380585623454345752, 1380585612343234312]).astype("datetime64[ns]"))
rng_ns_normalized = rng_ns.normalize()
expected = pd.DatetimeIndex(np.array([1380585600000000000, 1380585600000000000]).astype("datetime64[ns]"))
self.assertTrue(rng_ns_normalized.equals(expected))
self.assertTrue(result.is_normalized)
self.assertFalse(rng.is_normalized)
def test_to_period(self):
from pandas.tseries.period import period_range
ts = _simple_ts('1/1/2000', '1/1/2001')
pts = ts.to_period()
exp = ts.copy()
exp.index = period_range('1/1/2000', '1/1/2001')
assert_series_equal(pts, exp)
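# converting with an explicit freq should coarsen the PeriodIndex to monthly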
pts = ts.to_period('M')
self.assertTrue(pts.index.equals(exp.index.asfreq('M')))
def create_dt64_based_index(self):
data = [Timestamp('2007-01-01 10:11:12.123456Z'),
Timestamp('2007-01-01 10:11:13.789123Z')]
index = DatetimeIndex(data)
return index
def test_to_period_millisecond(self):
index = self.create_dt64_based_index()
period = index.to_period(freq='L')
self.assertEqual(2, len(period))
self.assertEqual(period[0], Period('2007-01-01 10:11:12.123Z', 'L'))
self.assertEqual(period[1], Period('2007-01-01 10:11:13.789Z', 'L'))
def test_to_period_microsecond(self):
index = self.create_dt64_based_index()
period = index.to_period(freq='U')
self.assertEqual(2, len(period))
self.assertEqual(period[0], Period('2007-01-01 10:11:12.123456Z', 'U'))
self.assertEqual(period[1], Period('2007-01-01 10:11:13.789123Z', 'U'))
def test_to_period_tz(self):
_skip_if_no_pytz()
from dateutil.tz import tzlocal
from pytz import utc as UTC
xp = date_range('1/1/2000', '4/1/2000').to_period()
ts = date_range('1/1/2000', '4/1/2000', tz='US/Eastern')
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assertEqual(result, expected)
self.assertTrue(ts.to_period().equals(xp))
ts = date_range('1/1/2000', '4/1/2000', tz=UTC)
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assertEqual(result, expected)
self.assertTrue(ts.to_period().equals(xp))
ts = date_range('1/1/2000', '4/1/2000', tz=tzlocal())
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assertEqual(result, expected)
self.assertTrue(ts.to_period().equals(xp))
def test_frame_to_period(self):
K = 5
from pandas.tseries.period import period_range
dr = date_range('1/1/2000', '1/1/2001')
pr = period_range('1/1/2000', '1/1/2001')
df = DataFrame(randn(len(dr), K), index=dr)
df['mix'] = 'a'
pts = df.to_period()
exp = df.copy()
exp.index = pr
assert_frame_equal(pts, exp)
pts = df.to_period('M')
self.assertTrue(pts.index.equals(exp.index.asfreq('M')))
df = df.T
pts = df.to_period(axis=1)
exp = df.copy()
exp.columns = pr
assert_frame_equal(pts, exp)
pts = df.to_period('M', axis=1)
self.assertTrue(pts.columns.equals(exp.columns.asfreq('M')))
self.assertRaises(ValueError, df.to_period, axis=2)
def test_timestamp_fields(self):
# extra fields from DatetimeIndex like quarter and week
idx = tm.makeDateIndex(100)
fields = ['dayofweek', 'dayofyear', 'week', 'weekofyear', 'quarter', 'is_month_start', 'is_month_end', 'is_quarter_start', 'is_quarter_end', 'is_year_start', 'is_year_end']
for f in fields:
expected = getattr(idx, f)[-1]
result = getattr(Timestamp(idx[-1]), f)
self.assertEqual(result, expected)
self.assertEqual(idx.freq, Timestamp(idx[-1], idx.freq).freq)
self.assertEqual(idx.freqstr, Timestamp(idx[-1], idx.freq).freqstr)
def test_woy_boundary(self):
# make sure weeks at year boundaries are correct
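# under ISO 8601, week 1 is the week containing the year's first Thursday,
# so dates near Jan 1 / Dec 31 can fall in the adjacent ISO year's numbering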
d = datetime(2013,12,31)
result = Timestamp(d).week
expected = 1 # ISO standard
self.assertEqual(result, expected)
d = datetime(2008,12,28)
result = Timestamp(d).week
expected = 52 # ISO standard
self.assertEqual(result, expected)
d = datetime(2009,12,31)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
d = datetime(2010,1,1)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
d = datetime(2010,1,3)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
result = np.array([Timestamp(datetime(*args)).week for args in
[(2000,1,1),(2000,1,2),(2005,1,1),(2005,1,2)]])
self.assertTrue((result == [52, 52, 53, 53]).all())
def test_timestamp_date_out_of_range(self):
self.assertRaises(ValueError, Timestamp, '1676-01-01')
self.assertRaises(ValueError, Timestamp, '2263-01-01')
# 1475
self.assertRaises(ValueError, DatetimeIndex, ['1400-01-01'])
self.assertRaises(ValueError, DatetimeIndex, [datetime(1400, 1, 1)])
def test_timestamp_repr(self):
# pre-1900
stamp = Timestamp('1850-01-01', tz='US/Eastern')
repr(stamp)
iso8601 = '1850-01-01 01:23:45.012345'
stamp = Timestamp(iso8601, tz='US/Eastern')
result = repr(stamp)
self.assertIn(iso8601, result)
def test_timestamp_from_ordinal(self):
# GH 3042
dt = datetime(2011, 4, 16, 0, 0)
ts = Timestamp.fromordinal(dt.toordinal())
self.assertEqual(ts.to_pydatetime(), dt)
# with a tzinfo
stamp = Timestamp('2011-4-16', tz='US/Eastern')
dt_tz = stamp.to_pydatetime()
ts = Timestamp.fromordinal(dt_tz.toordinal(),tz='US/Eastern')
self.assertEqual(ts.to_pydatetime(), dt_tz)
def test_datetimeindex_integers_shift(self):
rng = date_range('1/1/2000', periods=20)
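# adding or subtracting an integer shifts the index by its frequency (daily here)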
result = rng + 5
expected = rng.shift(5)
self.assertTrue(result.equals(expected))
result = rng - 5
expected = rng.shift(-5)
self.assertTrue(result.equals(expected))
def test_astype_object(self):
# NumPy 1.6.1 weak ns support
rng = date_range('1/1/2000', periods=20)
casted = rng.astype('O')
exp_values = list(rng)
self.assert_numpy_array_equal(casted, exp_values)
def test_catch_infinite_loop(self):
offset = datetools.DateOffset(minute=5)
# blow up, don't loop forever
self.assertRaises(Exception, date_range, datetime(2011, 11, 11),
datetime(2011, 11, 12), freq=offset)
def test_append_concat(self):
rng = date_range('5/8/2012 1:45', periods=10, freq='5T')
ts = Series(np.random.randn(len(rng)), rng)
df = DataFrame(np.random.randn(len(rng), 4), index=rng)
result = ts.append(ts)
result_df = df.append(df)
ex_index = DatetimeIndex(np.tile(rng.values, 2))
self.assertTrue(result.index.equals(ex_index))
self.assertTrue(result_df.index.equals(ex_index))
appended = rng.append(rng)
self.assertTrue(appended.equals(ex_index))
appended = rng.append([rng, rng])
ex_index = DatetimeIndex(np.tile(rng.values, 3))
self.assertTrue(appended.equals(ex_index))
# different index names
rng1 = rng.copy()
rng2 = rng.copy()
rng1.name = 'foo'
rng2.name = 'bar'
self.assertEqual(rng1.append(rng1).name, 'foo')
self.assertIsNone(rng1.append(rng2).name)
def test_append_concat_tz(self):
#GH 2938
_skip_if_no_pytz()
rng = date_range('5/8/2012 1:45', periods=10, freq='5T',
tz='US/Eastern')
rng2 = date_range('5/8/2012 2:35', periods=10, freq='5T',
tz='US/Eastern')
rng3 = date_range('5/8/2012 1:45', periods=20, freq='5T',
tz='US/Eastern')
ts = Series(np.random.randn(len(rng)), rng)
df = DataFrame(np.random.randn(len(rng), 4), index=rng)
ts2 = Series(np.random.randn(len(rng2)), rng2)
df2 = DataFrame(np.random.randn(len(rng2), 4), index=rng2)
result = ts.append(ts2)
result_df = df.append(df2)
self.assertTrue(result.index.equals(rng3))
self.assertTrue(result_df.index.equals(rng3))
appended = rng.append(rng2)
self.assertTrue(appended.equals(rng3))
def test_set_dataframe_column_ns_dtype(self):
x = DataFrame([datetime.now(), datetime.now()])
self.assertEqual(x[0].dtype, np.dtype('M8[ns]'))
def test_groupby_count_dateparseerror(self):
dr = date_range(start='1/1/2012', freq='5min', periods=10)
# BAD Example, datetimes first
s = Series(np.arange(10), index=[dr, lrange(10)])
grouped = s.groupby(lambda x: x[1] % 2 == 0)
result = grouped.count()
s = Series(np.arange(10), index=[lrange(10), dr])
grouped = s.groupby(lambda x: x[0] % 2 == 0)
expected = grouped.count()
assert_series_equal(result, expected)
def test_datetimeindex_repr_short(self):
dr = date_range(start='1/1/2012', periods=1)
repr(dr)
dr = date_range(start='1/1/2012', periods=2)
repr(dr)
dr = date_range(start='1/1/2012', periods=3)
repr(dr)
def test_constructor_int64_nocopy(self):
# #1624
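# DatetimeIndex views the int64 data by default, so mutating the source
# array shows through; copy=True must isolate the index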
arr = np.arange(1000, dtype=np.int64)
index = DatetimeIndex(arr)
arr[50:100] = -1
self.assertTrue((index.asi8[50:100] == -1).all())
arr = np.arange(1000, dtype=np.int64)
index = DatetimeIndex(arr, copy=True)
arr[50:100] = -1
self.assertTrue((index.asi8[50:100] != -1).all())
def test_series_interpolate_method_values(self):
# #1646
ts = _simple_ts('1/1/2000', '1/20/2000')
ts[::2] = np.nan
result = ts.interpolate(method='values')
exp = ts.interpolate()
assert_series_equal(result, exp)
def test_frame_datetime64_handling_groupby(self):
# it works!
df = DataFrame([(3, np.datetime64('2012-07-03')),
(3, np.datetime64('2012-07-04'))],
columns=['a', 'date'])
result = df.groupby('a').first()
self.assertEqual(result['date'][3], Timestamp('2012-07-03'))
def test_series_interpolate_intraday(self):
# #1698
index = pd.date_range('1/1/2012', periods=4, freq='12D')
ts = pd.Series([0, 12, 24, 36], index)
new_index = index.append(index + pd.DateOffset(days=1)).order()
exp = ts.reindex(new_index).interpolate(method='time')
index = pd.date_range('1/1/2012', periods=4, freq='12H')
ts = pd.Series([0, 12, 24, 36], index)
new_index = index.append(index + pd.DateOffset(hours=1)).order()
result = ts.reindex(new_index).interpolate(method='time')
self.assert_numpy_array_equal(result.values, exp.values)
def test_frame_dict_constructor_datetime64_1680(self):
dr = date_range('1/1/2012', periods=10)
s = Series(dr, index=dr)
# it works!
DataFrame({'a': 'foo', 'b': s}, index=dr)
DataFrame({'a': 'foo', 'b': s.values}, index=dr)
def test_frame_datetime64_mixed_index_ctor_1681(self):
dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI')
ts = Series(dr)
# it works!
d = DataFrame({'A': 'foo', 'B': ts}, index=dr)
self.assertTrue(d['B'].isnull().all())
def test_frame_timeseries_to_records(self):
index = date_range('1/1/2000', periods=10)
df = DataFrame(np.random.randn(10, 3), index=index,
columns=['a', 'b', 'c'])
result = df.to_records()
self.assertEqual(result['index'].dtype, np.dtype('M8[ns]'))
result = df.to_records(index=False)
def test_frame_datetime64_duplicated(self):
dates = date_range('2010-07-01', end='2010-08-05')
tst = DataFrame({'symbol': 'AAA', 'date': dates})
result = tst.duplicated(['date', 'symbol'])
self.assertTrue((-result).all())
tst = DataFrame({'date': dates})
result = tst.duplicated()
self.assertTrue((-result).all())
def test_timestamp_compare_with_early_datetime(self):
# e.g. datetime.min
stamp = Timestamp('2012-01-01')
self.assertFalse(stamp == datetime.min)
self.assertFalse(stamp == datetime(1600, 1, 1))
self.assertFalse(stamp == datetime(2700, 1, 1))
self.assertNotEqual(stamp, datetime.min)
self.assertNotEqual(stamp, datetime(1600, 1, 1))
self.assertNotEqual(stamp, datetime(2700, 1, 1))
self.assertTrue(stamp > datetime(1600, 1, 1))
self.assertTrue(stamp >= datetime(1600, 1, 1))
self.assertTrue(stamp < datetime(2700, 1, 1))
self.assertTrue(stamp <= datetime(2700, 1, 1))
def test_to_html_timestamp(self):
rng = date_range('2000-01-01', periods=10)
df = DataFrame(np.random.randn(10, 4), index=rng)
result = df.to_html()
self.assertIn('2000-01-01', result)
def test_to_csv_numpy_16_bug(self):
frame = DataFrame({'a': date_range('1/1/2000', periods=10)})
buf = StringIO()
frame.to_csv(buf)
result = buf.getvalue()
self.assertIn('2000-01-01', result)
def test_series_map_box_timestamps(self):
# #2689, #2627
s = Series(date_range('1/1/2000', periods=10))
def f(x):
return (x.hour, x.day, x.month)
# it works!
s.map(f)
s.apply(f)
DataFrame(s).applymap(f)
def test_concat_datetime_datetime64_frame(self):
# #2624
rows = []
rows.append([datetime(2010, 1, 1), 1])
rows.append([datetime(2010, 1, 2), 'hi'])
df2_obj = DataFrame.from_records(rows, columns=['date', 'test'])
ind = date_range(start="2000/1/1", freq="D", periods=10)
df1 = DataFrame({'date': ind, 'test':lrange(10)})
# it works!
pd.concat([df1, df2_obj])
def test_period_resample(self):
# GH3609
s = Series(range(100),index=date_range('20130101', freq='s', periods=100), dtype='float')
s[10:30] = np.nan
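# per-minute means with NaNs skipped: mean(0..9, 30..59) == 34.5 and
# mean(60..99) == 79.5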
expected = Series([34.5, 79.5], index=[Period('2013-01-01 00:00', 'T'), Period('2013-01-01 00:01', 'T')])
result = s.to_period().resample('T', kind='period')
assert_series_equal(result, expected)
result2 = s.resample('T', kind='period')
assert_series_equal(result2, expected)
def test_period_resample_with_local_timezone(self):
# GH5430
_skip_if_no_pytz()
import pytz
local_timezone = pytz.timezone('America/Los_Angeles')
start = datetime(year=2013, month=11, day=1, hour=0, minute=0, tzinfo=pytz.utc)
# 1 day later
end = datetime(year=2013, month=11, day=2, hour=0, minute=0, tzinfo=pytz.utc)
index = pd.date_range(start, end, freq='H')
series = pd.Series(1, index=index)
series = series.tz_convert(local_timezone)
result = series.resample('D', kind='period')
# Create the expected series
expected_index = (pd.period_range(start=start, end=end, freq='D') - 1) # Index is moved back a day with the timezone conversion from UTC to Pacific
expected = pd.Series(1, index=expected_index)
assert_series_equal(result, expected)
def test_pickle(self):
#GH4606
from pandas.compat import cPickle
import pickle
for pick in [pickle, cPickle]:
p = pick.loads(pick.dumps(NaT))
self.assertTrue(p is NaT)
idx = pd.to_datetime(['2013-01-01', NaT, '2014-01-06'])
idx_p = pick.loads(pick.dumps(idx))
self.assertTrue(idx_p[0] == idx[0])
self.assertTrue(idx_p[1] is NaT)
self.assertTrue(idx_p[2] == idx[2])
def _simple_ts(start, end, freq='D'):
rng = date_range(start, end, freq=freq)
return Series(np.random.randn(len(rng)), index=rng)
class TestDatetimeIndex(tm.TestCase):
_multiprocess_can_split_ = True
def test_hash_error(self):
index = date_range('20010101', periods=10)
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(index).__name__):
hash(index)
def test_stringified_slice_with_tz(self):
#GH2658
import datetime
start = datetime.datetime.now()
idx = DatetimeIndex(start=start, freq="1d", periods=10)
df = DataFrame(lrange(10), index=idx)
df["2013-01-14 23:44:34.437768-05:00":] # no exception here
def test_append_join_nondatetimeindex(self):
rng = date_range('1/1/2000', periods=10)
idx = Index(['a', 'b', 'c', 'd'])
result = rng.append(idx)
tm.assert_isinstance(result[0], Timestamp)
# it works
rng.join(idx, how='outer')
def test_astype(self):
rng = date_range('1/1/2000', periods=10)
result = rng.astype('i8')
self.assert_numpy_array_equal(result, rng.asi8)
def test_to_period_nofreq(self):
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-04'])
self.assertRaises(ValueError, idx.to_period)
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03'],
freq='infer')
idx.to_period()
def test_000constructor_resolution(self):
# 2252
t1 = Timestamp((1352934390 * 1000000000) + 1000000 + 1000 + 1)
idx = DatetimeIndex([t1])
self.assertEqual(idx.nanosecond[0], t1.nanosecond)
def test_constructor_coverage(self):
rng = date_range('1/1/2000', periods=10.5)
exp = date_range('1/1/2000', periods=10)
self.assertTrue(rng.equals(exp))
self.assertRaises(ValueError, DatetimeIndex, start='1/1/2000',
periods='foo', freq='D')
self.assertRaises(ValueError, DatetimeIndex, start='1/1/2000',
end='1/10/2000')
self.assertRaises(ValueError, DatetimeIndex, '1/1/2000')
# generator expression
gen = (datetime(2000, 1, 1) + timedelta(i) for i in range(10))
result = DatetimeIndex(gen)
expected = DatetimeIndex([datetime(2000, 1, 1) + timedelta(i)
for i in range(10)])
self.assertTrue(result.equals(expected))
# NumPy string array
strings = np.array(['2000-01-01', '2000-01-02', '2000-01-03'])
result = DatetimeIndex(strings)
expected = DatetimeIndex(strings.astype('O'))
self.assertTrue(result.equals(expected))
from_ints = DatetimeIndex(expected.asi8)
self.assertTrue(from_ints.equals(expected))
# non-conforming
self.assertRaises(ValueError, DatetimeIndex,
['2000-01-01', '2000-01-02', '2000-01-04'],
freq='D')
self.assertRaises(ValueError, DatetimeIndex,
start='2011-01-01', freq='b')
self.assertRaises(ValueError, DatetimeIndex,
end='2011-01-01', freq='B')
self.assertRaises(ValueError, DatetimeIndex, periods=10, freq='D')
def test_constructor_name(self):
idx = DatetimeIndex(start='2000-01-01', periods=1, freq='A',
name='TEST')
self.assertEqual(idx.name, 'TEST')
def test_comparisons_coverage(self):
rng = date_range('1/1/2000', periods=10)
# raise TypeError for now
self.assertRaises(TypeError, rng.__lt__, rng[3].value)
result = rng == list(rng)
exp = rng == rng
self.assert_numpy_array_equal(result, exp)
def test_map(self):
rng = date_range('1/1/2000', periods=10)
f = lambda x: x.strftime('%Y%m%d')
result = rng.map(f)
exp = [f(x) for x in rng]
self.assert_numpy_array_equal(result, exp)
def test_add_union(self):
rng = date_range('1/1/2000', periods=5)
rng2 = date_range('1/6/2000', periods=5)
result = rng + rng2
expected = rng.union(rng2)
self.assertTrue(result.equals(expected))
def test_misc_coverage(self):
rng = date_range('1/1/2000', periods=5)
result = rng.groupby(rng.day)
tm.assert_isinstance(list(result.values())[0][0], Timestamp)
idx = DatetimeIndex(['2000-01-03', '2000-01-01', '2000-01-02'])
self.assertTrue(idx.equals(list(idx)))
non_datetime = Index(list('abc'))
self.assertFalse(idx.equals(list(non_datetime)))
def test_union_coverage(self):
idx = DatetimeIndex(['2000-01-03', '2000-01-01', '2000-01-02'])
ordered = DatetimeIndex(idx.order(), freq='infer')
result = ordered.union(idx)
self.assertTrue(result.equals(ordered))
result = ordered[:0].union(ordered)
self.assertTrue(result.equals(ordered))
self.assertEqual(result.freq, ordered.freq)
def test_union_bug_1730(self):
rng_a = date_range('1/1/2012', periods=4, freq='3H')
rng_b = date_range('1/1/2012', periods=4, freq='4H')
result = rng_a.union(rng_b)
exp = DatetimeIndex(sorted(set(list(rng_a)) | set(list(rng_b))))
self.assertTrue(result.equals(exp))
def test_union_bug_1745(self):
left = DatetimeIndex(['2012-05-11 15:19:49.695000'])
right = DatetimeIndex(['2012-05-29 13:04:21.322000',
'2012-05-11 15:27:24.873000',
'2012-05-11 15:31:05.350000'])
result = left.union(right)
exp = DatetimeIndex(sorted(set(list(left)) | set(list(right))))
self.assertTrue(result.equals(exp))
def test_union_bug_4564(self):
from pandas import DateOffset
left = date_range("2013-01-01", "2013-02-01")
right = left + DateOffset(minutes=15)
result = left.union(right)
exp = DatetimeIndex(sorted(set(list(left)) | set(list(right))))
self.assertTrue(result.equals(exp))
def test_intersection_bug_1708(self):
from pandas import DateOffset
index_1 = date_range('1/1/2012', periods=4, freq='12H')
index_2 = index_1 + DateOffset(hours=1)
result = index_1 & index_2
self.assertEqual(len(result), 0)
# def test_add_timedelta64(self):
# rng = date_range('1/1/2000', periods=5)
# delta = rng.values[3] - rng.values[1]
# result = rng + delta
# expected = rng + timedelta(2)
# self.assertTrue(result.equals(expected))
def test_get_duplicates(self):
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-02',
'2000-01-03', '2000-01-03', '2000-01-04'])
result = idx.get_duplicates()
ex = DatetimeIndex(['2000-01-02', '2000-01-03'])
self.assertTrue(result.equals(ex))
def test_argmin_argmax(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
self.assertEqual(idx.argmin(), 1)
self.assertEqual(idx.argmax(), 0)
def test_order(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
ordered = idx.order()
self.assertTrue(ordered.is_monotonic)
ordered = idx.order(ascending=False)
self.assertTrue(ordered[::-1].is_monotonic)
ordered, dexer = idx.order(return_indexer=True)
self.assertTrue(ordered.is_monotonic)
self.assert_numpy_array_equal(dexer, [1, 2, 0])
ordered, dexer = idx.order(return_indexer=True, ascending=False)
self.assertTrue(ordered[::-1].is_monotonic)
self.assert_numpy_array_equal(dexer, [0, 2, 1])
def test_insert(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
result = idx.insert(2, datetime(2000, 1, 5))
exp = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-05',
'2000-01-02'])
self.assertTrue(result.equals(exp))
# insertion of non-datetime should coerce to object index
result = idx.insert(1, 'inserted')
expected = Index([datetime(2000, 1, 4), 'inserted', datetime(2000, 1, 1),
datetime(2000, 1, 2)])
self.assertNotIsInstance(result, DatetimeIndex)
tm.assert_index_equal(result, expected)
idx = date_range('1/1/2000', periods=3, freq='M')
result = idx.insert(3, datetime(2000, 4, 30))
self.assertEqual(result.freqstr, 'M')
def test_map_bug_1677(self):
index = DatetimeIndex(['2012-04-25 09:30:00.393000'])
f = index.asof
result = index.map(f)
expected = np.array([f(index[0])])
self.assert_numpy_array_equal(result, expected)
def test_groupby_function_tuple_1677(self):
df = DataFrame(np.random.rand(100),
index=date_range("1/1/2000", periods=100))
monthly_group = df.groupby(lambda x: (x.year, x.month))
result = monthly_group.mean()
tm.assert_isinstance(result.index[0], tuple)
def test_append_numpy_bug_1681(self):
# another datetime64 bug
dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI')
a = DataFrame()
c = DataFrame({'A': 'foo', 'B': dr}, index=dr)
result = a.append(c)
self.assertTrue((result['B'] == dr).all())
def test_isin(self):
index = tm.makeDateIndex(4)
result = index.isin(index)
self.assertTrue(result.all())
result = index.isin(list(index))
self.assertTrue(result.all())
assert_almost_equal(index.isin([index[2], 5]),
[False, False, True, False])
def test_union(self):
i1 = Int64Index(np.arange(0, 20, 2))
i2 = Int64Index(np.arange(10, 30, 2))
result = i1.union(i2)
expected = Int64Index(np.arange(0, 30, 2))
self.assert_numpy_array_equal(result, expected)
def test_union_with_DatetimeIndex(self):
i1 = Int64Index(np.arange(0, 20, 2))
i2 = DatetimeIndex(start='2012-01-03 00:00:00', periods=10, freq='D')
i1.union(i2) # Works
i2.union(i1) # Fails with "AttributeError: can't set attribute"
def test_time(self):
rng = pd.date_range('1/1/2000', freq='12min', periods=10)
result = pd.Index(rng).time
expected = [t.time() for t in rng]
self.assertTrue((result == expected).all())
def test_date(self):
rng = pd.date_range('1/1/2000', freq='12H', periods=10)
result = pd.Index(rng).date
expected = [t.date() for t in rng]
self.assertTrue((result == expected).all())
def test_does_not_convert_mixed_integer(self):
df = tm.makeCustomDataframe(10, 10, data_gen_f=lambda *args, **kwargs:
randn(), r_idx_type='i', c_idx_type='dt')
cols = df.columns.join(df.index, how='outer')
joined = cols.join(df.columns)
self.assertEqual(cols.dtype, np.dtype('O'))
self.assertEqual(cols.dtype, joined.dtype)
assert_array_equal(cols.values, joined.values)
def test_slice_keeps_name(self):
# GH4226
st = pd.Timestamp('2013-07-01 00:00:00', tz='America/Los_Angeles')
et = pd.Timestamp('2013-07-02 00:00:00', tz='America/Los_Angeles')
dr = pd.date_range(st, et, freq='H', name='timebucket')
self.assertEqual(dr[1:].name, dr.name)
def test_join_self(self):
index = date_range('1/1/2000', periods=10)
kinds = 'outer', 'inner', 'left', 'right'
for kind in kinds:
joined = index.join(index, how=kind)
self.assertIs(index, joined)
def assert_index_parameters(self, index):
assert index.freq == '40960N'
assert index.inferred_freq == '40960N'
def test_ns_index(self):
if _np_version_under1p7:
raise nose.SkipTest
nsamples = 400
ns = int(1e9 / 24414)
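# a ~24414 Hz sample rate gives int(1e9 / 24414) == 40960 ns per sample,
# hence the '40960N' frequency checked below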
dtstart = np.datetime64('2012-09-20T00:00:00')
dt = dtstart + np.arange(nsamples) * np.timedelta64(ns, 'ns')
freq = ns * pd.datetools.Nano()
index = pd.DatetimeIndex(dt, freq=freq, name='time')
self.assert_index_parameters(index)
new_index = pd.DatetimeIndex(start=index[0], end=index[-1], freq=index.freq)
self.assert_index_parameters(new_index)
def test_join_with_period_index(self):
df = tm.makeCustomDataframe(10, 10, data_gen_f=lambda *args:
np.random.randint(2), c_idx_type='p',
r_idx_type='dt')
s = df.iloc[:5, 0]
joins = 'left', 'right', 'inner', 'outer'
for join in joins:
with tm.assertRaisesRegexp(ValueError, 'can only call with other '
'PeriodIndex-ed objects'):
df.columns.join(s.index, how=join)
def test_factorize(self):
idx1 = DatetimeIndex(['2014-01', '2014-01', '2014-02',
'2014-02', '2014-03', '2014-03'])
exp_arr = np.array([0, 0, 1, 1, 2, 2])
exp_idx = DatetimeIndex(['2014-01', '2014-02', '2014-03'])
arr, idx = idx1.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
arr, idx = idx1.factorize(sort=True)
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
# tz must be preserved
idx1 = idx1.tz_localize('Asia/Tokyo')
exp_idx = exp_idx.tz_localize('Asia/Tokyo')
arr, idx = idx1.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
idx2 = pd.DatetimeIndex(['2014-03', '2014-03', '2014-02', '2014-01',
'2014-03', '2014-01'])
exp_arr = np.array([2, 2, 1, 0, 2, 0])
exp_idx = DatetimeIndex(['2014-01', '2014-02', '2014-03'])
arr, idx = idx2.factorize(sort=True)
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
exp_arr = np.array([0, 0, 1, 2, 0, 2])
exp_idx = DatetimeIndex(['2014-03', '2014-02', '2014-01'])
arr, idx = idx2.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
# freq must be preserved
idx3 = date_range('2000-01', periods=4, freq='M', tz='Asia/Tokyo')
exp_arr = np.array([0, 1, 2, 3])
arr, idx = idx3.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(idx3))
class TestDatetime64(tm.TestCase):
"""
Also test support for datetime64[ns] in Series / DataFrame
"""
def setUp(self):
dti = DatetimeIndex(start=datetime(2005, 1, 1),
end=datetime(2005, 1, 10), freq='Min')
self.series = Series(rand(len(dti)), dti)
def test_datetimeindex_accessors(self):
dti = DatetimeIndex(
freq='D', start=datetime(1998, 1, 1), periods=365)
self.assertEqual(dti.year[0], 1998)
self.assertEqual(dti.month[0], 1)
self.assertEqual(dti.day[0], 1)
self.assertEqual(dti.hour[0], 0)
self.assertEqual(dti.minute[0], 0)
self.assertEqual(dti.second[0], 0)
self.assertEqual(dti.microsecond[0], 0)
self.assertEqual(dti.dayofweek[0], 3)
self.assertEqual(dti.dayofyear[0], 1)
self.assertEqual(dti.dayofyear[120], 121)
self.assertEqual(dti.weekofyear[0], 1)
self.assertEqual(dti.weekofyear[120], 18)
self.assertEqual(dti.quarter[0], 1)
self.assertEqual(dti.quarter[120], 2)
self.assertEqual(dti.is_month_start[0], True)
self.assertEqual(dti.is_month_start[1], False)
self.assertEqual(dti.is_month_start[31], True)
self.assertEqual(dti.is_quarter_start[0], True)
self.assertEqual(dti.is_quarter_start[90], True)
self.assertEqual(dti.is_year_start[0], True)
self.assertEqual(dti.is_year_start[364], False)
self.assertEqual(dti.is_month_end[0], False)
self.assertEqual(dti.is_month_end[30], True)
self.assertEqual(dti.is_month_end[31], False)
self.assertEqual(dti.is_month_end[364], True)
self.assertEqual(dti.is_quarter_end[0], False)
self.assertEqual(dti.is_quarter_end[30], False)
self.assertEqual(dti.is_quarter_end[89], True)
self.assertEqual(dti.is_quarter_end[364], True)
self.assertEqual(dti.is_year_end[0], False)
self.assertEqual(dti.is_year_end[364], True)
self.assertEqual(len(dti.year), 365)
self.assertEqual(len(dti.month), 365)
self.assertEqual(len(dti.day), 365)
self.assertEqual(len(dti.hour), 365)
self.assertEqual(len(dti.minute), 365)
self.assertEqual(len(dti.second), 365)
self.assertEqual(len(dti.microsecond), 365)
self.assertEqual(len(dti.dayofweek), 365)
self.assertEqual(len(dti.dayofyear), 365)
self.assertEqual(len(dti.weekofyear), 365)
self.assertEqual(len(dti.quarter), 365)
self.assertEqual(len(dti.is_month_start), 365)
self.assertEqual(len(dti.is_month_end), 365)
self.assertEqual(len(dti.is_quarter_start), 365)
self.assertEqual(len(dti.is_quarter_end), 365)
self.assertEqual(len(dti.is_year_start), 365)
self.assertEqual(len(dti.is_year_end), 365)
dti = DatetimeIndex(
freq='BQ-FEB', start=datetime(1998, 1, 1), periods=4)
self.assertEqual(sum(dti.is_quarter_start), 0)
self.assertEqual(sum(dti.is_quarter_end), 4)
self.assertEqual(sum(dti.is_year_start), 0)
self.assertEqual(sum(dti.is_year_end), 1)
# Ensure is_start/end accessors throw ValueError for CustomBusinessDay, CBD requires np >= 1.7
if not _np_version_under1p7:
bday_egypt = offsets.CustomBusinessDay(weekmask='Sun Mon Tue Wed Thu')
dti = date_range(datetime(2013, 4, 30), periods=5, freq=bday_egypt)
self.assertRaises(ValueError, lambda: dti.is_month_start)
dti = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03'])
self.assertEqual(dti.is_month_start[0], 1)
tests = [
(Timestamp('2013-06-01', offset='M').is_month_start, 1),
(Timestamp('2013-06-01', offset='BM').is_month_start, 0),
(Timestamp('2013-06-03', offset='M').is_month_start, 0),
(Timestamp('2013-06-03', offset='BM').is_month_start, 1),
(Timestamp('2013-02-28', offset='Q-FEB').is_month_end, 1),
(Timestamp('2013-02-28', offset='Q-FEB').is_quarter_end, 1),
(Timestamp('2013-02-28', offset='Q-FEB').is_year_end, 1),
(Timestamp('2013-03-01', offset='Q-FEB').is_month_start, 1),
(Timestamp('2013-03-01', offset='Q-FEB').is_quarter_start, 1),
(Timestamp('2013-03-01', offset='Q-FEB').is_year_start, 1),
(Timestamp('2013-03-31', offset='QS-FEB').is_month_end, 1),
(Timestamp('2013-03-31', offset='QS-FEB').is_quarter_end, 0),
(Timestamp('2013-03-31', offset='QS-FEB').is_year_end, 0),
(Timestamp('2013-02-01', offset='QS-FEB').is_month_start, 1),
(Timestamp('2013-02-01', offset='QS-FEB').is_quarter_start, 1),
(Timestamp('2013-02-01', offset='QS-FEB').is_year_start, 1),
(Timestamp('2013-06-30', offset='BQ').is_month_end, 0),
(Timestamp('2013-06-30', offset='BQ').is_quarter_end, 0),
(Timestamp('2013-06-30', offset='BQ').is_year_end, 0),
(Timestamp('2013-06-28', offset='BQ').is_month_end, 1),
(Timestamp('2013-06-28', offset='BQ').is_quarter_end, 1),
(Timestamp('2013-06-28', offset='BQ').is_year_end, 0),
(Timestamp('2013-06-30', offset='BQS-APR').is_month_end, 0),
(Timestamp('2013-06-30', offset='BQS-APR').is_quarter_end, 0),
(Timestamp('2013-06-30', offset='BQS-APR').is_year_end, 0),
(Timestamp('2013-06-28', offset='BQS-APR').is_month_end, 1),
(Timestamp('2013-06-28', offset='BQS-APR').is_quarter_end, 1),
(Timestamp('2013-03-29', offset='BQS-APR').is_year_end, 1),
(Timestamp('2013-11-01', offset='AS-NOV').is_year_start, 1),
(Timestamp('2013-10-31', offset='AS-NOV').is_year_end, 1)]
for ts, value in tests:
self.assertEqual(ts, value)
def test_nanosecond_field(self):
dti = DatetimeIndex(np.arange(10))
self.assert_numpy_array_equal(dti.nanosecond, np.arange(10))
def test_datetimeindex_diff(self):
dti1 = DatetimeIndex(freq='Q-JAN', start=datetime(1997, 12, 31),
periods=100)
dti2 = DatetimeIndex(freq='Q-JAN', start=datetime(1997, 12, 31),
periods=98)
self.assertEqual(len(dti1.diff(dti2)), 2)
def test_fancy_getitem(self):
dti = DatetimeIndex(freq='WOM-1FRI', start=datetime(2005, 1, 1),
end=datetime(2010, 1, 1))
s = Series(np.arange(len(dti)), index=dti)
self.assertEqual(s[48], 48)
self.assertEqual(s['1/2/2009'], 48)
self.assertEqual(s['2009-1-2'], 48)
self.assertEqual(s[datetime(2009, 1, 2)], 48)
self.assertEqual(s[lib.Timestamp(datetime(2009, 1, 2))], 48)
self.assertRaises(KeyError, s.__getitem__, '2009-1-3')
assert_series_equal(s['3/6/2009':'2009-06-05'],
s[datetime(2009, 3, 6):datetime(2009, 6, 5)])
def test_fancy_setitem(self):
dti = DatetimeIndex(freq='WOM-1FRI', start=datetime(2005, 1, 1),
end=datetime(2010, 1, 1))
s = Series(np.arange(len(dti)), index=dti)
s[48] = -1
self.assertEqual(s[48], -1)
s['1/2/2009'] = -2
self.assertEqual(s[48], -2)
s['1/2/2009':'2009-06-05'] = -3
self.assertTrue((s[48:54] == -3).all())
def test_datetimeindex_constructor(self):
arr = ['1/1/2005', '1/2/2005', 'Jn 3, 2005', '2005-01-04']
self.assertRaises(Exception, DatetimeIndex, arr)
arr = ['1/1/2005', '1/2/2005', '1/3/2005', '2005-01-04']
idx1 = DatetimeIndex(arr)
arr = [datetime(2005, 1, 1), '1/2/2005', '1/3/2005', '2005-01-04']
idx2 = DatetimeIndex(arr)
arr = [lib.Timestamp(datetime(2005, 1, 1)), '1/2/2005', '1/3/2005',
'2005-01-04']
idx3 = DatetimeIndex(arr)
arr = np.array(['1/1/2005', '1/2/2005', '1/3/2005',
'2005-01-04'], dtype='O')
idx4 = DatetimeIndex(arr)
arr = to_datetime(['1/1/2005', '1/2/2005', '1/3/2005', '2005-01-04'])
idx5 = DatetimeIndex(arr)
arr = to_datetime(
['1/1/2005', '1/2/2005', 'Jan 3, 2005', '2005-01-04'])
idx6 = DatetimeIndex(arr)
idx7 = DatetimeIndex(['12/05/2007', '25/01/2008'], dayfirst=True)
idx8 = DatetimeIndex(['2007/05/12', '2008/01/25'], dayfirst=False,
yearfirst=True)
self.assertTrue(idx7.equals(idx8))
for other in [idx2, idx3, idx4, idx5, idx6]:
self.assertTrue((idx1.values == other.values).all())
sdate = datetime(1999, 12, 25)
edate = datetime(2000, 1, 1)
idx = DatetimeIndex(start=sdate, freq='1B', periods=20)
self.assertEqual(len(idx), 20)
self.assertEqual(idx[0], sdate + 0 * dt.bday)
self.assertEqual(idx.freq, 'B')
idx = DatetimeIndex(end=edate, freq=('D', 5), periods=20)
self.assertEqual(len(idx), 20)
self.assertEqual(idx[-1], edate)
self.assertEqual(idx.freq, '5D')
idx1 = DatetimeIndex(start=sdate, end=edate, freq='W-SUN')
idx2 = DatetimeIndex(start=sdate, end=edate,
freq=dt.Week(weekday=6))
self.assertEqual(len(idx1), len(idx2))
self.assertEqual(idx1.offset, idx2.offset)
idx1 = DatetimeIndex(start=sdate, end=edate, freq='QS')
idx2 = DatetimeIndex(start=sdate, end=edate,
freq=dt.QuarterBegin(startingMonth=1))
self.assertEqual(len(idx1), len(idx2))
self.assertEqual(idx1.offset, idx2.offset)
idx1 = DatetimeIndex(start=sdate, end=edate, freq='BQ')
idx2 = DatetimeIndex(start=sdate, end=edate,
freq=dt.BQuarterEnd(startingMonth=12))
self.assertEqual(len(idx1), len(idx2))
self.assertEqual(idx1.offset, idx2.offset)
def test_dayfirst(self):
# GH 5917
arr = ['10/02/2014', '11/02/2014', '12/02/2014']
expected = DatetimeIndex([datetime(2014, 2, 10),
datetime(2014, 2, 11),
datetime(2014, 2, 12)])
idx1 = DatetimeIndex(arr, dayfirst=True)
idx2 = DatetimeIndex(np.array(arr), dayfirst=True)
idx3 = to_datetime(arr, dayfirst=True)
idx4 = to_datetime(np.array(arr), dayfirst=True)
idx5 = DatetimeIndex(Index(arr), dayfirst=True)
idx6 = DatetimeIndex(Series(arr), dayfirst=True)
self.assertTrue(expected.equals(idx1))
self.assertTrue(expected.equals(idx2))
self.assertTrue(expected.equals(idx3))
self.assertTrue(expected.equals(idx4))
self.assertTrue(expected.equals(idx5))
self.assertTrue(expected.equals(idx6))
def test_dti_snap(self):
dti = DatetimeIndex(['1/1/2002', '1/2/2002', '1/3/2002', '1/4/2002',
'1/5/2002', '1/6/2002', '1/7/2002'], freq='D')
res = dti.snap(freq='W-MON')
exp = date_range('12/31/2001', '1/7/2002', freq='w-mon')
exp = exp.repeat([3, 4])
self.assertTrue((res == exp).all())
res = dti.snap(freq='B')
exp = date_range('1/1/2002', '1/7/2002', freq='b')
exp = exp.repeat([1, 1, 1, 2, 2])
self.assertTrue((res == exp).all())
def test_dti_reset_index_round_trip(self):
dti = DatetimeIndex(start='1/1/2001', end='6/1/2001', freq='D')
d1 = DataFrame({'v': np.random.rand(len(dti))}, index=dti)
d2 = d1.reset_index()
self.assertEqual(d2.dtypes[0], np.dtype('M8[ns]'))
d3 = d2.set_index('index')
assert_frame_equal(d1, d3, check_names=False)
# #2329
stamp = datetime(2012, 11, 22)
df = DataFrame([[stamp, 12.1]], columns=['Date', 'Value'])
df = df.set_index('Date')
self.assertEqual(df.index[0], stamp)
self.assertEqual(df.reset_index()['Date'][0], stamp)
def test_dti_set_index_reindex(self):
# GH 6631
df = DataFrame(np.random.random(6))
idx1 = date_range('2011/01/01', periods=6, freq='M', tz='US/Eastern')
idx2 = date_range('2013', periods=6, freq='A', tz='Asia/Tokyo')
df = df.set_index(idx1)
self.assertTrue(df.index.equals(idx1))
df = df.reindex(idx2)
self.assertTrue(df.index.equals(idx2))
def test_datetimeindex_union_join_empty(self):
dti = DatetimeIndex(start='1/1/2001', end='2/1/2001', freq='D')
empty = Index([])
result = dti.union(empty)
tm.assert_isinstance(result, DatetimeIndex)
self.assertIs(result, result)
result = dti.join(empty)
tm.assert_isinstance(result, DatetimeIndex)
def test_series_set_value(self):
# #1561
dates = [datetime(2001, 1, 1), datetime(2001, 1, 2)]
index = DatetimeIndex(dates)
s = Series().set_value(dates[0], 1.)
s2 = s.set_value(dates[1], np.nan)
exp = Series([1., np.nan], index=index)
assert_series_equal(s2, exp)
# s = Series(index[:1], index[:1])
# s2 = s.set_value(dates[1], index[1])
# self.assertEqual(s2.values.dtype, 'M8[ns]')
@slow
def test_slice_locs_indexerror(self):
times = [datetime(2000, 1, 1) + timedelta(minutes=i * 10)
for i in range(100000)]
s = Series(lrange(100000), times)
s.ix[datetime(1900, 1, 1):datetime(2100, 1, 1)]
class TestSeriesDatetime64(tm.TestCase):
def setUp(self):
self.series = Series(date_range('1/1/2000', periods=10))
def test_auto_conversion(self):
series = Series(list(date_range('1/1/2000', periods=10)))
self.assertEqual(series.dtype, 'M8[ns]')
def test_constructor_cant_cast_datetime64(self):
self.assertRaises(TypeError, Series,
date_range('1/1/2000', periods=10), dtype=float)
def test_series_comparison_scalars(self):
val = datetime(2000, 1, 4)
result = self.series > val
expected = np.array([x > val for x in self.series])
self.assert_numpy_array_equal(result, expected)
val = self.series[5]
result = self.series > val
expected = np.array([x > val for x in self.series])
self.assert_numpy_array_equal(result, expected)
def test_between(self):
left, right = self.series[[2, 7]]
result = self.series.between(left, right)
expected = (self.series >= left) & (self.series <= right)
assert_series_equal(result, expected)
#----------------------------------------------------------------------
# NaT support
def test_NaT_scalar(self):
series = Series([0, 1000, 2000, iNaT], dtype='M8[ns]')
val = series[3]
self.assertTrue(com.isnull(val))
series[2] = val
self.assertTrue(com.isnull(series[2]))
def test_set_none_nan(self):
self.series[3] = None
self.assertIs(self.series[3], NaT)
self.series[3:5] = None
self.assertIs(self.series[4], NaT)
self.series[5] = np.nan
self.assertIs(self.series[5], NaT)
self.series[5:7] = np.nan
self.assertIs(self.series[6], NaT)
def test_intercept_astype_object(self):
# this test no longer makes sense as series is by default already M8[ns]
expected = self.series.astype('object')
df = DataFrame({'a': self.series,
'b': np.random.randn(len(self.series))})
result = df.values.squeeze()
self.assertTrue((result[:, 0] == expected.values).all())
df = DataFrame({'a': self.series,
'b': ['foo'] * len(self.series)})
result = df.values.squeeze()
self.assertTrue((result[:, 0] == expected.values).all())
def test_union(self):
rng1 = date_range('1/1/1999', '1/1/2012', freq='MS')
s1 = Series(np.random.randn(len(rng1)), rng1)
rng2 = date_range('1/1/1980', '12/1/2001', freq='MS')
s2 = Series(np.random.randn(len(rng2)), rng2)
df = DataFrame({'s1': s1, 's2': s2})
self.assertEqual(df.index.values.dtype, np.dtype('M8[ns]'))
def test_intersection(self):
rng = date_range('6/1/2000', '6/15/2000', freq='D')
rng = rng.delete(5)
rng2 = date_range('5/15/2000', '6/20/2000', freq='D')
rng2 = DatetimeIndex(rng2.values)
result = rng.intersection(rng2)
self.assertTrue(result.equals(rng))
# empty same freq GH2129
rng = date_range('6/1/2000', '6/15/2000', freq='T')
result = rng[0:0].intersection(rng)
self.assertEqual(len(result), 0)
result = rng.intersection(rng[0:0])
self.assertEqual(len(result), 0)
def test_date_range_bms_bug(self):
# #1645
rng = date_range('1/1/2000', periods=10, freq='BMS')
ex_first = Timestamp('2000-01-03')
self.assertEqual(rng[0], ex_first)
def test_string_index_series_name_converted(self):
# #1644
df = DataFrame(np.random.randn(10, 4),
index=date_range('1/1/2000', periods=10))
result = df.ix['1/3/2000']
self.assertEqual(result.name, df.index[2])
result = df.T['1/3/2000']
self.assertEqual(result.name, df.index[2])
class TestTimestamp(tm.TestCase):
def test_class_ops(self):
_skip_if_no_pytz()
import pytz
def compare(x,y):
self.assertEqual(int(Timestamp(x).value/1e9), int(Timestamp(y).value/1e9))
compare(Timestamp.now(),datetime.now())
compare(Timestamp.now('UTC'),datetime.now(pytz.timezone('UTC')))
compare(Timestamp.utcnow(),datetime.utcnow())
compare(Timestamp.today(),datetime.today())
def test_basics_nanos(self):
val = np.int64(946684800000000000).view('M8[ns]')
stamp = Timestamp(val.view('i8') + 500)
self.assertEqual(stamp.year, 2000)
self.assertEqual(stamp.month, 1)
self.assertEqual(stamp.microsecond, 0)
self.assertEqual(stamp.nanosecond, 500)
def test_unit(self):
def check(val,unit=None,h=1,s=1,us=0):
stamp = Timestamp(val, unit=unit)
self.assertEqual(stamp.year, 2000)
self.assertEqual(stamp.month, 1)
self.assertEqual(stamp.day, 1)
self.assertEqual(stamp.hour, h)
if unit != 'D':
self.assertEqual(stamp.minute, 1)
self.assertEqual(stamp.second, s)
self.assertEqual(stamp.microsecond, us)
else:
self.assertEqual(stamp.minute, 0)
self.assertEqual(stamp.second, 0)
self.assertEqual(stamp.microsecond, 0)
self.assertEqual(stamp.nanosecond, 0)
ts = Timestamp('20000101 01:01:01')
val = ts.value
days = (ts - Timestamp('1970-01-01')).days
check(val)
check(val/long(1000),unit='us')
check(val/long(1000000),unit='ms')
check(val/long(1000000000),unit='s')
check(days,unit='D',h=0)
# using truediv, so these are like floats
if compat.PY3:
check((val+500000)/long(1000000000),unit='s',us=500)
check((val+500000000)/long(1000000000),unit='s',us=500000)
check((val+500000)/long(1000000),unit='ms',us=500)
# get chopped in py2
else:
check((val+500000)/long(1000000000),unit='s')
check((val+500000000)/long(1000000000),unit='s')
check((val+500000)/long(1000000),unit='ms')
# ok
check((val+500000)/long(1000),unit='us',us=500)
check((val+500000000)/long(1000000),unit='ms',us=500000)
# floats
check(val/1000.0 + 5,unit='us',us=5)
check(val/1000.0 + 5000,unit='us',us=5000)
check(val/1000000.0 + 0.5,unit='ms',us=500)
check(val/1000000.0 + 0.005,unit='ms',us=5)
check(val/1000000000.0 + 0.5,unit='s',us=500000)
check(days + 0.5,unit='D',h=12)
# nan
result = Timestamp(np.nan)
self.assertIs(result, NaT)
result = Timestamp(None)
self.assertIs(result, NaT)
result = Timestamp(iNaT)
self.assertIs(result, NaT)
result = Timestamp(NaT)
self.assertIs(result, NaT)
def test_comparison(self):
# 5-18-2012 00:00:00.000
stamp = long(1337299200000000000)
val = Timestamp(stamp)
self.assertEqual(val, val)
self.assertFalse(val != val)
self.assertFalse(val < val)
self.assertTrue(val <= val)
self.assertFalse(val > val)
self.assertTrue(val >= val)
other = datetime(2012, 5, 18)
self.assertEqual(val, other)
self.assertFalse(val != other)
self.assertFalse(val < other)
self.assertTrue(val <= other)
self.assertFalse(val > other)
self.assertTrue(val >= other)
other = Timestamp(stamp + 100)
self.assertNotEqual(val, other)
self.assertNotEqual(val, other)
self.assertTrue(val < other)
self.assertTrue(val <= other)
self.assertTrue(other > val)
self.assertTrue(other >= val)
def test_cant_compare_tz_naive_w_aware(self):
_skip_if_no_pytz()
# #1404
a = Timestamp('3/12/2012')
b = Timestamp('3/12/2012', tz='utc')
self.assertRaises(Exception, a.__eq__, b)
self.assertRaises(Exception, a.__ne__, b)
self.assertRaises(Exception, a.__lt__, b)
self.assertRaises(Exception, a.__gt__, b)
self.assertRaises(Exception, b.__eq__, a)
self.assertRaises(Exception, b.__ne__, a)
self.assertRaises(Exception, b.__lt__, a)
self.assertRaises(Exception, b.__gt__, a)
if sys.version_info < (3, 3):
self.assertRaises(Exception, a.__eq__, b.to_pydatetime())
self.assertRaises(Exception, a.to_pydatetime().__eq__, b)
else:
self.assertFalse(a == b.to_pydatetime())
self.assertFalse(a.to_pydatetime() == b)
def test_delta_preserve_nanos(self):
val = Timestamp(long(1337299200000000123))
result = val + timedelta(1)
self.assertEqual(result.nanosecond, val.nanosecond)
def test_frequency_misc(self):
self.assertEqual(fmod.get_freq_group('T'),
fmod.FreqGroup.FR_MIN)
code, stride = fmod.get_freq_code(offsets.Hour())
self.assertEqual(code, fmod.FreqGroup.FR_HR)
code, stride = fmod.get_freq_code((5, 'T'))
self.assertEqual(code, fmod.FreqGroup.FR_MIN)
self.assertEqual(stride, 5)
offset = offsets.Hour()
result = fmod.to_offset(offset)
self.assertEqual(result, offset)
result = fmod.to_offset((5, 'T'))
expected = offsets.Minute(5)
self.assertEqual(result, expected)
self.assertRaises(ValueError, fmod.get_freq_code, (5, 'baz'))
self.assertRaises(ValueError, fmod.to_offset, '100foo')
self.assertRaises(ValueError, fmod.to_offset, ('', ''))
result = fmod.get_standard_freq(offsets.Hour())
self.assertEqual(result, 'H')
def test_hash_equivalent(self):
d = {datetime(2011, 1, 1): 5}
stamp = Timestamp(datetime(2011, 1, 1))
self.assertEqual(d[stamp], 5)
def test_timestamp_compare_scalars(self):
# case where ndim == 0
lhs = np.datetime64(datetime(2013, 12, 6))
rhs = Timestamp('now')
nat = Timestamp('nat')
###########################
### SOLVING FOR iJL208 ###
###########################
import json
import pandas as pd
import pulp
import itertools
import pdb
import re
import os
from tqdm import tqdm
def build_MIP_by_Cobrapy(model, growth_rate, essential_genes_file, parameters_file, regulator_genes_file, TU_Json_file, out_path='../data/minGenome', verbose=False, solver='CPLEX', iterations=10):
M = 1000
#Rename variables to match the names used in the earlier version of this script
me = model
mu = growth_rate
eg_f = essential_genes_file
parameters_f = parameters_file
reg_f = regulator_genes_file
############# sets ################################
# TU
with open(TU_Json_file) as data_file:
TUs = json.load(data_file)
# essential genes
essential_genes = pd.read_csv(eg_f,index_col=0)
import re
import os
import datetime
import pandas as pd
import numpy as np
from glob import glob
import warnings
import news._news_yh as news_yh
import visualization._plot_method as senti_ploter
import processor._automail as automail
import processor._senti_process as senti_process
import statistics._twitter_stats as twitter_stats
import processor._load_intraday as load_intraday
warnings.simplefilter("ignore")
def analysis_ticker(keyword_list,is_save_senti,is_plot,is_log,is_earning_release,is_stockprice,is_preopen,is_sendemail,email_addrs_list,ticker,flr_thres):
for key_word in keyword_list:
####set path
keyword_path = f"data\\raw_twitters\\{key_word}\\" # where the raw tweets are stored
ticker = key_word.split('$')[-1] # overwrite the ticker name
# read all files
files=glob(f'{keyword_path}*{key_word}*')
#if we only need to run the program pre-open, limit the window from 4:00pm the previous day to 9:30am the next day
if is_preopen:
files = files[-2:]
# see all files' dates
dates = [i[-14:-4] for i in files]
print(f'We are observing data from {dates[0]} to {dates[-1]} for {key_word}')
# get all sentiment from all files; each file represents a day
all_sentiments = senti_process.SentiProcess(key_word).get_all_senti(files,flr_thres,is_log,is_save_senti)
###################################
#twitter_stats.show_top(result_path,key_word,topn,is_show_topwds)
#plot #####################################################
if is_plot:
senti_ploter.plot_senti(key_word,ticker,all_sentiments,is_stockprice,is_earning_release)
# statistics
#twitter_stats.observe_annoucement(ticker,all_sentiments)
#twi_daily = twitter_stats.daily_tweets(all_sentiments)
if is_preopen:
twitter_stats.pre_opening_analysis(keyword_list,flr_thres)
automail.SendEmail(toaddr = email_addrs_list).send_preopen_email()
if not is_preopen and is_sendemail:
automail.SendEmail(toaddr = email_addrs_list).send_regular_email()
pass
def analysis_news(kw_list,ticker,readname):
# get all sentiment from all files; each file represents a day
all_sentis = senti_process.SentiProcess.analysis_news(kw_list,readname)
#plot #####################################################
hourly_ohlc = load_intraday.get_hourly_price(ticker)
senti_ploter.plot_news(hourly_ohlc,all_sentis)
pass
def analysis_macro(filename):
#past half tweets from those accounts
macro_tweet = pd.read_csv(f'data\\macro\\{filename}.csv')
import plotly.graph_objs as go
from plotly.colors import n_colors
# to handle data retrieval
import urllib3
from urllib3 import request
# to handle certificate verification
import certifi
# to manage json data
import json
# for pandas dataframes
import pandas as pd
base_url = 'https://www.opendata.nhs.scot/api/3/action/datastore_search_sql?sql='
#Neighbourhoods
nbh_id = '8906de12-f413-4b3f-95a0-11ed15e61773'
#Daily Cases total
daily_id = '287fc645-4352-4477-9c8c-55bc054b7e76'
# Cases by local authority
by_hb_id = '427f9a25-db22-4014-a3bc-893b68243055'
# Hospital Onset
hosp_id = '5acbccb1-e9d6-4ab2-a7ac-f3e4d378e7ec'
def get_api(sql_query):
base_url = 'https://www.opendata.nhs.scot/api/3/action/datastore_search_sql?sql='
# handle certificate verification and SSL warnings
http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where())
final_url = base_url + sql_query
r = http.request('GET', final_url)
# decode json data into a dict object
data = json.loads(r.data.decode('utf-8'))
df = pd.json_normalize(data['result']['records'])
import pandas as pd
import numpy as np
import pytest
from column_completer import ColumnCompleter
X = np.random.randint(0, 100, (8, 3))
def test_name_collision_value_error_1():
df = pd.DataFrame(X, columns=["Col A", "Col_A", "Col B"])
with pytest.raises(ValueError) as err:
q = ColumnCompleter(df)
assert "spaces causes a collision of column names" in str(err.value)
def test_attribute_space_replaced_1():
df = pd.DataFrame(X, columns=["Col A", "col B", "Col C"])
import requests
from requests.exceptions import HTTPError
import json
import pandas
import xmltodict
from pathlib import Path
import xml.etree.ElementTree as ET
import re
# ---------------------------------------------------------------------------- #
# Common Variables #
# ---------------------------------------------------------------------------- #
nrmp_ein = 362249886
nbme_ein = 231352238
nbome_ein = 364135679
aamc_ein = 362169124
ama_ein = 360727175
pp_search_url = 'https://projects.propublica.org/nonprofits/api/v2/search.json'
pp_org_url = 'https://projects.propublica.org/nonprofits/api/v2/organizations/{}.json'
# ----------------------------- API Interactions ----------------------------- #
# Request the URL and raise an error if the HTTP response is not 200.
def get_http_response(url):
try:
response = requests.get(url)
response.raise_for_status()
except HTTPError as http_error:
print(f'HTTP Error: {http_error}')
except Exception as error:
print(f'Non-HTTP error: {error}')
else:
print(f'Successful HTTP response: {response}')
return response
# Get organization data as JSON
def get_org_data_as_json(ein):
url = pp_org_url.format(ein)
response = get_http_response(url).json()
return response
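# Illustrative usage of the helper above (added for clarity, not part of the
# original script, and left commented out so importing this module makes no
# network calls). The variable names are assumptions; the EIN constants are
# defined near the top of the file and "filings_with_data" is the key used
# later in filing_data_from_org_json:
# nrmp_data = get_org_data_as_json(nrmp_ein)
# nrmp_filings = nrmp_data["filings_with_data"]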
# ----------------------------- File Manipulation ---------------------------- #
# Save JSON to a file
def save_json_to_file(write_data, write_file):
with open(write_file, "w") as f:
json.dump(write_data, f)
# Read JSON from file
def read_json_from_file(read_file):
with open(read_file) as f:
read_data = json.load(f)
return read_data
# Read XML data and convert it to JSON data (optionally save it to JSON file)
def xml_to_json(xml_file, **kwargs):
json_file = kwargs.get('json_file', None)
with open(xml_file) as f:
xml_dict = xmltodict.parse(f.read())
xml_dict = xml_dict['Return']['ReturnData']
if json_file:
save_json_to_file(xml_dict, json_file)
else:
json_data = json.dumps(xml_dict)
return json_data
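# Hypothetical usage (added for illustration; the file paths are placeholders,
# not files shipped with this repo, so the lines stay commented out). The first
# call returns a JSON string; passing json_file writes to disk instead:
# json_str = xml_to_json("data/nbme_990.xml")
# xml_to_json("data/nbme_990.xml", json_file="data/nbme_990.json")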
# ----------------------------- Data Manipulation ---------------------------- #
# Strip whitespace from string cells in a pandas dataframe, so whitespace-only cells become empty
def clean_df(dataframe):
df = dataframe.apply(lambda x: x.str.strip() if x.dtype == "object" else x)
return df
# Read complete organization JSON file obtained from **PROPUBLICA API** and
# return filing data as pandas dataframe
def filing_data_from_org_json(filename, transpose=False):
json_data = read_json_from_file(filename)
filing_data = json_data['filings_with_data']
columns = filing_data[0].keys()
df = pandas.DataFrame(filing_data, columns=columns)
df = df.set_index('tax_prd_yr')
df = clean_df(df)
if transpose is True:
df = df.transpose()
return df
# Create dataframe object from IRS XML files by first flattening them
def xml_to_df(filename, transpose=False):
tree = ET.parse(filename)
root = tree.getroot()
parent_map = {}
for p in tree.iter():
for c in p:
url_pattern = re.compile(r"{(https?|ftp)://(-\.)?([^\s/?\.#-]+\.?)+(/[^\s]*)?}")
tag_name = url_pattern.sub("", c.tag)
parent_map[tag_name] = [c.text]
df = pandas.DataFrame(parent_map)
import io
import numpy as np
import pytest
from pandas.compat._optional import VERSIONS
from pandas import (
DataFrame,
date_range,
read_csv,
read_excel,
read_feather,
read_json,
read_parquet,
read_pickle,
read_stata,
read_table,
)
import pandas._testing as tm
from pandas.util import _test_decorators as td
df1 = DataFrame(
{
"int": [1, 3],
"float": [2.0, np.nan],
"str": ["t", "s"],
"dt": date_range("2018-06-18", periods=2),
}
)
text = str(df1.to_csv(index=False)).encode()
@pytest.fixture
def cleared_fs():
fsspec = pytest.importorskip("fsspec")
memfs = fsspec.filesystem("memory")
yield memfs
memfs.store.clear()
def test_read_csv(cleared_fs):
with cleared_fs.open("test/test.csv", "wb") as w:
w.write(text)
df2 = read_csv("memory://test/test.csv", parse_dates=["dt"])
tm.assert_frame_equal(df1, df2)
def test_reasonable_error(monkeypatch, cleared_fs):
from fsspec import registry
from fsspec.registry import known_implementations
registry.target.clear()
with pytest.raises(ValueError, match="nosuchprotocol"):
read_csv("nosuchprotocol://test/test.csv")
err_msg = "test error message"
monkeypatch.setitem(
known_implementations,
"couldexist",
{"class": "unimportable.CouldExist", "err": err_msg},
)
with pytest.raises(ImportError, match=err_msg):
read_csv("couldexist://test/test.csv")
def test_to_csv(cleared_fs):
df1.to_csv("memory://test/test.csv", index=True)
df2 = read_csv("memory://test/test.csv", parse_dates=["dt"], index_col=0)
tm.assert_frame_equal(df1, df2)
@pytest.mark.parametrize("ext", ["xls", "xlsx"])
def test_to_excel(cleared_fs, ext):
if ext == "xls":
pytest.importorskip("xlwt")
else:
pytest.importorskip("openpyxl")
path = f"memory://test/test.{ext}"
df1.to_excel(path, index=True)
df2 = read_excel(path, parse_dates=["dt"], index_col=0)
tm.assert_frame_equal(df1, df2)
@pytest.mark.parametrize("binary_mode", [False, True])
def test_to_csv_fsspec_object(cleared_fs, binary_mode):
fsspec = pytest.importorskip("fsspec")
path = "memory://test/test.csv"
mode = "wb" if binary_mode else "w"
fsspec_object = fsspec.open(path, mode=mode).open()
df1.to_csv(fsspec_object, index=True)
assert not fsspec_object.closed
fsspec_object.close()
mode = mode.replace("w", "r")
fsspec_object = fsspec.open(path, mode=mode).open()
df2 = read_csv(
fsspec_object,
parse_dates=["dt"],
index_col=0,
)
assert not fsspec_object.closed
fsspec_object.close()
tm.assert_frame_equal(df1, df2)
def test_csv_options(fsspectest):
df = DataFrame({"a": [0]})
df.to_csv(
"testmem://test/test.csv", storage_options={"test": "csv_write"}, index=False
)
assert fsspectest.test[0] == "csv_write"
read_csv("testmem://test/test.csv", storage_options={"test": "csv_read"})
assert fsspectest.test[0] == "csv_read"
def test_read_table_options(fsspectest):
# GH #39167
df = DataFrame({"a": [0]})
df.to_csv(
"testmem://test/test.csv", storage_options={"test": "csv_write"}, index=False
)
assert fsspectest.test[0] == "csv_write"
read_table("testmem://test/test.csv", storage_options={"test": "csv_read"})
assert fsspectest.test[0] == "csv_read"
@pytest.mark.parametrize("extension", ["xlsx", "xls"])
def test_excel_options(fsspectest, extension):
if extension == "xls":
pytest.importorskip("xlwt")
else:
pytest.importorskip("openpyxl")
df = DataFrame({"a": [0]})
path = f"testmem://test/test.{extension}"
df.to_excel(path, storage_options={"test": "write"}, index=False)
assert fsspectest.test[0] == "write"
read_excel(path, storage_options={"test": "read"})
assert fsspectest.test[0] == "read"
@td.skip_if_no("fastparquet")
def test_to_parquet_new_file(cleared_fs):
"""Regression test for writing to a not-yet-existent GCS Parquet file."""
df1.to_parquet(
"memory://test/test.csv", index=True, engine="fastparquet", compression=None
)
@td.skip_if_no("pyarrow", min_version="2")
def test_arrowparquet_options(fsspectest):
"""Regression test for writing to a not-yet-existent GCS Parquet file."""
df = DataFrame({"a": [0]})
df.to_parquet(
"testmem://test/test.csv",
engine="pyarrow",
compression=None,
storage_options={"test": "parquet_write"},
)
assert fsspectest.test[0] == "parquet_write"
read_parquet(
"testmem://test/test.csv",
engine="pyarrow",
storage_options={"test": "parquet_read"},
)
assert fsspectest.test[0] == "parquet_read"
@td.skip_array_manager_not_yet_implemented # TODO(ArrayManager) fastparquet
@td.skip_if_no("fastparquet")
def test_fastparquet_options(fsspectest):
"""Regression test for writing to a not-yet-existent GCS Parquet file."""
df = DataFrame({"a": [0]})
df.to_parquet(
"testmem://test/test.csv",
engine="fastparquet",
compression=None,
storage_options={"test": "parquet_write"},
)
assert fsspectest.test[0] == "parquet_write"
read_parquet(
"testmem://test/test.csv",
engine="fastparquet",
storage_options={"test": "parquet_read"},
)
assert fsspectest.test[0] == "parquet_read"
@pytest.mark.single_cpu
@td.skip_if_no("s3fs")
def test_from_s3_csv(s3_resource, tips_file, s3so):
tm.assert_equal(
read_csv("s3://pandas-test/tips.csv", storage_options=s3so), read_csv(tips_file)
)
# the following are decompressed by pandas, not fsspec
tm.assert_equal(
read_csv("s3://pandas-test/tips.csv.gz", storage_options=s3so),
read_csv(tips_file),
)
tm.assert_equal(
read_csv("s3://pandas-test/tips.csv.bz2", storage_options=s3so),
read_csv(tips_file),
)
@pytest.mark.single_cpu
@pytest.mark.parametrize("protocol", ["s3", "s3a", "s3n"])
@td.skip_if_no("s3fs")
def test_s3_protocols(s3_resource, tips_file, protocol, s3so):
tm.assert_equal(
read_csv("%s://pandas-test/tips.csv" % protocol, storage_options=s3so),
read_csv(tips_file),
)
@pytest.mark.single_cpu
@td.skip_array_manager_not_yet_implemented # TODO(ArrayManager) fastparquet
@td.skip_if_no("s3fs")
@td.skip_if_no("fastparquet")
def test_s3_parquet(s3_resource, s3so):
fn = "s3://pandas-test/test.parquet"
df1.to_parquet(
fn, index=False, engine="fastparquet", compression=None, storage_options=s3so
)
df2 = read_parquet(fn, engine="fastparquet", storage_options=s3so)
tm.assert_equal(df1, df2)
@td.skip_if_installed("fsspec")
def test_not_present_exception():
msg = "Missing optional dependency 'fsspec'|fsspec library is required"
with pytest.raises(ImportError, match=msg):
read_csv("memory://test/test.csv")
@td.skip_if_no("pyarrow")
def test_feather_options(fsspectest):
df = DataFrame({"a": [0]})
df.to_feather("testmem://afile", storage_options={"test": "feather_write"})
assert fsspectest.test[0] == "feather_write"
out = read_feather("testmem://afile", storage_options={"test": "feather_read"})
assert fsspectest.test[0] == "feather_read"
tm.assert_frame_equal(df, out)
def test_pickle_options(fsspectest):
df = DataFrame({"a": [0]})
df.to_pickle("testmem://afile", storage_options={"test": "pickle_write"})
assert fsspectest.test[0] == "pickle_write"
out = read_pickle("testmem://afile", storage_options={"test": "pickle_read"})
assert fsspectest.test[0] == "pickle_read"
tm.assert_frame_equal(df, out)
"""analysis.py: Collection of classes for performing analysis on Corpus"""
# <NAME> (<EMAIL>)
# DS 5001
# 6 May 2021
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from pandas.core.algorithms import mode
import plotly.express as px
import scipy.cluster.hierarchy as sch
from gensim.models import word2vec
from scipy.linalg import eigh
from scipy.sparse.construct import random
from scipy.spatial.distance import pdist
from sklearn.decomposition import LatentDirichletAllocation as LDA
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.manifold import TSNE
from eta_modules.preprocessing import Corpus
class HierarchicalClusterAnalysis:
def __init__(self, max_features=5000, tfidf_method='max', OHCO_level=['work_id', 'chapter_id']):
self.max_features = max_features
self.tfidf_method = tfidf_method
self.OHCO_level = OHCO_level
self.vocab = None
self.metrics = None
def fit(self, corpus: Corpus, metrics=['cosine']):
# Copy corpus over to prevent undesired modifications
self.corpus = corpus.copy()
self.metrics = metrics
self.bow = self.corpus.bow
self.vocab = self.corpus.vocab
# If original TFIDF bag or method doesn't match, recalculate
# Otherwise, already have good TFIDF values to use
if (corpus.tfidf_OHCO != self.OHCO_level) or (f"tfidf_{self.tfidf_method}_sum" not in self.vocab):
self.corpus.compute_tfidf(OHCO_level=self.OHCO_level, methods=[self.tfidf_method])
# Reassign objects just to be safe
self.bow = self.corpus.bow
self.vocab = self.corpus.vocab
self.vocab['dfidf'] = self.vocab['df'] * self.vocab['idf']
# Filter VOCAB to `max_features` words using DF-IDF; use that to filter BOW TFIDF values
self.vocab = self.vocab.sort_values('dfidf', ascending=False).head(self.max_features)
self.tfidf = self.bow[f"tfidf_{self.tfidf_method}"].unstack(fill_value=0)
self.tfidf = self.tfidf[self.vocab.index]
# Collapse tfidf to book level means
self.tfidf = self.tfidf.groupby(['work_id']).mean()
## Create DataFrame to hold pairwise distances
# Multindex -- combinations of indices; e.g., (0, 1), (0, 2), etc.
work_ids = self.corpus.lib.index.tolist()
self.pdists = pd.DataFrame(index=pd.MultiIndex.from_product([work_ids, work_ids])).reset_index()
# Remove self-combinations in index; e.g., (0, 0), (1, 1), etc.
self.pdists = self.pdists[self.pdists['level_0'] < self.pdists['level_1']].set_index(['level_0', 'level_1'])
self.pdists.index.names = ['doc_a', 'doc_b']
for metric in self.metrics:
if metric in ['jaccard', 'dice']:
L0 = self.tfidf.astype('bool').astype('int') # Binary
self.pdists[metric] = pdist(L0, metric)
elif metric in ['jensenshannon']:
L1 = self.tfidf.apply(lambda x: x / x.sum(), 1)
self.pdists[metric] = pdist(L1, metric)
else:
self.pdists[metric] = pdist(self.tfidf, metric)
def plot_dendrogram(self, linkage='complete', color_thresh=0.3, figsize=(8, 10)):
for metric in self.metrics:
tree = sch.linkage(self.pdists[metric], method=linkage)
labels = (self.corpus.lib['author'] + ': ' + self.corpus.lib['title']).values
plt.figure(figsize=figsize)
sch.dendrogram(tree,
labels=labels,
orientation="left",
count_sort=True,
distance_sort=True,
above_threshold_color='0.75',
color_threshold=color_thresh)
plt.tick_params(axis='both', which='major', labelsize=14)
plt.title(f"Metric: {metric}")
class PCA:
def __init__(self, max_features=5000, tfidf_method='max', OHCO_level=['work_id', 'chapter_id']):
self.max_features = max_features
self.tfidf_method = tfidf_method
self.OHCO_level = OHCO_level
self.vocab = None
def fit(self, corpus, n_components=10):
# Copy corpus over to prevent undesired modifications
self.corpus = corpus.copy()
# Modify token to add in author label (affects output from computing TFIDF)
self.corpus.token = self.corpus.token.join(self.corpus.lib).reset_index().set_index(['author'] + self.corpus.OHCO)
# Since we want to include author groupings, must recalculate TFIDF regardless of original values
self.corpus.compute_tfidf(OHCO_level=(['author'] + self.OHCO_level), methods=[self.tfidf_method])
self.bow = self.corpus.bow
self.vocab = self.corpus.vocab
self.vocab['dfidf'] = self.vocab['df'] * self.vocab['idf']
# Filter VOCAB to `max_features` words using DF-IDF; use that to filter BOW TFIDF values
self.vocab = self.vocab.sort_values('dfidf', ascending=False).head(self.max_features)
# Form TFIDF matrix by taking mean based on author + work + chapter group means of terms
self.tfidf = self.bow.groupby(['author'] + self.corpus.OHCO[:2] + ['term_str'])[[f"tfidf_{self.tfidf_method}"]].mean().unstack(fill_value=0)
# Column index is currently a multi-index, with the top level being one element (the tfidf method used, e.g., "tfidf_max")
# Drop this level in the column index so we can index columns more easily and remove undesired words based on `max_features`
self.tfidf.columns = self.tfidf.columns.droplevel(0)
self.tfidf = self.tfidf[self.vocab.index] # filter words based on DF-IDF
self.tfidf = self.tfidf.apply(lambda x: x / np.sqrt(np.square(x).sum()), axis=1) # Apply L2 normalization to TFIDF rows (e.g., normalize values for words across a chapter)
self.tfidf = self.tfidf - self.tfidf.mean() # center word vectors
## PCA calculations
cov = self.tfidf.cov() # covariance matrix
eig_val, eig_vec = eigh(cov) # eigendecomposition of covariance matrix (dim: max_features x max_features)
self.eig_vec = pd.DataFrame(eig_vec, index=cov.index, columns=cov.index) # (dim: max_features x max_features)
self.eig_val = pd.DataFrame(eig_val, index=cov.index, columns=['eig_val']) # (dim: max_features x 1)
self.eig_pairs = self.eig_val.join(self.eig_vec.T)
self.eig_pairs['exp_var'] = np.round((self.eig_pairs['eig_val'] / self.eig_pairs['eig_val'].sum()) * 100, 2)
# Get top n components by explained variance
self.pc = self.eig_pairs.sort_values('exp_var', ascending=False).head(n_components).reset_index(drop=True)
self.pc.index.name = 'comp_id'
self.pc.index = [f"PC{i}" for i in self.pc.index.tolist()]
self.pc.index.name = 'pc_id'
# Project TFIDF using components (document-component matrix) and get loadings
self.dcm = self.tfidf.dot(self.pc[cov.index].T)
self.loadings = self.pc[cov.index].T
self.loadings.index.name = 'term_str'
def plot_2d(self, comp_id_1=0, comp_id_2=1):
dcm_plot = self.dcm.reset_index().copy()
dcm_plot = dcm_plot.merge((self.corpus.lib['author'] + '-' + self.corpus.lib['title']).to_frame('doc').reset_index())
dcm_plot['doc'] = dcm_plot['doc'] + '-' + dcm_plot['chapter_id'].astype('str')
fig = px.scatter(dcm_plot, f"PC{comp_id_1}", f"PC{comp_id_2}", color='author', hover_name='doc',
marginal_x='box', height=800)
fig.show()
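# A minimal usage sketch for the PCA class above (illustration only; `my_corpus`
# stands in for a preprocessed Corpus object and is not defined in this module,
# so the lines are left commented out):
# pca = PCA(max_features=2000)
# pca.fit(my_corpus, n_components=10)
# pca.plot_2d(comp_id_1=0, comp_id_2=1)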
class TopicModel:
def __init__(self, remove_proper_nouns=True, OHCO_level=['work_id', 'chapter_id'], max_features=5000, n_topics=40, n_topic_terms=10,
ngram_range=[1, 2], max_iter=20, random_state=None):
self.remove_proper_nouns = remove_proper_nouns
self.bag = OHCO_level
self.max_features=max_features
self.n_topics = n_topics
self.n_topic_terms = n_topic_terms
self.ngram_range = ngram_range
self.max_iter = max_iter
self.random_state=random_state
def fit(self, corpus):
# Copy corpus over to prevent undesired modifications
self.corpus = corpus.copy()
# Create a list of more complete document strings to work with scikit-learn's modules
self.corpus.token.term_str = self.corpus.token.term_str.astype('str')
if self.remove_proper_nouns:
regex_expr = r'^NNS?$'
else:
regex_expr = r'^NNP?S?$'
self.doc = (self.corpus.token[self.corpus.token.pos.str.match(regex_expr)]
.groupby(self.bag).term_str
.apply(lambda x: ' '.join(x))
.to_frame('doc_str'))
vectorizer = CountVectorizer(max_features=self.max_features, ngram_range=self.ngram_range, stop_words='english')
self.counts = vectorizer.fit_transform(self.doc.doc_str)
self.term = vectorizer.get_feature_names()
lda = LDA(n_components=self.n_topics, max_iter=self.max_iter, learning_offset=50., random_state=self.random_state)
# Theta table -- documents vs. topics
self.theta = pd.DataFrame(lda.fit_transform(self.counts), index=self.doc.index)
self.theta.columns.name = 'topic_id'
# Phi table -- terms vs. topics
self.phi = pd.DataFrame(lda.components_, columns=self.term)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 5 11:52:00 2020
@author: <NAME>
This script reproduces Figure 2 from Pittman et al., 2021.
It produces a timeseries of New production and Co2 flux for each of the 6 eqpac moorings.
Includes SST, isotherm depth.
Calculates averages during el nino, la nina and neutral
Also calculates thermocline slope.
Requires:
datasets/tao/tao_physics/*
processed/combined_dataset/month_data_exports.nc
processed/flux/landsch_mooring_co2_flux.nc
processed/flux/npp.nc
Produces:
processed/results/means.csv
figs/Figure2_Co2fluxevents+ratio_name+.png
processed/results/enso_mooring_avg.csv
"""
import numpy as np
import pandas as pd
import xarray as xr
import matplotlib.pyplot as plt
from carbon_math import *
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import matplotlib.patches as patches
from matplotlib.ticker import MultipleLocator,AutoMinorLocator
from scipy.stats import linregress
import seaborn as sns
from sklearn.metrics import mean_absolute_error, mean_absolute_percentage_error
def check_single_bias(truth,model):
bias=((model-truth)/truth)*100
abs_error=abs(bias)
logbias=10**(np.nanmean(np.log10(model)-np.log10(truth)))
medlogbias=10**(np.nanmedian(np.log10(model)-np.log10(truth)))
mae=10**(np.nanmean(abs(np.log10(model)-np.log10(truth))))
med_ae=10**(np.nanmedian(abs(np.log10(model)-np.log10(truth))))
return logbias,medlogbias,mae,med_ae,bias,abs_error
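# Quick sanity check of the bias helper on toy arrays (illustration only; the
# numbers are made up and play no role in the analysis below, so the call is
# left commented out):
# check_single_bias(np.array([1.0, 2.0, 4.0]), np.array([1.1, 1.8, 4.4]))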
#Not recommended but helps to get the info that we need out
import warnings
warnings.filterwarnings("ignore")
### OPTIONAL CHANGE
plot_mooring_CO2=1 #CHANGE to 1 to check how the mooring data lines up.
moorings=['110W','125W','140W','155W','170W','165E'][::-1]
#Change startyear to 1980 for the full timeseries; outputs are then auto-saved with the _alltime suffix.
startyear=str(1997)
check_lag_corr_asf=[]
check_lag_corr_np=[]
# # %% Show distribution
# plt.figure(figsize=(10,10))
# plt.subplot(211)
# plt.grid()
# sns.violinplot(data=np.array(d).T,orient='h')#x='Mooring',y='co2flux',data=d.to_dataframe().T)
# plt.title('In situ CO2 flux (gC/m2/day)')
# plt.yticks([0,1,2,3,4,5],['110W','125W','140W','155W','170W','165E'])
# plt.xlim([-0.025,0.2])
# plt.subplot(212)
# plt.grid()
# plt.title('Landschutzer CO2 flux')
# sns.violinplot(data=np.array(d1).T,alpa=2,orient='h')
# plt.yticks([0,1,2,3,4,5],['110W','125W','140W','155W','170W','165E'])
# plt.xlim([-0.025,0.2])
# plt.show()
# %%
#190 mm x 230 mm
fs=12
fig=plt.figure()#figsize=(28,30))#,constrained_layout=True)
fig.set_size_inches(19,24)
gs=fig.add_gridspec(38,8)
means=pd.DataFrame()
# npp_insitu=pd.read_csv('processed/flux/shipboard_npp.csv',index_col='id') #Redundant
final_mooring_enso=pd.DataFrame()
fp='processed/combined_dataset/month_data_exports.nc'
#dat=xr.open_mfdataset(fp)
#print((dat.sel(Mooring=155).co2flux4_land_gmyr/365).min().values) #Min value at 155W (1998)
for i, mooring_name in enumerate(moorings):
print(mooring_name)
#JMAco2flux_data=xr.open_mfdataset('processed/flux/JMA_mooring_co2_flux.nc').rename({'time':'Date'}).sel(Mooring=mooring_name)
LandSch_co2flux_data=xr.open_mfdataset('processed/flux/landsch_mooring_co2_flux.nc').rename({'time':'Date'}).sel(Mooring=mooring_name)
LandSch_co2flux_data['Date']=LandSch_co2flux_data['Date'].astype("datetime64[M]")
npp=xr.open_mfdataset('processed/flux/npp.nc').sel(Mooring=mooring_name)/12
#mooring_obs_npp=npp_insitu[((npp_insitu.mooring.astype(int)>=int(mooring_name[:-1])-1)&(npp_insitu.mooring.astype(int)<=int(mooring_name[:-1])+1))]
ty='month' #Actually month though need to fix this.
fp='processed/combined_dataset/'+ty+'_data_exports.nc'
try:
dat=xr.open_mfdataset(fp).sel(Mooring=int(mooring_name[:-1]))
except:
dat=xr.open_mfdataset(fp).sel(Mooring=195)
ratio=dat.laws2011a#b#f_ratio#11a#f_ratio#laws2011a#.dunne_zeu1#thE_ratio#f_ratio
ratio_name='laws2011a'#'Laws 2011b'#'11a'#'Laws2000'#'Laws2011a'#'f_ratio'#'Dunne'#'thE-ratio'
#Original all NPPS
#avg_npp=npp[['viirs_eppley','viirs_cbpm','viirs_vgpm','sw_eppley','sw_cbpm','sw_vgpm','mod_cafe','mod_eppley','mod_cbpm','mod_vgpm']].to_dataframe().drop(columns='Mooring').mean(axis=1)
#Now only CBPM and CAFE
#avg_npp=npp[['viirs_cbpm','sw_cbpm','mod_cbpm']].to_dataframe().drop(columns='Mooring').mean(axis=1)
avg_npp=npp[['sw_cafe','mod_cafe']]
avg_npp=avg_npp.sel(Date=slice(ratio.Date.min(),ratio.Date.max()))
avg_npp=avg_npp.to_dataframe().drop(columns='Mooring').mean(axis=1)
temps=xr.open_mfdataset('datasets/tao/tao_physics/'+mooring_name+'/t0n'+mooring_name.lower()+'_dy.cdf')
dat['Date'].astype('datetime64[M]')
co2flux=carbon_flux(dat.sss2,dat.sst2,dat.windspeed,None,None,dat.delta_pCO2)[0]
#Need a .T to transform in ax2.contourf
#MAX 60 day gap fill. All depth filling is fine though.
temps_selected=temps.sel(depth=slice(0,250))
temps_selected=temps_selected.interpolate_na(dim='depth',method='linear')
temps_selected=temps_selected.interpolate_na(dim='time',method='linear',limit=60) #Stop doing major gap fills but small ones are useful.
temps_selected['depth']=temps_selected['depth']*-1
temps_selected=temps_selected.resample(time='M').mean().T_20
temps_selected['time']=temps_selected.time.astype('datetime64[M]')
start=i*6
bignext=start+4
smallnext=bignext+2
ax1=fig.add_subplot(gs[start:bignext,0:7]) #plt.subplot(111)#
if i==0:
ax1.set_title(str(str(mooring_name[0:3]+'\xb0'+mooring_name[3:4])),fontsize=fs+2)
else:
ax1.annotate(str(str(mooring_name[0:3]+'\xb0'+mooring_name[3:4])),
[np.datetime64('2008-02-01'),17],
fontsize=fs+2)
#ax1.plot(JMAco2flux_data.Date,moles_to_carbon(JMAco2flux_data.flux.values)/365,linewidth=4,label='Iida CO$_{2}$ flux',c='mediumblue')#'medium_')
ax1.plot(LandSch_co2flux_data.Date.astype('datetime64[M]'),
(LandSch_co2flux_data.fgco2_smoothed.values*1000)/365,
linewidth=3,
label='Landschutzer CO$_{2}$ flux',
c='slategray')
#CHECK HERE TO PLOT IN SITU CO2 FLUX
if plot_mooring_CO2==1:
ax1.plot(dat.Date.astype('datetime64[M]'),(dat.co2flux_gmyr/365)*1000/12,linewidth=4,label='in situ CO$_{2}$ flux',c='mediumblue')#'mediumblue')
#Calculate bias between insitu and product
landCO2=moles_to_carbon(LandSch_co2flux_data.fgco2_smoothed)/365
landCO2['Date']=LandSch_co2flux_data.Date.astype('datetime64[M]')
in_situCO2=dat.co2flux_gmyr/365
bias=(landCO2-in_situCO2).mean().values
print('LANDSCHUTZER BIAS: '+str(bias))
ax1.plot(avg_npp.index,(avg_npp.values)*ratio.sel({'Date':avg_npp.index}),linewidth=3,label='New Production',c='orangered')
ax1.axhline(0,linestyle='--',c='k',linewidth=3,alpha=0.8)
asf=(moles_to_carbon(LandSch_co2flux_data.fgco2_smoothed)/365)
npp=avg_npp.values/12*ratio.sel({'Date':avg_npp.index})
check_lag_corr_asf.append(asf.sel(Date=slice('1998-01-01','2019-12-31')).values)
check_lag_corr_np.append(npp.sel(Date=slice('1998-01-01','2019-12-31')).values)
#Put a drawdown indicator
#co2
co222=moles_to_carbon(LandSch_co2flux_data.fgco2_smoothed/365)
#drawdown=dat.co2flux4_land_gmyr.where(dat.co2flux4_land_gmyr<0)
drawdown=co222.where(co222<0)
draw_dates=drawdown[~drawdown.isnull()].Date.values
print('Drawdown Mean: '+str(drawdown.mean().values))
#if ~np.isnan(drawdown.mean()):
# print(drawdown.values)
ax1.scatter(draw_dates.astype('datetime64[M]'),np.zeros(len(draw_dates))+0.1,c='r',s=500,marker=11,label='Drawdown')
#Drawdown indicator
finyear='2020-01-01'
ax1.set_ylim([-6,20])
ax1.set_yticks([0,5,10,15,20])
ax1.set_xlim([np.datetime64(startyear),np.datetime64(finyear)])
ax1.tick_params(axis='both', which='major', labelsize=fs)
ax1.tick_params(labelbottom=False)
ax1.grid()
ax1.xaxis.set_minor_locator(AutoMinorLocator(4))
ax1.tick_params(which='major', length=12,direction='in',color='k',top=True)
ax1.tick_params(which='minor', length=6,direction='in',top=True)
#ax1.tick_params(which='minor', length=7,direction='inout')
print(mooring_name)
print('NPP avg: '+str((avg_npp/12*ratio.sel({'Date':avg_npp.index})).mean()))
print('NPP std: '+str((avg_npp/12*ratio.sel({'Date':avg_npp.index})).std()))
land_avg_dat=(((LandSch_co2flux_data.fgco2_smoothed.values)*1000)/365).mean()
print('Land avg: '+str(land_avg_dat))
print('Land std: '+str((((LandSch_co2flux_data.fgco2_smoothed.values)*1000)/365).std()))
#print('Iida avg: '+str((moles_to_carbon(JMAco2flux_data.flux.values)/365).mean()))
# print('Iida std: '+str((moles_to_carbon(JMAco2flux_data.flux.values)/365).std()))
situ_avg=(dat.co2flux_gmyr/365).mean().values*1000/12
print('In Situ avg: '+str(situ_avg))
print('In Situ std: '+str((dat.co2flux_gmyr/365).std().values*1000/12))
a=(check_single_bias(dat.co2flux_gmyr/365,moles_to_carbon(LandSch_co2flux_data.fgco2_smoothed)/365))
print('CO2 abs error ='+str(a[5].mean().values))
def perc_err(a,b):
return ((a-b)/b)*100
def MAPE(Y_actual,Y_Predicted):
Ya=Y_actual.dropna(dim='Date')
Yp=Y_Predicted.sel(Date=Ya.Date)
mape = np.mean(np.abs((Ya - Yp)/Ya))*100
mape1 = (np.mean(np.abs(Yp)) - np.mean(np.abs(Ya))/np.mean(np.abs(Yp)))*100
print(mape1.values)
return mape.values
def perc_err(a,b):
return ((a-b)/b)*100
def MAE(Y_actual,Y_Predicted):
Ya=Y_actual.dropna(dim='Date')
Yp=Y_Predicted.sel(Date=Ya.Date)
mae = np.mean(np.abs((Ya - Yp)))
return mae.values
print(f"HACK PERC {perc_err(situ_avg,land_avg_dat)}")
aa=((dat.co2flux_gmyr/12)/365)
bb=((LandSch_co2flux_data.fgco2_smoothed.sel(Date=dat.Date))/365)
mape=MAPE(aa,bb)#((, (LandSch_co2flux_data.fgco2_smoothed.sel(Date=dat.Date))/365)
mae=MAE(aa,bb)#(((dat.co2flux_gmyr/12)/365), (LandSch_co2flux_data.fgco2_smoothed.sel(Date=dat.Date))/365)
print(f"CO2 MAPE: {mape} and mae: {mae}")
epp=pd.DataFrame([dat.mod_eppley.values,dat.sw_eppley.values]).mean()
cbpm= pd.DataFrame([dat.mod_cbpm.values,dat.sw_cbpm.values,dat.viirs_cbpm.values])
# *****************************************************************************
#
# Copyright (c) 2019, the Perspective Authors.
#
# This file is part of the Perspective library, distributed under the terms of
# the Apache License 2.0. The full license can be found in the LICENSE file.
#
import os
import time
import pytz
import numpy as np
import pandas as pd
from datetime import date, datetime
from dateutil import tz
from perspective.table import Table
LOCAL_DATETIMES = [
datetime(2019, 1, 11, 0, 10, 20),
datetime(2019, 1, 11, 11, 10, 20),
datetime(2019, 1, 11, 19, 10, 20)
]
# Test the DST transition for Continental US
LOCAL_DATETIMES_DST = [
datetime(2019, 3, 9, 12, 10, 20),
datetime(2019, 3, 19, 12, 10, 20),
datetime(2019, 11, 2, 12, 10, 20),
datetime(2019, 11, 3, 12, 10, 20)
]
LOCAL_TIMESTAMPS = [pd.Timestamp(d) for d in LOCAL_DATETIMES]
LOCAL_TIMESTAMPS_DST = [pd.Timestamp(d) for d in LOCAL_DATETIMES_DST]
# Set up testing data
UTC = pytz.UTC
UTC_DATETIMES = [UTC.localize(d) for d in LOCAL_DATETIMES]
UTC_TIMESTAMPS = [UTC.localize(d) for d in LOCAL_TIMESTAMPS]
UTC_DATETIMES_DST = [UTC.localize(d, is_dst=True) for d in LOCAL_DATETIMES_DST]
UTC_TIMESTAMPS_DST = [UTC.localize(d, is_dst=True) for d in LOCAL_TIMESTAMPS_DST]
PST = pytz.timezone("US/Pacific")
CST = pytz.timezone("US/Central")
EST = pytz.timezone("US/Eastern")
GMT = pytz.timezone("GMT")
HKT = pytz.timezone("Asia/Hong_Kong")
JPT = pytz.timezone("Asia/Tokyo")
ACT = pytz.timezone("Australia/ACT")
TIMEZONES = [PST, CST, EST, GMT, HKT, JPT, ACT]
TZ_DATETIMES = {}
TZ_TIMESTAMPS = {}
TZ_DATETIMES_DST = {}
TZ_TIMESTAMPS_DST = {}
for TZ in TIMEZONES:
TZ_DATETIMES[TZ.zone] = [TZ.localize(d) for d in LOCAL_DATETIMES]
TZ_TIMESTAMPS[TZ.zone] = [d.tz_localize(TZ) for d in LOCAL_TIMESTAMPS]
TZ_DATETIMES_DST[TZ.zone] = [d.astimezone(TZ) for d in UTC_DATETIMES_DST]
TZ_TIMESTAMPS_DST[TZ.zone] = [d.tz_convert(TZ) for d in UTC_TIMESTAMPS_DST]
if os.name != 'nt':
# no tzset on windows, run these tests on linux/mac only
class TestTableLocalDateTime(object):
"""Test datetimes across configurations such as local time, timezone-aware,
timezone-naive, and UTC implementations.
"""
def setup_method(self):
# To make sure that local times are not changed, set timezone to EST
os.environ["TZ"] = "US/Eastern"
time.tzset()
def teardown_method(self):
# go back to UTC at end of each test method, for consistency
os.environ["TZ"] = "UTC"
time.tzset()
def test_table_should_assume_local_time(self):
"""If a datetime object has no `tzinfo`, it should be assumed to be in
local time and not be converted at all.
"""
data = {
"a": LOCAL_DATETIMES
}
table = Table(data)
assert table.view().to_dict()["a"] == LOCAL_DATETIMES
def test_table_should_assume_local_time_numpy_datetime64(self):
data = {
"a": [np.datetime64(d) for d in LOCAL_DATETIMES]
}
table = Table(data)
assert table.view().to_dict()["a"] == LOCAL_DATETIMES
def test_table_should_assume_local_time_pandas_timestamp(self):
data = {
"a": LOCAL_TIMESTAMPS
}
# Timestamps are assumed to be in UTC by pandas
table = Table(data)
# Timestamps are read out in local time
assert table.view().to_dict()["a"] == LOCAL_DATETIMES
def test_table_should_assume_local_time_pandas_timestamp_df(self):
data = pd.DataFrame({
"a": LOCAL_TIMESTAMPS
})
# Timestamps are assumed to be in UTC by pandas
table = Table(data)
# Timestamps are read out in local time
assert table.view().to_dict()["a"] == [
datetime(2019, 1, 10, 19, 10, 20),
datetime(2019, 1, 11, 6, 10, 20),
datetime(2019, 1, 11, 14, 10, 20)
]
def test_table_should_assume_local_time_dst(self):
"""If a datetime object has no `tzinfo`, it should be assumed to be in
local time and not be converted at all.
"""
data = {
"a": LOCAL_DATETIMES_DST
}
table = Table(data)
assert table.view().to_dict()["a"] == LOCAL_DATETIMES_DST
def test_table_should_assume_local_time_numpy_datetime64_dst(self):
data = {
"a": [np.datetime64(d) for d in LOCAL_DATETIMES_DST]
}
table = Table(data)
assert table.view().to_dict()["a"] == LOCAL_DATETIMES_DST
def test_table_should_assume_local_time_pandas_timestamp_dst(self):
data = {
"a": LOCAL_TIMESTAMPS_DST
}
table = Table(data)
assert table.view().to_dict()["a"] == LOCAL_DATETIMES_DST
def test_table_should_assume_local_time_pandas_timestamp_dst_df(self):
data = pd.DataFrame({
"a": LOCAL_TIMESTAMPS_DST
})
table = Table(data)
assert table.view().to_dict()["a"] == [
datetime(2019, 3, 9, 7, 10, 20),
datetime(2019, 3, 19, 8, 10, 20),
datetime(2019, 11, 2, 8, 10, 20),
datetime(2019, 11, 3, 7, 10, 20)
]
def test_table_datetime_min(self):
data = {
"a": [datetime.min]
}
table = Table(data)
assert table.view().to_dict()["a"] == [
datetime(1969, 12, 31, 19, 0)
]
def test_table_datetime_min_df(self):
data = pd.DataFrame({
"a": [datetime.min]
})
table = Table(data)
assert table.view().to_dict()["a"] == [
datetime(1969, 12, 31, 19, 0)
]
def test_table_datetime_1900(self):
data = {
"a": [datetime(1900, 1, 1)]
}
table = Table(data)
assert table.view().to_dict()["a"] == [
datetime(1900, 1, 1)
]
def test_table_datetime_1900_df(self):
data = pd.DataFrame({
"a": [datetime(1900, 1, 1)]
})
table = Table(data)
assert table.view().to_dict()["a"] == [
datetime(1899, 12, 31, 19)
]
def test_table_datetime_1899(self):
data = {
"a": [datetime(1899, 1, 1)]
}
table = Table(data)
assert table.view().to_dict()["a"] == [
datetime(1898, 12, 31, 19)
]
def test_table_datetime_1899_df(self):
data = pd.DataFrame({
"a": [datetime(1899, 1, 1)]
})
table = Table(data)
assert table.view().to_dict()["a"] == [
datetime(1898, 12, 31, 19)
]
def test_table_datetime_min_epoch(self):
data = {
"a": [0]
}
table = Table({
"a": datetime
})
table.update(data)
assert table.view().to_dict()["a"] == [
datetime(1969, 12, 31, 19, 0)
]
def test_table_datetime_min_epoch_df(self):
data = pd.DataFrame({
"a": [0]
})
table = Table({
"a": datetime
})
table.update(data)
assert table.view().to_dict()["a"] == [
datetime(1969, 12, 31, 19, 0)
]
class TestTableDateTimeUTCToLocal(object):
def teardown_method(self):
# Set timezone to UTC, always
os.environ["TZ"] = "UTC"
time.tzset()
def test_table_should_convert_UTC_to_local_time_pytz_pacific(self):
"""If the datetime has `tzinfo` set, use it to convert the datetime to
UTC. Make sure this works with both `pytz` and `dateutil` for
`datetime` and `pandas.Timestamp`.
"""
data = {
"a": UTC_DATETIMES
}
table = Table(data)
os.environ["TZ"] = "US/Pacific"
time.tzset()
# Should be in PST now
assert table.view().to_dict() == {
"a": [d.astimezone(PST).replace(tzinfo=None) for d in data["a"]]
}
def test_table_should_convert_UTC_to_local_time_pytz_central(self):
data = {
"a": UTC_DATETIMES
}
table = Table(data)
os.environ["TZ"] = "US/Central"
time.tzset()
# Should be in CST now
assert table.view().to_dict() == {
"a": [d.astimezone(CST).replace(tzinfo=None) for d in data["a"]]
}
def test_table_should_convert_UTC_to_local_time_pytz_eastern(self):
data = {
"a": UTC_DATETIMES
}
table = Table(data)
os.environ["TZ"] = "US/Eastern"
time.tzset()
# Should be in EST now
assert table.view().to_dict() == {
"a": [d.astimezone(EST).replace(tzinfo=None) for d in data["a"]]
}
def test_table_should_convert_UTC_to_local_time_pytz_GMT(self):
data = {
"a": UTC_DATETIMES
}
table = Table(data)
os.environ["TZ"] = "GMT"
time.tzset()
# Should be in GMT now
assert table.view().to_dict() == {
"a": [d.astimezone(GMT).replace(tzinfo=None) for d in data["a"]]
}
def test_table_should_convert_UTC_to_local_time_pytz_HKT(self):
data = {
"a": UTC_DATETIMES
}
table = Table(data)
os.environ["TZ"] = "Asia/Hong_Kong"
time.tzset()
assert table.view().to_dict() == {
"a": [d.astimezone(HKT).replace(tzinfo=None) for d in data["a"]]
}
def test_table_should_convert_UTC_to_local_time_pytz_JPT(self):
data = {
"a": UTC_DATETIMES
}
table = Table(data)
os.environ["TZ"] = "Asia/Tokyo"
time.tzset()
assert table.view().to_dict() == {
"a": [d.astimezone(JPT).replace(tzinfo=None) for d in data["a"]]
}
def test_table_should_convert_UTC_to_local_time_pytz_ACT(self):
data = {
"a": UTC_DATETIMES
}
table = Table(data)
os.environ["TZ"] = "Australia/Sydney"
time.tzset()
assert table.view().to_dict() == {
"a": [d.astimezone(ACT).replace(tzinfo=None) for d in data["a"]]
}
def test_table_should_convert_UTC_to_local_time_dateutil_pacific(self):
data = {
"a": UTC_DATETIMES
}
table = Table(data)
os.environ["TZ"] = "US/Pacific"
time.tzset()
# Should be in PST now
assert table.view().to_dict() == {
"a": [d.astimezone(PST).replace(tzinfo=None) for d in data["a"]]
}
def test_table_should_convert_UTC_to_local_time_dateutil_central(self):
data = {
"a": UTC_DATETIMES
}
table = Table(data)
os.environ["TZ"] = "US/Central"
time.tzset()
# Should be in CST now
assert table.view().to_dict() == {
"a": [d.astimezone(CST).replace(tzinfo=None) for d in data["a"]]
}
def test_table_should_convert_UTC_to_local_time_dateutil_eastern(self):
data = {
"a": UTC_DATETIMES
}
table = Table(data)
os.environ["TZ"] = "US/Eastern"
time.tzset()
# Should be in EST now
assert table.view().to_dict() == {
"a": [d.astimezone(EST).replace(tzinfo=None) for d in data["a"]]
}
def test_table_should_convert_UTC_to_local_time_dateutil_GMT(self):
data = {
"a": UTC_DATETIMES
}
table = Table(data)
os.environ["TZ"] = "GMT"
time.tzset()
# Should be in GMT now
assert table.view().to_dict() == {
"a": [d.astimezone(GMT).replace(tzinfo=None) for d in data["a"]]
}
def test_table_should_convert_UTC_to_local_time_dateutil_pacific_DST(self):
data = {
"a": UTC_DATETIMES_DST
}
table = Table(data)
os.environ["TZ"] = "US/Pacific"
time.tzset()
# Should be in PST now
assert table.view().to_dict() == {
"a": [d.replace(tzinfo=None) for d in TZ_DATETIMES_DST["US/Pacific"]]
}
def test_table_should_convert_UTC_to_local_time_dateutil_central_DST(self):
data = {
"a": UTC_DATETIMES_DST
}
table = Table(data)
os.environ["TZ"] = "US/Central"
time.tzset()
# Should be in CST now
assert table.view().to_dict() == {
"a": [d.replace(tzinfo=None) for d in TZ_DATETIMES_DST["US/Central"]]
}
def test_table_should_convert_UTC_to_local_time_dateutil_eastern_DST(self):
data = {
"a": UTC_DATETIMES_DST
}
table = Table(data)
os.environ["TZ"] = "US/Eastern"
time.tzset()
# Should be in EST now
assert table.view().to_dict() == {
"a": [d.replace(tzinfo=None) for d in TZ_DATETIMES_DST["US/Eastern"]]
}
def test_table_should_convert_UTC_to_local_time_dateutil_GMT_DST(self):
data = {
"a": UTC_DATETIMES_DST
}
table = Table(data)
os.environ["TZ"] = "GMT"
time.tzset()
# Should be in GMT now
assert table.view().to_dict() == {
"a": [d.replace(tzinfo=None) for d in TZ_DATETIMES_DST["GMT"]]
}
def test_table_should_convert_UTC_to_local_time_dateutil_pacific_DST_timestamp(self):
data = pd.DataFrame({
"a": UTC_TIMESTAMPS_DST
})
table = Table(data)
os.environ["TZ"] = "US/Pacific"
time.tzset()
# Should be in PST now
assert table.view().to_dict()["a"] == [d.replace(tzinfo=None) for d in TZ_DATETIMES_DST["US/Pacific"]]
def test_table_should_convert_UTC_to_local_time_dateutil_central_DST_timestamp(self):
data = pd.DataFrame({
"a": UTC_TIMESTAMPS_DST
})
table = Table(data)
os.environ["TZ"] = "US/Central"
time.tzset()
# Should be in CST now
assert table.view().to_dict()["a"] == [d.replace(tzinfo=None) for d in TZ_DATETIMES_DST["US/Central"]]
def test_table_should_convert_UTC_to_local_time_dateutil_eastern_DST_timestamp(self):
data = pd.DataFrame({
"a": UTC_TIMESTAMPS_DST
})
table = Table(data)
os.environ["TZ"] = "US/Eastern"
time.tzset()
# Should be in EST now
assert table.view().to_dict()["a"] == [d.replace(tzinfo=None) for d in TZ_DATETIMES_DST["US/Eastern"]]
def test_table_should_convert_UTC_to_local_time_dateutil_GMT_DST_timestamp(self):
data = pd.DataFrame({
"a": UTC_TIMESTAMPS_DST
})
table = Table(data)
os.environ["TZ"] = "GMT"
time.tzset()
# Should be in GMT now
assert table.view().to_dict()["a"] == [d.replace(tzinfo=None) for d in TZ_DATETIMES_DST["GMT"]]
def test_table_should_convert_UTC_to_local_time_dateutil_HKT(self):
data = {
"a": UTC_DATETIMES
}
table = Table(data)
os.environ["TZ"] = "Asia/Hong_Kong"
time.tzset()
assert table.view().to_dict() == {
"a": [d.astimezone(HKT).replace(tzinfo=None) for d in data["a"]]
}
def test_table_should_convert_UTC_to_local_time_dateutil_JPT(self):
data = {
"a": UTC_DATETIMES
}
table = Table(data)
os.environ["TZ"] = "Asia/Tokyo"
time.tzset()
assert table.view().to_dict() == {
"a": [d.astimezone(JPT).replace(tzinfo=None) for d in data["a"]]
}
def test_table_should_convert_UTC_to_local_time_dateutil_ACT(self):
data = {
"a": UTC_DATETIMES
}
table = Table(data)
os.environ["TZ"] = "Australia/Sydney"
time.tzset()
ACT = tz.gettz("Australia/Sydney")
assert table.view().to_dict() == {
"a": [d.astimezone(ACT).replace(tzinfo=None) for d in data["a"]]
}
def test_table_should_convert_UTC_to_local_time_pytz_pacific_timestamp(self):
data = pd.DataFrame({
"a": UTC_TIMESTAMPS
})
table = Table(data)
os.environ["TZ"] = "US/Pacific"
time.tzset()
# Should be in PST now
assert table.view().to_dict()["a"] == [d.astimezone(PST).replace(tzinfo=None) for d in data["a"]]
def test_table_should_convert_UTC_to_local_time_pytz_central_timestamp(self):
data = pd.DataFrame({
"a": UTC_TIMESTAMPS
})
table = Table(data)
os.environ["TZ"] = "US/Central"
time.tzset()
# Should be in CST now
assert table.view().to_dict()["a"] == [d.astimezone(CST).replace(tzinfo=None) for d in data["a"]]
def test_table_should_convert_UTC_to_local_time_pytz_eastern_timestamp(self):
data = pd.DataFrame({
"a": UTC_TIMESTAMPS
})
table = Table(data)
os.environ["TZ"] = "US/Eastern"
time.tzset()
# Should be in EST now
assert table.view().to_dict()["a"] == [d.astimezone(EST).replace(tzinfo=None) for d in data["a"]]
def test_table_should_convert_UTC_to_local_time_pytz_GMT_timestamp(self):
data = pd.DataFrame({
"a": UTC_TIMESTAMPS
})
table = Table(data)
os.environ["TZ"] = "GMT"
time.tzset()
# Should be in GMT now
assert table.view().to_dict()["a"] == [d.astimezone(GMT).replace(tzinfo=None) for d in data["a"]]
def test_table_should_convert_UTC_to_local_time_pytz_HKT_timestamp(self):
data = pd.DataFrame({
"a": UTC_TIMESTAMPS
})
table = Table(data)
os.environ["TZ"] = "Asia/Hong_Kong"
time.tzset()
assert table.view().to_dict()["a"] == [d.astimezone(HKT).replace(tzinfo=None) for d in data["a"]]
def test_table_should_convert_UTC_to_local_time_pytz_JPT_timestamp(self):
data = pd.DataFrame({
"a": UTC_TIMESTAMPS
})
table = Table(data)
os.environ["TZ"] = "Asia/Tokyo"
time.tzset()
assert table.view().to_dict()["a"] == [d.astimezone(JPT).replace(tzinfo=None) for d in data["a"]]
def test_table_should_convert_UTC_to_local_time_pytz_ACT_timestamp(self):
data = pd.DataFrame({
"a": UTC_TIMESTAMPS
})
table = Table(data)
os.environ["TZ"] = "Australia/Sydney"
time.tzset()
assert table.view().to_dict()["a"] == [d.astimezone(ACT).replace(tzinfo=None) for d in data["a"]]
def test_table_should_convert_UTC_to_local_time_dateutil_pacific_timestamp(self):
data = pd.DataFrame({
"a": UTC_TIMESTAMPS
})
table = Table(data)
os.environ["TZ"] = "US/Pacific"
time.tzset()
# Should be in PST now
assert table.view().to_dict()["a"] == [d.astimezone(PST).replace(tzinfo=None) for d in data["a"]]
def test_table_should_convert_UTC_to_local_time_dateutil_central_timestamp(self):
data = pd.DataFrame({
"a": UTC_TIMESTAMPS
})
table = Table(data)
os.environ["TZ"] = "US/Central"
time.tzset()
CST = tz.gettz("US/Central")
# Should be in CST now
assert table.view().to_dict()["a"] == [d.astimezone(CST).replace(tzinfo=None) for d in data["a"]]
def test_table_should_convert_UTC_to_local_time_dateutil_eastern_timestamp(self):
data = pd.DataFrame({
"a": UTC_TIMESTAMPS
})
table = Table(data)
os.environ["TZ"] = "US/Eastern"
time.tzset()
# Should be in EST now
assert table.view().to_dict()["a"] == [d.astimezone(EST).replace(tzinfo=None) for d in data["a"]]
def test_table_should_convert_UTC_to_local_time_dateutil_GMT_timestamp(self):
data = pd.DataFrame({
"a": UTC_TIMESTAMPS
})
table = Table(data)
os.environ["TZ"] = "GMT"
time.tzset()
GMT = tz.gettz("GMT")
# Should be in GMT now
assert table.view().to_dict()["a"] == [d.astimezone(GMT).replace(tzinfo=None) for d in data["a"]]
def test_table_should_convert_UTC_to_local_time_dateutil_HKT_timestamp(self):
data = pd.DataFrame({
"a": UTC_TIMESTAMPS
})
table = Table(data)
os.environ["TZ"] = "Asia/Hong_Kong"
time.tzset()
assert table.view().to_dict()["a"] == [d.astimezone(HKT).replace(tzinfo=None) for d in data["a"]]
def test_table_should_convert_UTC_to_local_time_dateutil_JPT_timestamp(self):
data = pd.DataFrame({
"a": UTC_TIMESTAMPS
})
table = Table(data)
os.environ["TZ"] = "Asia/Tokyo"
time.tzset()
assert table.view().to_dict()["a"] == [d.astimezone(JPT).replace(tzinfo=None) for d in data["a"]]
def test_table_should_convert_UTC_to_local_time_dateutil_ACT_timestamp(self):
data = pd.DataFrame({
"a": UTC_TIMESTAMPS
})
table = Table(data)
os.environ["TZ"] = "Australia/Sydney"
time.tzset()
assert table.view().to_dict()["a"] == [d.astimezone(ACT).replace(tzinfo=None) for d in data["a"]]
class TestTableDateTimeArbitaryToLocal(object):
def teardown_method(self):
# Set timezone to UTC, always
os.environ["TZ"] = "UTC"
time.tzset()
def test_table_should_convert_PST_to_local_time_pytz_central(self):
data = {
"a": TZ_DATETIMES["US/Pacific"]
}
table = Table(data)
os.environ["TZ"] = "US/Central"
time.tzset()
# Should be in CST now
assert table.view().to_dict() == {
"a": [d.astimezone(CST).replace(tzinfo=None) for d in data["a"]]
}
def test_table_should_convert_CST_to_local_time_pytz_eastern(self):
data = {
"a": TZ_DATETIMES["US/Central"]
}
table = Table(data)
os.environ["TZ"] = "US/Eastern"
time.tzset()
# Should be in EST now
assert table.view().to_dict() == {
"a": [d.astimezone(EST).replace(tzinfo=None) for d in data["a"]]
}
def test_table_should_convert_EST_to_local_time_pytz_GMT(self):
data = {
"a": TZ_DATETIMES["US/Eastern"]
}
table = Table(data)
os.environ["TZ"] = "GMT"
time.tzset()
# Should be in GMT now
assert table.view().to_dict() == {
"a": [d.astimezone(GMT).replace(tzinfo=None) for d in data["a"]]
}
def test_table_should_convert_GMT_to_local_time_pytz_HKT(self):
data = {
"a": TZ_DATETIMES["GMT"]
}
table = Table(data)
os.environ["TZ"] = "Asia/Hong_Kong"
time.tzset()
assert table.view().to_dict() == {
"a": [d.astimezone(HKT).replace(tzinfo=None) for d in data["a"]]
}
def test_table_should_convert_HKT_to_local_time_pytz_JPT(self):
data = {
"a": TZ_DATETIMES["Asia/Hong_Kong"]
}
table = Table(data)
os.environ["TZ"] = "Asia/Tokyo"
time.tzset()
assert table.view().to_dict() == {
"a": [d.astimezone(JPT).replace(tzinfo=None) for d in data["a"]]
}
def test_table_should_convert_JPT_to_local_time_pytz_ACT(self):
data = {
"a": TZ_DATETIMES["Asia/Tokyo"]
}
table = Table(data)
os.environ["TZ"] = "Australia/Sydney"
time.tzset()
assert table.view().to_dict() == {
"a": [d.astimezone(ACT).replace(tzinfo=None) for d in data["a"]]
}
def test_table_should_convert_PST_to_local_time_dateutil_central(self):
data = {
"a": TZ_DATETIMES["US/Pacific"]
}
table = Table(data)
os.environ["TZ"] = "US/Central"
time.tzset()
# Should be in CST now
assert table.view().to_dict() == {
"a": [d.astimezone(CST).replace(tzinfo=None) for d in data["a"]]
}
def test_table_should_convert_CST_to_local_time_dateutil_eastern(self):
data = {
"a": TZ_DATETIMES["US/Central"]
}
table = Table(data)
os.environ["TZ"] = "US/Eastern"
time.tzset()
# Should be in EST now
assert table.view().to_dict() == {
"a": [d.astimezone(EST).replace(tzinfo=None) for d in data["a"]]
}
def test_table_should_convert_EST_to_local_time_dateutil_GMT(self):
data = {
"a": TZ_DATETIMES["US/Eastern"]
}
table = Table(data)
os.environ["TZ"] = "GMT"
time.tzset()
# Should be in GMT now
assert table.view().to_dict() == {
"a": [d.astimezone(GMT).replace(tzinfo=None) for d in data["a"]]
}
def test_table_should_convert_GMT_to_local_time_dateutil_HKT(self):
data = {
"a": TZ_DATETIMES["GMT"]
}
table = Table(data)
os.environ["TZ"] = "Asia/Hong_Kong"
time.tzset()
assert table.view().to_dict() == {
"a": [d.astimezone(HKT).replace(tzinfo=None) for d in data["a"]]
}
def test_table_should_convert_HKT_to_local_time_dateutil_JPT(self):
data = {
"a": TZ_DATETIMES["Asia/Hong_Kong"]
}
table = Table(data)
os.environ["TZ"] = "Asia/Tokyo"
time.tzset()
assert table.view().to_dict() == {
"a": [d.astimezone(JPT).replace(tzinfo=None) for d in data["a"]]
}
def test_table_should_convert_JPT_to_local_time_dateutil_ACT(self):
data = {
"a": TZ_DATETIMES["Asia/Tokyo"]
}
table = Table(data)
os.environ["TZ"] = "Australia/Sydney"
time.tzset()
assert table.view().to_dict() == {
"a": [d.astimezone(ACT).replace(tzinfo=None) for d in data["a"]]
}
def test_table_should_convert_PST_to_local_time_pytz_central_timestamp(self):
data = {
"a": TZ_TIMESTAMPS["US/Pacific"]
}
table = Table(pd.DataFrame(data))
os.environ["TZ"] = "US/Central"
time.tzset()
# Should be in CST now
assert table.view().to_dict()["a"] == [d.astimezone(CST).replace(tzinfo=None) for d in data["a"]]
def test_table_should_convert_CST_to_local_time_pytz_eastern_timestamp(self):
data = {
"a": TZ_TIMESTAMPS["US/Central"]
}
table = Table(pd.DataFrame(data))
os.environ["TZ"] = "US/Eastern"
time.tzset()
# Should be in EST now
assert table.view().to_dict()["a"] == [d.astimezone(EST).replace(tzinfo=None) for d in data["a"]]
def test_table_should_convert_EST_to_local_time_pytz_GMT_timestamp(self):
data = {
"a": TZ_TIMESTAMPS["US/Eastern"]
}
table = Table(pd.DataFrame(data))
os.environ["TZ"] = "GMT"
time.tzset()
# Should be in GMT now
assert table.view().to_dict()["a"] == [d.astimezone(GMT).replace(tzinfo=None) for d in data["a"]]
def test_table_should_convert_GMT_to_local_time_pytz_HKT_timestamp(self):
data = {
"a": TZ_TIMESTAMPS["GMT"]
}
table = Table(pd.DataFrame(data))
os.environ["TZ"] = "Asia/Hong_Kong"
time.tzset()
assert table.view().to_dict()["a"] == [d.astimezone(HKT).replace(tzinfo=None) for d in data["a"]]
def test_table_should_convert_HKT_to_local_time_pytz_JPT_timestamp(self):
data = {
"a": TZ_TIMESTAMPS["Asia/Hong_Kong"]
}
table = Table(pd.DataFrame(data))
os.environ["TZ"] = "Asia/Tokyo"
time.tzset()
assert table.view().to_dict()["a"] == [d.astimezone(JPT).replace(tzinfo=None) for d in data["a"]]
def test_table_should_convert_JPT_to_local_time_pytz_ACT_timestamp(self):
data = {
"a": TZ_TIMESTAMPS["Asia/Tokyo"]
}
table = Table(pd.DataFrame(data))
os.environ["TZ"] = "Australia/Sydney"
time.tzset()
assert table.view().to_dict()["a"] == [d.astimezone(ACT).replace(tzinfo=None) for d in data["a"]]
def test_table_should_convert_PST_to_local_time_dateutil_central_timestamp(self):
data = {
"a": TZ_TIMESTAMPS["US/Pacific"]
}
table = Table(pd.DataFrame(data))
os.environ["TZ"] = "US/Central"
time.tzset()
# Should be in CST now
assert table.view().to_dict()["a"] == [d.astimezone(CST).replace(tzinfo=None) for d in data["a"]]
def test_table_should_convert_CST_to_local_time_dateutil_eastern_timestamp(self):
data = {
"a": TZ_TIMESTAMPS["US/Central"]
}
table = Table(pd.DataFrame(data))
os.environ["TZ"] = "US/Eastern"
time.tzset()
# Should be in EST now
assert table.view().to_dict()["a"] == [d.astimezone(EST).replace(tzinfo=None) for d in data["a"]]
def test_table_should_convert_EST_to_local_time_dateutil_GMT_timestamp(self):
data = {
"a": TZ_TIMESTAMPS["US/Eastern"]
}
table = Table(pd.DataFrame(data))
os.environ["TZ"] = "GMT"
time.tzset()
# Should be in GMT now
assert table.view().to_dict()["a"] == [d.astimezone(GMT).replace(tzinfo=None) for d in data["a"]]
def test_table_should_convert_GMT_to_local_time_dateutil_HKT_timestamp(self):
data = {
"a": TZ_TIMESTAMPS["GMT"]
}
table = Table(pd.DataFrame(data))
os.environ["TZ"] = "Asia/Hong_Kong"
time.tzset()
assert table.view().to_dict()["a"] == [d.astimezone(HKT).replace(tzinfo=None) for d in data["a"]]
def test_table_should_convert_HKT_to_local_time_dateutil_JPT_timestamp(self):
data = {
"a": TZ_TIMESTAMPS["Asia/Hong_Kong"]
}
table = Table(pd.DataFrame(data))
os.environ["TZ"] = "Asia/Tokyo"
time.tzset()
assert table.view().to_dict()["a"] == [d.astimezone(JPT).replace(tzinfo=None) for d in data["a"]]
import datetime
import numpy as np
import pandas as pd
from sqlalchemy import sql
def get_and_adjust_data(db_engine, station_id, start, end):
"""
Get data from the database in both the bike count format and the outage
format, between the passed dates. If bike count data and outage data is
available for the same time, bike count data takes precedence.
If no data is available for a subset of the passed period of time, it will
be left out of the returned dataset.
"""
data_list = []
# Create empty DateTimeIndex with frequency of five minutes, and assign it
# to an empty series.
# "5T" is five minutes.
dti = pd.date_range(0, -1, freq="5T")
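# Sketch (assumption: the remainder of this function is not shown in this
# excerpt). One way to give bike-count data precedence over outage data for
# overlapping timestamps is pandas' combine_first, with both series aligned
# to the 5-minute index built above, e.g.
#   merged = bike_count_series.combine_first(outage_series)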
#!/usr/bin/python
####################################################################################
# '||''|. '|| _ '||
# || || || ... ... ... .. ... .. || ... ... .. .. .. ... ...
# ||'''|. || || || .| '|. || || .' '|| || || || || || ||' ||
# || || || || || || || || || |. || || || || || || || |
# .||...|' .||. '|..'|. '|..|' .||. ||. '|..'||. '|..'|. .|| || ||. ||...'
# ||
# ''''
####################################################################################
import requests
import json
import pandas as pd
from plotnine import *
import click
banner = """
'||''|. '|| _ '||
|| || || ... ... ... .. ... .. || ... ... .. .. .. ... ...
||'''|. || || || .| '|. || || .' '|| || || || || || ||' ||
|| || || || || || || || || |. || || || || || || || |
.||...|' .||. '|..'|. '|..|' .||. ||. '|..'||. '|..'|. .|| || ||. ||...'
Dump all data connected to a Bluon GPS Lora localizer. ||
'''' """
print(banner)
@click.command()
@click.option('--days', default=10, help='(Numeric - Default:10) Days to go back in the history - if you set 0 I will stop at first empty day')
@click.option('--csv', default=True, help='(Boolean - Default:True) Save all data in a CSV file')
@click.option('--key', prompt='Your key', help='(String) The bluon key like in the link present in some email (url parameter s)')
@click.option('--quiet', default=False, help='(Boolean - Default:False) Be quiet')
@click.option('--image', default=False, help='(Boolean - Default:False) Create images')
def main(days, csv, key, quiet, image):
"""Dump all data connected to a Bluon GPS Lora localizer."""
set = {}
set['id'] = []
set['tracker_dev_id'] = []
set['lat'] = []
set['lon'] = []
set['date'] = []
set['speed'] = []
set['altitude'] = []
set['gps_num'] = []
set['hdop'] = []
set['emergency'] = []
set['charging'] = []
set['battery'] = []
set['seqno'] = []
set['sf'] = []
set['timestamp'] = []
if (days==0):
stop = 1000000
else:
stop = days
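# Page through the history one day at a time via the 'offset' parameter;
# with --days 0 the loop keeps requesting until an empty day comes back.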
for i in range (0, stop):
payload = {'s': key, 'offset': str(i)}
headers = {'content-type': 'application/x-www-form-urlencoded'}
r = requests.post('https://bluon.me/utilities/active/load_coords.php', data=payload, headers=headers)
if len(r.content) > 0:
dataset = r.json()[0]['last_coord']
if (days==0):
if len(dataset) == 0:
break
for row in dataset:
if not quiet:
print(row)
set['id'].append(row['id'])
set['tracker_dev_id'].append(row['tracker_dev_id'])
set['lat'].append(row['lat'])
set['lon'].append(row['lon'])
set['date'].append(row['date'])
set['speed'].append(row['speed'])
set['altitude'].append(row['altitude'])
set['gps_num'].append(row['gps_num'])
set['hdop'].append(row['hdop'])
set['emergency'].append(row['emergency'])
set['charging'].append(row['charging'])
set['battery'].append(row['battery'])
set['seqno'].append(row['seqno'])
set['sf'].append(row['sf'])
set['timestamp'].append(row['timestamp'])
df = pd.DataFrame(set)
df['id'] = pd.to_numeric(df.id)
df['lat'] = pd.to_numeric(df.lat)
df['lon'] = pd.to_numeric(df.lon)
df['speed'] = pd.to_numeric(df.speed)
df['altitude'] = pd.to_numeric(df.altitude)
df['gps_num'] = pd.to_numeric(df.gps_num)
df['hdop'] = pd.to_numeric(df.hdop)
df['emergency'] = pd.to_numeric(df.emergency)
df['charging'] = pd.to_numeric(df.charging)
df['battery'] = pd.to_numeric(df.battery)
df['seqno'] = pd.to_numeric(df.seqno)
df['sf'] = pd.to_numeric(df.sf)
df['timestamp'] = pd.to_datetime(df['timestamp'], unit='ms', origin='unix')
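# Sketch (assumption: the rest of the script is not shown in this excerpt).
# The --csv and --image options declared above would typically be honoured
# roughly as follows:
#   if csv:
#       df.to_csv('bluon_dump.csv', index=False)
#   if image:
#       (ggplot(df, aes(x='lon', y='lat')) + geom_path()).save('bluon_track.png')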
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 15 00:20:45 2020
@author: mhrahman
"""
#%%
import json,os , glob, shutil
import re
import pandas as pd
import matplotlib.pyplot as plt
import itertools
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import seaborn as sns
import scipy.stats as st
from sklearn import preprocessing
import pickle
from pyclustering.cluster import cluster_visualizer
from pyclustering.cluster.xmeans import xmeans
from pyclustering.cluster.center_initializer import kmeans_plusplus_initializer
#%%
## convert to time gap in second------------------------
#p_path = r'D:\Molla\Stoughton_data\Data_stoughtn\Data_value\Final_Data_value'
p_path = r'D:\Molla\Uark_Data\Extracted_data\Valid_action'
os.chdir(p_path)
all_file = os.listdir(p_path)
#%%
csv = pd.read_csv(all_file[25])
csv = csv[csv.Timegap != 0]
fig, ax = plt.subplots()
ax.plot(csv.Timegap.values)
# Optionally label every point with its action name (dense for long sessions):
# for j, txt in enumerate(list(csv.Action)):
#     ax.annotate(txt, (j, csv.Timegap.values[j]), rotation=-90, fontsize=8)
plt.ylabel("Time gaps in second")
def Greaterthannumber(val,actions,number):
if len(val) != len(actions):
return
for i in range(0,len(actions)):
if val[i] > number:
plt.annotate(actions[i], (i,val[i]),rotation = -90, fontsize = 8)
Greaterthannumber(csv.Timegap.values, list(csv.Action), 20)
plt.show()
bins = [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100,110,120,130,140,150,160,170,180,190,200]
fu = csv['Timegap'].value_counts(bins=bins, sort=False)
bins = list(range(1, int(max(csv.Timegap)) ,1))
#%%
# Frequency
def pdf (file_list):
for i in range(len(file_list)):
os.chdir(p_path)
file_nm = os.path.splitext(file_list[i])[0]
csv = pd.read_csv(file_list[i])
csv = csv[csv.Timegap != 0]
bins = list(range(1, int(max(csv.Timegap)) ,1))
sns.histplot(csv.Timegap,bins = bins)
#out = r'D:\Molla\Stoughton_data\Distribution\PDF_timegap'
out = r'D:\Molla\Uark_Data\Result\Timegap\PDF'
os.chdir(out)
plt.savefig('{}.png'.format(file_nm),bbox_inches='tight',dpi = 600)
plt.close()
pdf(all_file)
def cdf (file_list):
for i in range(len(file_list)):
os.chdir(p_path)
file_nm = os.path.splitext(file_list[i])[0]
csv = pd.read_csv(file_list[i])
csv = csv[csv.Timegap != 0]
sns.kdeplot(csv.Timegap,cumulative = True)
#out = r'D:\Molla\Stoughton_data\Distribution\CDF_timegap'
out = r'D:\Molla\Uark_Data\Result\Timegap\CDF'
os.chdir(out)
plt.savefig('{}.png'.format(file_nm),bbox_inches='tight',dpi = 600)
plt.close()
cdf(all_file)
#%%
def get_best_distribution(data):
# dist_names = ["norm", "exponweib", "weibull_max", "weibull_min","expon","pareto", "genextreme","gamma","beta",'halfcauchy','lognorm']
dist_names = ["genextreme"]
dist_results = []
params = {}
for dist_name in dist_names:
dist = getattr(st, dist_name)
param = dist.fit(data)
params[dist_name] = param
# Applying the Kolmogorov-Smirnov test
D, p = st.kstest(data, dist_name, args=param)
print("p value for "+dist_name+" = "+str(p))
dist_results.append((dist_name, p))
# select the best fitted distribution
best_dist, best_p = (max(dist_results, key=lambda item: item[1]))
# store the name of the best fit and its p value
print("Best fitting distribution: "+str(best_dist))
print("Best p value: "+ str(best_p))
print("Parameters for the best fit: "+ str(params[best_dist]))
return best_dist, best_p, params[best_dist]
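# Example usage (illustrative):
#   best_name, best_p, best_params = get_best_distribution(csv.Timegap)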
#%%
def pdf (file_list):
for i in range(len(file_list)):
os.chdir(p_path)
file_nm = os.path.splitext(file_list[i])[0]
csv = pd.read_csv(file_list[i])
csv = csv[csv.Timegap != 0]
bins = list(range(1, int(max(csv.Timegap)) ,1))
sns.distplot(csv.Timegap,bins = bins)
y = np.asarray(list(csv.Timegap))
x = np.arange(len(y))
number_of_bins = len(y)
bin_cutoffs = np.linspace(np.percentile(y,0), np.percentile(y,99),number_of_bins)
h = plt.hist(y, bins = bin_cutoffs, color='red')
best_name, best_p, param = get_best_distribution(y)
dist = getattr(st, best_name)
pdf_fitted = dist.pdf(np.arange(len(y)), *param[:-2], loc=param[-2], scale=param[-1])
scale_pdf = np.trapz(h[0],h[1][:-1])/np.trapz(pdf_fitted,x)
pdf_fitted *= scale_pdf
plt.plot(pdf_fitted)
plt.show()
#%%
def pdf_fitted(csv):
y = np.asarray(list(csv.Timegap))
x = np.arange(len(y))
number_of_bins = len(y)
# bin_cutoff = np.linspace(np.percentile(y,0),np.percentile(y,99),number_of_bins)
h = plt.hist(y,bins= 300)
k = get_best_distribution(y)
dist = getattr(st,k[0])
param = k[2]
# pdf_fit = dist.pdf(x,param[:-2],loc = param[-2],scale = param[-1])
pdf_fit = dist.pdf(x,param[0],param[1])
scaled_pdf = np.trapz(h[0],h[1][:-1])/np.trapz(pdf_fit,x)
# plt.xlim(0,300)
pdf_fit *= scaled_pdf
plt.plot(pdf_fit,'--g',linewidth = 0.6,label = 'GEV distribution')
plt.legend(loc = 'upper right')
# plt.xlabel("Time gap (second)")
# plt.ylabel("Frequecy of time gap")
plt.show()
#%%
#p_path = r'D:\Molla\Stoughton_data\Data_stoughtn\Data_value\Final_Data_value'
p_path = r'D:\Molla\Uark_Data\Extracted_data\Valid_action'
os.chdir(p_path)
all_file = os.listdir(p_path)
for i in range(len(all_file)):
os.chdir(p_path)
file_nm = os.path.splitext(all_file[i])[0]
csv = pd.read_csv(all_file[i])
csv = csv[csv.Timegap != 0]
pdf_fitted(csv)
#out = r'D:\Molla\Stoughton_data\Distribution\New_dist'
out = r'D:\Molla\Uark_Data\Result\Timegap\Fitter_dist'
os.chdir(out)
plt.savefig('{}.png'.format(file_nm),bbox_inches='tight',dpi = 600)
plt.close()
# Distribution for all
#%%
#p_path = r'D:\Molla\Stoughton_data\Data_stoughtn\Data_value\Final_Data_value'
p_path = r'D:\Molla\Uark_Data\Extracted_data\Valid_action'
os.chdir(p_path)
all_file = os.listdir(p_path)
file = []
dist_name = []
parameters = []
param_1 = []
param_2 = []
param_3 = []
for i in range(len(all_file)):
os.chdir(p_path)
file_nm = os.path.splitext(all_file[i])[0]
csv = pd.read_csv(all_file[i])
csv = csv[csv.Timegap != 0]
k = get_best_distribution(csv.Timegap)
dist_name.append(k[0])
file.append(file_nm)
a = k[2][0]
b = k[2][1]
c = k[2][2]
param_1.append(a)
param_2.append(b)
param_3.append(c)
Df = pd.DataFrame({
'Param 1': param_1,
'param 2':param_2,
'param 3': param_3})
Only_values = Df.values
#%%# Saving the embedding
#loc = r'D:\Molla\Stoughton_data\Models\New folder\Saved_embedding'
loc = r'D:\Molla\Uark_Data\Result\Saved_emd'
#loc = r'D:\Molla\Stoughton_data\For_Journal\Saved_embedding'
os.chdir(loc)
with open('Timegap.pkl','wb') as f:
pickle.dump(Df.values,f)
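# The saved embedding can be reloaded later with, for example:
#   with open('Timegap.pkl', 'rb') as f:
#       timegap_embedding = pickle.load(f)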
#%%
def elbow_plot(matrix):
wcss = []
for i in range(1,10):
Kmeans = KMeans(n_clusters= i, init= 'k-means++', random_state= 42)
Kmeans.fit(matrix)
wcss.append(Kmeans.inertia_)
plt.plot(range(1,10), wcss)
plt.title('The elbow method')
plt.xlabel('Number of clusters')
plt.ylabel('WCSS')
plt.show()
elbow_plot(Only_values)
def plot_kmean(num_cluster,vector,n_component):
reduced_data_PCA = PCA(n_components= n_component).fit_transform(vector)
kmeans = KMeans(init='k-means++', n_clusters= num_cluster, n_init=10)
kmeans.fit(vector)
labels = kmeans.fit_predict(vector)
print(labels)
fig = plt.figure(figsize=(5.5, 3))
ax = Axes3D(fig, rect=[0, 0, .7, 1], elev=48, azim=134)
ax.scatter(reduced_data_PCA[:, 1], reduced_data_PCA[:, 0], reduced_data_PCA[:, 2],
c=labels.astype(float), edgecolor="k", s=50)
plt.show()
return kmeans
kmeans = plot_kmean(3,Only_values,3)
action_clust = []
for j in range(kmeans.n_clusters):
at = []
for i in np.where(kmeans.labels_ == j)[0]:
at.append(file[i])
action_clust.append(at)
df = pd.DataFrame(action_clust).T
columns = ["0", "1","2"]
df.columns = columns
## LOAD design_output
path = r'D:\Molla\Stoughton_data\Distribution'
os.chdir(path)
design_output = pd.read_csv('Design_output.csv')
design_output.set_index('Computer ID')
mean = []
std = []
for i in range(len(df.columns)):
cluster_wise = []
for j in range(len(df['{}'.format(i)])):
design = df['{}'.format(i)][j]
if design in list(design_output['Computer ID']):
a = design_output.loc[design_output['Computer ID'] == design, 'Co-efficient'].iloc[0]
cluster_wise.append(a)
m = np.mean(cluster_wise)
s = np.std(cluster_wise)
mean.append(m)
std.append(s)
df.loc[len(df)] = mean
df.loc[len(df)] = std
df = df.rename(index = {df.index[-2]:'mean',df.index[-1]:'std'})
out_path = r'D:\Molla\Stoughton_data\Distribution'
os.chdir(out_path)
df.to_csv('Timegap_cluster.csv', index = True)
# Additional for distribution-----
distribution = "expon"
data = np.asarray(list(csv.Timegap))
dist = getattr(st, distribution)
param = dist.fit(data)
# Get random numbers from distribution
norm = dist.rvs(loc=param[-2], scale=param[-1],size = len(data))
norm.sort()
# Create figure
fig = plt.figure(figsize=(8,5))
# qq plot
ax1 = fig.add_subplot(121) # Grid of 2x2, this is suplot 1
ax1.plot(norm,data,"o")
min_value = np.floor(min(min(norm),min(data)))
max_value = np.ceil(max(max(norm),max(data)))
ax1.plot([min_value,max_value],[min_value,max_value],'r--')
ax1.set_xlim(min_value,max_value)
ax1.set_xlabel('Theoretical quantiles')
ax1.set_ylabel('Observed quantiles')
title = 'qq plot for ' + distribution +' distribution'
ax1.set_title(title)
# pp plot
ax2 = fig.add_subplot(122)
# Calculate cumulative distributions
bins = np.percentile(norm,range(0,101))
data_counts, bins = np.histogram(data,bins)
norm_counts, bins = np.histogram(norm,bins)
cum_data = np.cumsum(data_counts)
cum_norm = np.cumsum(norm_counts)
cum_data = cum_data / max(cum_data)
cum_norm = cum_norm / max(cum_norm)
# plot
ax2.plot(cum_norm,cum_data,"o")
min_value = np.floor(min(min(cum_norm),min(cum_data)))
max_value = np.ceil(max(max(cum_norm),max(cum_data)))
ax2.plot([min_value,max_value],[min_value,max_value],'r--')
ax2.set_xlim(min_value,max_value)
ax2.set_xlabel('Theoretical cumulative distribution')
ax2.set_ylabel('Observed cumulative distribution')
title = 'pp plot for ' + distribution +' distribution'
ax2.set_title(title)
# Display plot
plt.tight_layout(pad=4)
plt.show()
#%%
#X_means clustering -------------------------------------------------------------
reduced_data = PCA(n_components=3).fit_transform(Only_values)
amount_initial_centers = 2
initial_centers = kmeans_plusplus_initializer(reduced_data,amount_initial_centers).initialize()
xmeans_instance = xmeans(reduced_data, initial_centers, 20)
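# X-Means starts from the 2 k-means++ centers above and is allowed to split
# into at most 20 clusters, so the number of clusters is chosen by the
# algorithm rather than fixed in advance.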
xmeans_instance.process()
# Extract clustering results: clusters and their centers
clusters = xmeans_instance.get_clusters()
centers = xmeans_instance.get_centers()
# Visualize clustering results
visualizer = cluster_visualizer()
visualizer.append_clusters(clusters, reduced_data,marker = 'o',markersize = 20)
visualizer.append_cluster(centers, None, marker='*', markersize=100)
visualizer.show()
#%%
# For converting cluster assignments
clusts = []
order = np.concatenate(clusters).argsort()
clusts = list(np.concatenate([ [i]*len(e) for i,e in enumerate(clusters) ])[order])
print(clusts)
#plot cluster
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
scatter = ax.scatter(np.array(centers)[:, 1],
np.array(centers)[:, 0],
np.array(centers)[:, 2],
s = 250,
marker='o',
c='red',
label='centroids')
scatter = ax.scatter(reduced_data[:, 1], reduced_data[:, 0], reduced_data[:, 2],
c=clusts,s=20, cmap='winter')
#ax.set_title('X-Means Clustering')
ax.set_xlabel('Principal component 1')
ax.set_ylabel('Principal component 2')
ax.set_zlabel('Principal component 3')
ax.legend()
plt.show()
out_path = r'D:\Molla\Uark_Data\Result\Timegap\Result'
#out_path = r'D:\Molla\Stoughton_data\For_Journal\Result\Time_gap'
os.chdir(out_path)
fig.savefig('Timegap.tif', format='tif', dpi=300)
#%%
# For getting the student ID
action_clust = []
for j in range(len(clusters)):
at = []
for i in np.where(np.array(clusts) == j)[0]:
at.append(file[i])
action_clust.append(at)
df = pd.DataFrame(action_clust).T
columns = []
for i in range(len(clusters)):
columns.append(i)
columns = list(map(str,columns))
df.columns = columns
#%%
## LOAD design_output
#path = r'D:\Molla\Stoughton_data\Distribution'
path = r'D:\Molla\Uark_Data\Design_out'
os.chdir(path)
design_output = pd.read_csv('Design_output.csv')
design_output['Computer ID'] = design_output['Computer ID'].astype('str')
design_output.set_index('Computer ID')
mean = []
std = []
for i in range(len(df.columns)):
cluster_wise = []
for j in range(len(df['{}'.format(i)])):
design = df['{}'.format(i)][j]
if design in list(design_output['Computer ID']):
a = design_output.loc[design_output['Computer ID'] == design, 'Co-efficient'].iloc[0]
cluster_wise.append(a)
m = np.mean(cluster_wise)
s = np.std(cluster_wise)
mean.append(m)
std.append(s)
df.loc[len(df)] = mean
df.loc[len(df)] = std
df = df.rename(index = {df.index[-2]:'mean',df.index[-1]:'std'})
#out_path = r'D:\Molla\Stoughton_data\For_Journal\Result\Time_gap'
out_path = r'D:\Molla\Uark_Data\Result\Timegap\Result'
os.chdir(out_path)
df.to_csv('Timegap_cluster.csv', index = True)
#%%
# Performance analysis
performance = []
for i in range(len(df.columns)):
cluster_wise = []
for j in range(len(df['{}'.format(i)])):
design = df['{}'.format(i)][j]
if design in list(design_output['Computer ID']):
a = design_output.loc[design_output['Computer ID'] == design, 'Co-efficient'].iloc[0]
cluster_wise.append(a)
performance.append(cluster_wise)
perfor = pd.DataFrame(performance).T
#out_path = r'D:\Molla\Stoughton_data\For_Journal\Result\Time_gap'
out_path = r'D:\Molla\Uark_Data\Result\Timegap\Result'
os.chdir(out_path)
perfor.to_csv('Performance.csv', index = True)
#%%#### FOR PLOT
def pdf_fitted(csv):
y = np.asarray(list(csv.Timegap))
# x = np.arange(len(y))
# number_of_bins = len(y)
bins = list(range(1, int(max(csv.Timegap)) ,1))
ax = sns.histplot(csv.Timegap,bins = bins,stat = 'density')
# k = get_best_distribution(y)
dist = getattr(st,'genextreme')
params = dist.fit(y)
arg = params[:-2]
loc = params[-2]
scale = params[-1]
x_min, x_max = ax.get_xlim()
xs = np.linspace(x_min, x_max, 200)
ax.plot(xs, dist.pdf(xs, *arg, loc=loc, scale=scale), color='r', ls=':', linewidth = 0.5, label='fitted GEV')
ax.set_xlim(x_min, x_max)
# if arg:
# pdf_fitted = dist.pdf(x, *arg, loc=loc, scale=scale)* 200
# else:
# pdf_fitted = dist.pdf(x, loc=loc, scale=loc)* 200
# plt.plot(pdf_fitted, '--g',linewidth = 0.6,label = 'GEV distribution')
plt.legend(loc = 'upper right')
plt.show()
#p_path = r'D:\Molla\Stoughton_data\Data_stoughtn\Data_value\Final_Data_value'
p_path = r'D:\Molla\Uark_Data\Extracted_data\Valid_action'
os.chdir(p_path)
all_file = os.listdir(p_path)
for i in range(len(all_file)):
os.chdir(p_path)
file_nm = os.path.splitext(all_file[i])[0]
csv = pd.read_csv(all_file[i])
csv = csv[csv.Timegap != 0]
pdf_fitted(csv)
# out = r'D:\Molla\Stoughton_data\Distribution\PDF_try'
out = r'D:\Molla\Uark_Data\Result\Timegap\Fitted'
os.chdir(out)
plt.savefig('{}.png'.format(file_nm),bbox_inches='tight',dpi = 600)
plt.close()
#%%
def make_pdf(dist, params, size=10000):
"""Generate distributions's Probability Distribution Function """
# Separate parts of parameters
arg = params[:-2]
loc = params[-2]
scale = params[-1]
# Get sane start and end points of distribution
start = dist.ppf(0.01, *arg, loc=loc, scale=scale) if arg else dist.ppf(0.01, loc=loc, scale=scale)
end = dist.ppf(0.99, *arg, loc=loc, scale=scale) if arg else dist.ppf(0.99, loc=loc, scale=scale)
# Build PDF and turn into pandas Series
x = np.linspace(start, end, size)
y = dist.pdf(x, loc=loc, scale=scale, *arg)
pdf = pd.Series(y, x)
return pdf
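# Example usage (illustrative):
#   dist = getattr(st, 'genextreme')
#   params = dist.fit(csv.Timegap)
#   make_pdf(dist, params).plot(label='fitted GEV')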
#!/usr/bin/env python
# coding: utf-8
import sys
sys.path.append('.')
import pandas as pd
import numpy as np
import warnings
import agents
from agents import Seller, Buyer
from environments import MarketEnvironment
import sys
sys.path.append("../..")
sys.path.append("../../old_code")
from plot_config import *
import info_settings
import matchers
import matplotlib.pyplot as plt; plt.rcdefaults()
import numpy as np
import matplotlib.pyplot as plt
# Utility functions
def get_state(agent_id, market):
'''
A cleaner way of getting the state for a user. Assumes that information setting is black-box.
'''
return market.setting.get_state(agent_id, market.deal_history, market.agents, market.offers)[0]
#Problem settings
num_episodes = 1
num_sellers = 5
num_buyers = 5
max_budget = 100
init_offer = 0
min_ask_price = 20
init_ask_price = 100
non_rl_policy = agents.MarketAgent.get_new_offer
## name of the RL agent
rl_agent = "MCC"
# Creating data structures
names = ['H1_s', 'H2_s', 'H3_s', 'H4_s', 'H5_s', 'H6_b', 'H7_b', 'H8_b', 'H9_b', rl_agent]
agent_list = [] # this exists because the MarketEnvironment wants lists
agent_dict = {} # the code will use this because it is much more handy
for i in range(0, num_sellers):
s = Seller(names[i], min_ask_price)
agent_list.append(s)
agent_dict[names[i]] = s
for i in range(num_sellers, num_sellers + num_buyers):
if (names[i] != rl_agent):
b = Buyer(names[i], max_budget)
# initialize your agent here (if it is a buyer)
else:
b = agents.MonteCarlo_MarketAgent(names[i], max_budget, agents.MarketAgent.get_random_offer, agents.MarketAgent.get_random_offer_p)
Q = np.load('Q_values.npy')
b.Q_table = Q
b.evaluate() # sets b to evaluating mode, to avoid updating the Q table
agent_list.append(b)
agent_dict[names[i]] = b
# Creating the market
market = MarketEnvironment(sellers=agent_list[0:num_sellers], buyers=agent_list[num_sellers:num_sellers+num_buyers], max_steps=10,
matcher=matchers.RandomMatcher(reward_on_reference=True), setting=info_settings.BlackBoxSetting)
for i in range (0, num_episodes):
init_observation = market.reset()
# resetting the agents
for a_id, a in agent_dict.items():
a.reset()
step1_offers = {}
for a_id, a in agent_dict.items():
# This if-clause will be problematic in the future. Right now, any RL seller won't be
# recognized as a seller. To solve, one can create a list of seller classes, and see
# if the instance's class belongs to the list.
if (isinstance(a, Seller)):
step1_offers[a_id] = init_ask_price
a.actions.append(init_ask_price)
else:
step1_offers[a_id] = init_offer
a.actions.append(init_offer)
#First step in the market
observations, rewards, done, _ = market.step(step1_offers)
for k in rewards.keys():
agent_dict[k].total_rewards += rewards[k]
agent_dict[k].rewards.append(rewards[k])
# Rest:
while market.time < market.max_steps:
offers = {}
for a_id, a in agent_dict.items():
state = get_state(a_id, market)
if (a_id == rl_agent):
new_offer = a.target_policy(state) # call your RL algorithm here
else:
new_offer = non_rl_policy(a, state)
offers[a_id] = new_offer
if (market.done[a_id] == False):
a.actions.append(new_offer)
observations, rewards, done, _ = market.step(offers)
for k in rewards.keys():
if (agent_dict[k].done == False):
agent_dict[k].total_rewards += rewards[k]
agent_dict[k].rewards.append(rewards[k])
# Instead of setting the done field manually, call the done function of the agent,
# because some RL algorithms train not after every step, but after every episode.
# Such algorithms can silently run by calling their done() function.
if (market.done[k]):
agent_dict[k].set_done()
total_rewards = {}
sum = 0
for a_id, a in agent_dict.items():
total_rewards[a_id] = a.total_rewards
rl_agent_total = total_rewards[rl_agent]
for a_id in total_rewards:
total_rewards[a_id] /= rl_agent_total
print(pd.DataFrame(total_rewards, index=[0]))
import pandas as pd
#import geopandas as gpd
import numpy as np
import os
#from sqlalchemy import create_engine
from scipy import stats
from sklearn.preprocessing import MinMaxScaler
import math
#from shapely import wkt
from datetime import datetime, timedelta, date
import time
from sklearn.ensemble import RandomForestRegressor
from sklearn import metrics
import requests
from pyspark.sql import SparkSession
from pyspark.sql.functions import substring, length, col, expr
from pyspark.sql.types import *
import matplotlib.pyplot as plt
#import contextily as cx --> gives error?
spark = SparkSession \
.builder \
.getOrCreate()
def get_minio_herkomst_2020():
bucket = "gvb-gvb"
data_key = "*/*/*/Datalab_Reis_Herkomst_Uur_*.csv"
data_location = bucket + "/" + data_key
schema_herkomst = StructType([StructField("Datum", StringType(), True),
StructField("UurgroepOmschrijving (van vertrek)", StringType(), True),
StructField("VertrekHalteCode", StringType(), True),
StructField("VertrekHalteNaam", StringType(), True),
StructField("HerkomstLat", StringType(), True),
StructField("HerkomstLon", StringType(), True),
StructField("AantalReizen", IntegerType(), True)
])
cols_herkomst = ["Datum","UurgroepOmschrijving (van vertrek)","VertrekHalteCode","VertrekHalteNaam","AantalReizen"]
gvb_herkomst_raw_csv = spark.read.format("csv").option("header", "true").load(data_location, header = 'True', schema = schema_herkomst, sep = ";").select(*cols_herkomst)
gvb_herkomst_raw_csv = gvb_herkomst_raw_csv.distinct()
gvb_herkomst_raw_csv = gvb_herkomst_raw_csv.toPandas()
return gvb_herkomst_raw_csv
def get_minio_bestemming_2020 ():
bucket = "gvb-gvb"
data_key = "topics/gvb/*/*/*/Datalab_Reis_Bestemming_Uur_*.csv"
data_location = f"s3a://{bucket}/{data_key}"
schema_bestemming = StructType(
[StructField("Datum", StringType(), True),
StructField("UurgroepOmschrijving (van aankomst)", StringType(), True),
StructField("AankomstHalteCode", StringType(), True),
StructField("AankomstHalteNaam", StringType(), True),
StructField("AankomstLat", StringType(), True),
StructField("AankomstLon", StringType(), True),
StructField("AantalReizen", IntegerType(), True)
])
cols_bestemming = ["Datum","UurgroepOmschrijving (van aankomst)","AankomstHalteCode","AankomstHalteNaam","AantalReizen"]
gvb_bestemming_raw_csv = spark.read.format("csv").option("header", "true").load(data_location, header = 'True', schema = schema_bestemming, sep = ";").select(*cols_bestemming)
gvb_bestemming_raw_csv = gvb_bestemming_raw_csv.distinct()
gvb_bestemming_raw_csv = gvb_bestemming_raw_csv.toPandas()
return gvb_bestemming_raw_csv
def get_minio_herkomst_2021 ():
bucket = "gvb-gvb"
data_key = "topics/gvb/2021/*/*/Datalab_Reis_Herkomst_Uur_2021*.csv"
data_location = f"s3a://{bucket}/{data_key}"
schema_herkomst = StructType([StructField("Datum", StringType(), True),
StructField("UurgroepOmschrijving (van vertrek)", StringType(), True),
StructField("VertrekHalteCode", StringType(), True),
StructField("VertrekHalteNaam", StringType(), True),
StructField("HerkomstLat", StringType(), True),
StructField("HerkomstLon", StringType(), True),
StructField("AantalReizen", IntegerType(), True)
])
cols_herkomst = ["Datum","UurgroepOmschrijving (van vertrek)","VertrekHalteCode","VertrekHalteNaam","AantalReizen"]
gvb_herkomst_raw_csv = spark.read.format("csv").option("header", "true").load(data_location, header = 'True', schema = schema_herkomst, sep =";").select(*cols_herkomst)
gvb_herkomst_raw_csv = gvb_herkomst_raw_csv.distinct()
gvb_herkomst_raw_csv = gvb_herkomst_raw_csv.toPandas()
return gvb_herkomst_raw_csv
def get_minio_bestemming_2021 ():
bucket = "gvb-gvb"
data_key = "topics/gvb/2021/*/*/Datalab_Reis_Bestemming_Uur_2021*.csv"
data_location = f"s3a://{bucket}/{data_key}"
schema_bestemming = StructType(
[StructField("Datum", StringType(), True),
StructField("UurgroepOmschrijving (van aankomst)", StringType(), True),
StructField("AankomstHalteCode", StringType(), True),
StructField("AankomstHalteNaam", StringType(), True),
StructField("AankomstLat", StringType(), True),
StructField("AankomstLon", StringType(), True),
StructField("AantalReizen", IntegerType(), True)
])
cols_bestemming = ["Datum","UurgroepOmschrijving (van aankomst)","AankomstHalteCode","AankomstHalteNaam","AantalReizen"]
gvb_bestemming_raw_csv = spark.read.format("csv").option("header", "true").load(data_location, header = 'True', schema = schema_bestemming, sep = ";").select(*cols_bestemming)
gvb_bestemming_raw_csv = gvb_bestemming_raw_csv.distinct()
gvb_bestemming_raw_csv = gvb_bestemming_raw_csv.toPandas()
return gvb_bestemming_raw_csv
def read_csv_dir(dir):
read_csv_beta = pd.read_csv(dir,sep=';')
return read_csv_beta
def get_knmi_obs():
knmi_obs_schema = StructType([StructField("DD", StringType(), True),
StructField("DR", StringType(), True),
StructField("FF", StringType(), True),
StructField("FH", StringType(), True),
StructField("FX", StringType(), True),
StructField("IX", StringType(), True),
StructField("M", IntegerType(), True),
StructField("N", IntegerType(), True),
StructField("O", IntegerType(), True),
StructField("P", IntegerType(), True),
StructField("Q", IntegerType(), True),
StructField("R", IntegerType(), True),
StructField("RH", IntegerType(), True),
StructField("S", IntegerType(), True),
StructField("SQ", IntegerType(), True),
StructField("T", IntegerType(), True),
StructField("T10N", IntegerType(), True),
StructField("TD", IntegerType(), True),
StructField("U", IntegerType(), True),
StructField("VV", IntegerType(), True),
StructField("WW", IntegerType(), True),
StructField("Y", IntegerType(), True),
StructField("date", StringType(), True),
StructField("hour", IntegerType(), True),
StructField("station_code", IntegerType(), True)
])
knmi_obs = spark.read.format("json").option("header", "true").load("s3a://knmi-knmi/topics/knmi-observations/2021/*/*/*", schema=knmi_obs_schema)
return knmi_obs
def get_knmi_preds():
knmi_pred_schema = StructType([StructField("cape", IntegerType(), True),
StructField("cond", StringType(), True),
StructField("gr", StringType(), True),
StructField("gr_w", StringType(), True),
StructField("gust", StringType(), True),
StructField("gustb", StringType(), True),
StructField("gustkmh", StringType(), True),
StructField("gustkt", StringType(), True),
StructField("hw", StringType(), True),
StructField("ico", StringType(), True),
StructField("icoon", StringType(), True),
StructField("loc", StringType(), True),
StructField("luchtd", StringType(), True),
StructField("luchtdinhg", StringType(), True),
StructField("luchtdmmhg", StringType(), True),
StructField("lw", StringType(), True),
StructField("mw", StringType(), True),
StructField("neersl", StringType(), True),
StructField("offset", StringType(), True),
StructField("rv", StringType(), True),
StructField("samenv", IntegerType(), True),
StructField("temp", StringType(), True),
StructField("tijd", StringType(), True),
StructField("tijd_nl", StringType(), True),
StructField("tw", StringType(), True),
StructField("vis", StringType(), True),
StructField("windb", StringType(), True),
StructField("windkmh", StringType(), True),
StructField("windknp", StringType(), True),
StructField("windr", StringType(), True),
StructField("windrltr", StringType(), True),
StructField("winds", StringType(), True)
])
knmi_pred_cols = ('cape', 'cond', 'gr', 'gr_w', 'gust', 'gustb', 'gustkmh', 'gustkt',
'hw', 'ico', 'icoon', 'loc', 'luchtd', 'luchtdinhg', 'luchtdmmhg', 'lw',
'mw', 'neersl', 'offset', 'rv', 'samenv', 'temp', 'tijd', 'tijd_nl',
'tw', 'vis', 'windb', 'windkmh', 'windknp', 'windr', 'windrltr',
'winds')
knmi_pred = spark.read.format("json").option("header", "true").load("s3a://knmi-knmi/topics/knmi/2021/*/*/*.json.gz", schema=knmi_pred_schema).select(*knmi_pred_cols)
return knmi_pred
def get_prediction_df():
"""
Return the prediction dataframe (date- and hours only)
"""
this_year = date.today().isocalendar()[0]
this_week = date.today().isocalendar()[1]
firstdayofweek = datetime.strptime(f'{this_year}-W{int(this_week)}-1', "%Y-W%W-%w").date()
prediction_date_range = pd.date_range(firstdayofweek, periods=8, freq='D')
prediction_date_range_hour = pd.date_range(prediction_date_range.min(), prediction_date_range.max(), freq='h').delete(-1)
return prediction_date_range_hour
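# Note: the returned index contains 168 hourly timestamps (seven full days),
# starting at 00:00 on the Monday of the current week.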
def get_vacations():
"""
Retrieves vacations in the Netherlands from the Government of the Netherlands (Rijksoverheid) and returns
the list of dates that are vacation dates
"""
vacations_url = 'https://opendata.rijksoverheid.nl/v1/sources/rijksoverheid/infotypes/schoolholidays?output=json'
vacations_raw = requests.get(url = vacations_url).json()
df_vacations = pd.DataFrame(columns={'vacation', 'region', 'startdate', 'enddate'})
for x in range(0, len(vacations_raw)): # Iterate through all vacation years
for y in range(0, len(vacations_raw[0]['content'][0]['vacations'])): # number of vacations in a year
dates = pd.DataFrame(vacations_raw[x]['content'][0]['vacations'][y]['regions'])
dates['vacation'] = vacations_raw[x]['content'][0]['vacations'][y]['type'].strip() # vacation name
dates['school_year'] = vacations_raw[x]['content'][0]['schoolyear'].strip() # school year
df_vacations = df_vacations.append(dates)
filtered = df_vacations[(df_vacations['region']=='noord') | (df_vacations['region']=='heel Nederland')]
vacations_date_only = pd.DataFrame(columns={'date'})
for x in range(0, len(filtered)):
df_temporary = pd.DataFrame(data = {'date':pd.date_range(filtered.iloc[x]['startdate'], filtered.iloc[x]['enddate'], freq='D') + pd.Timedelta(days=1)})
vacations_date_only = vacations_date_only.append(df_temporary)
vacations_date_only['date'] = vacations_date_only['date'].apply(lambda x: x.date())
vacations_date_only['date'] = vacations_date_only['date'].astype('datetime64[ns]')
# Since the data from Rijksoverheid starts from school year 2019-2020, add the rest of 2019 vacations manually!
kerst_18 = pd.DataFrame(data = {'date': pd.date_range(date(2019, 1, 1), periods = 6, freq='1d')})
voorjaar_19 = pd.DataFrame(data = {'date': pd.date_range(date(2019, 2, 16), periods = 9, freq='1d')})
mei_19 = pd.DataFrame(data = {'date': pd.date_range(date(2019, 4, 27), periods = 9, freq='1d')})
zomer_19 = pd.DataFrame(data = {'date': pd.date_range(date(2019, 7, 13), periods = 7*6 + 2, freq='1d')})
vacations_date_only = vacations_date_only.append([kerst_18, voorjaar_19, mei_19, zomer_19])
return vacations_date_only
def get_events():
"""
Event data from static file. We can store events in the database in the near future. When possible, we can get it from an API.
"""
events = pd.read_excel('events_zuidoost.xlsx', sheet_name='Resultaat', header=1)
# Clean
events.dropna(how='all', inplace=True)
events.drop(events.loc[events['Datum']=='Niet bijzonder evenementen zijn hierboven niet meegenomen.'].index, inplace=True)
events.drop(events.loc[events['Locatie'].isna()].index, inplace=True)
events.drop(events.loc[events['Locatie']=='Overig'].index, inplace=True)
events['Datum'] = events['Datum'].astype('datetime64[ns]')
# Fix location names
events['Locatie'] = events['Locatie'].apply(lambda x: x.strip()) # Remove spaces
events['Locatie'] = np.where(events['Locatie'] == 'Ziggo dome', 'Ziggo Dome', events['Locatie'])
events['Locatie'] = np.where(events['Locatie'] == 'Ziggo Dome (2x)', 'Ziggo Dome', events['Locatie'])
# Get events from 2019 from static file
events = events[events['Datum'].dt.year>=2019].copy()
events.reset_index(inplace=True)
events.drop(columns=['index'], inplace=True)
events
# Add 2020-present events manually
events = events.append({'Datum':datetime(2020, 1, 19)}, ignore_index=True) # Ajax - Sparta
events = events.append({'Datum':datetime(2020, 2, 2)}, ignore_index=True) # Ajax - PSV
events = events.append({'Datum':datetime(2020, 2, 16)}, ignore_index=True) # Ajax - RKC
events = events.append({'Datum':datetime(2020, 1, 3)}, ignore_index=True) # Ajax - AZ
# Euro 2021
events = events.append({'Datum':datetime(2021, 6, 13)}, ignore_index=True) # EURO 2020 Nederland- Oekraïne
events = events.append({'Datum':datetime(2021, 6, 17)}, ignore_index=True) # EURO 2020 Nederland- Oostenrijk
events = events.append({'Datum':datetime(2021, 6, 21)}, ignore_index=True) # EURO 2020 Noord-Macedonië - Nederland
events = events.append({'Datum':datetime(2021, 6, 26)}, ignore_index=True) # EURO 2020 Wales - Denemarken
return events
def merge_csv_json(bestemming_csv, herkomst_csv, bestemming_json, herkomst_json):
bestemming = pd.concat([bestemming_csv, bestemming_json])
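# Sketch (assumption: the rest of merge_csv_json is not shown in this excerpt).
# The herkomst frames would typically be combined the same way, e.g.
#   herkomst = pd.concat([herkomst_csv, herkomst_json])
#   return bestemming.drop_duplicates(), herkomst.drop_duplicates()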
from __future__ import annotations
import typing
import toolcli
import toolstr
from ctc import evm
from ctc import spec
from ctc.cli import cli_utils
command_help = """output ERC20 balances of blocks / addresses / tokens"""
def get_command_spec() -> toolcli.CommandSpec:
return {
'f': async_balances_command,
'help': command_help,
'args': [
{'name': 'args', 'nargs': '+', 'help': '<see above>'},
{'name': '--block', 'help': 'block number'},
{
'name': '--wallets',
'nargs': '+',
'help': 'wallets to get balances of',
},
{
'name': '--blocks',
'nargs': '+',
'help': 'block numbers to get balances at',
},
{
'name': '--erc20s',
'nargs': '+',
'help': 'ERC20 addresses to get balances of',
},
#
{
'name': '--raw',
'action': 'store_true',
'help': 'whether to skip normalizing by ERC20 decimals',
},
{
'name': '--output',
'default': 'stdout',
'help': 'file path for output (.json or .csv)',
},
{
'name': '--overwrite',
'action': 'store_true',
'help': 'specify that output path can be overwritten',
},
{'name': '--top', 'metavar': 'N', 'help': 'show top N addresses'},
],
'examples': {
'WALLET --erc20s ERC20S [--block BLOCK]': {
'description': 'version 1: balance of single wallet across multiple tokens',
'runnable': False,
},
'ERC20 [--block BLOCK] [--wallets WALLETS]': {
'description': 'version 2: balances of multiple wallets in single block (default = all wallets)',
'runnable': False,
},
'ERC20 WALLET --blocks BLOCKS': {
'description': 'version 3: balance of single wallet across multiple blocks',
'runnable': False,
},
},
}
async def async_balances_command(
args: typing.Sequence[str],
block: typing.Optional[spec.BlockNumberReference],
wallets: typing.Optional[typing.Sequence[str]],
blocks: typing.Optional[typing.Sequence[str]],
erc20s: typing.Optional[typing.Sequence[str]],
raw: bool,
output: str,
overwrite: bool,
top: typing.Optional[str],
) -> None:
import pandas as pd
if wallets is not None:
wallets = await evm.async_resolve_addresses(wallets, block=block)
if erc20s is not None:
erc20s = await evm.async_resolve_addresses(erc20s, block=block)
indent = None
if len(args) == 1 and erc20s is not None:
# multiple erc20s, single wallet, single block
if wallets is not None or blocks is not None:
raise Exception(
'can only specify one of --erc20s --wallets, --blocks'
)
wallet = args[0]
if block is None:
block = 'latest'
block = await evm.async_block_number_to_int(block)
wallet = await evm.async_resolve_address(wallet, block=block)
symbols_coroutine = evm.async_get_erc20s_symbols(erc20s)
balances = await evm.async_get_erc20s_balance_of(
address=wallet,
tokens=erc20s,
block=block,
normalize=(not raw),
)
symbols = await symbols_coroutine
data = {'balance': balances, 'symbol': symbols, 'erc20_address': erc20s}
df = pd.DataFrame(data)
df = df.set_index('erc20_address')
output_data: typing.Union[spec.DataFrame, spec.Series] = df
toolstr.print_text_box('ERC20 balances in wallet')
print('- wallet:', wallet)
print('- block:', block)
print('- n_tokens:', len(erc20s))
print()
print()
indent = ' '
elif len(args) == 1:
# single erc20, multiple wallets, single block
if blocks is not None or erc20s is not None:
raise Exception(
'can only specify one of --erc20s --wallets, --blocks'
)
erc20 = args[0]
if block is None:
block = 'latest'
block = await evm.async_block_number_to_int(block)
erc20 = await evm.async_resolve_address(erc20, block=block)
symbol_coroutine = evm.async_get_erc20_symbol(erc20)
if wallets is not None:
balances = await evm.async_get_erc20_balance_of_addresses(
addresses=wallets,
token=erc20,
block=block,
normalize=(not raw),
)
symbol = await symbol_coroutine
series = pd.Series(balances, index=wallets)
"""Author: <NAME>
This contains the main Spomato class to be used to access the Spotify API and create new playlists based on the user's
defined criteria.
"""
import os
import pandas as pd
import spotipy
class Spomato():
"""Object used to access spotify API through spotipy and generate playlists.
This can take a combination user's saved tracks, playlists, and/or artist's songs to generate a playlist of a
specified length. This was conceived to use the Tomato Timer method as Spotify playlists.
This does require the user to provide a user API token from the spotify API. The API scopes used by this library are
playlist-read-private, playlist-modify-private, and user-library-read.
Parameters
----------
access_token : str
A valid Spotify Access token.
Attributes
----------
data : dictionary
Dictionary storing available data structures to create playlists.
spotipy_session : spotipy.client.Spotify
A spotipy session to access the spotify API.
access_token : str
A valid Spotify Access token. This requires the scopes playlist-read-private, playlist-modify-private,
and user-library-read
current_user_id : str
The string id of the user of the access token used to create the spotipy session.
"""
def __init__(self,
access_token=None):
"""Initialization function that sets access token and generates initial spotipy session.
Parameters
----------
access_token : str
A valid Spotify Access token. This requires the scopes playlist-read-private, playlist-modify-private,
and user-library-read.
Returns
-------
None
"""
self.access_token = access_token
self.data = {}
self.spotipy_session = self._get_spotipy_session()
self.current_user_id = self.spotipy_session.current_user()['id']
def update_token(self, access_token):
"""Updates the token and spotify session with the provided access_token. Generally used if your access token
has expired.
Parameters
----------
access_token : str
A valid Spotify Access token. This requires the scopes playlist-read-private, playlist-modify-private,
and user-library-read.
Returns
-------
None
"""
# update the class access token and the spotipy session
self.access_token = access_token
self.spotipy_session = self._get_spotipy_session()
self.current_user_id = self.spotipy_session.current_user()['id']
def _get_spotipy_session(self):
"""Internal Function to create a new spotify session.
Returns
-------
spotipy_session : spotipy.client.Spotify
A spotipy session to access the spotify API.
"""
return spotipy.Spotify(auth=self.access_token)
@staticmethod
def _parse_album(album_data, market='US'):
"""Parses the album data returned from the Spotify API and returns the song information as a pandas DataFrame.
Parameters
----------
album_data : dict
A dictionary of album data from Spotify API
market : str
A string representation of the Spotify market to filter on. Default is 'US'
Returns
-------
pandas.DataFrame
A dataframe of song ids and time for each song
"""
# iterate over each record in the album data and parse the track data
series_list = []
album_tracks = album_data['tracks']['items']
for record in album_tracks:
songid = record['id']
markets = record['available_markets']
# time is stored in milliseconds, divide to convert to seconds.
time = record['duration_ms']/1000
# filter out any songs that are not in the specified market
if market in markets:
series = pd.Series([songid, time], index=['song_id', 'time'])
series_list.append(series)
if len(series_list) > 0:
song_df = pd.concat(series_list, axis=1).transpose()
else:
song_df = pd.DataFrame(columns=['song_id', 'time'])
return song_df
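# Hedged illustration (not part of the original module): the minimal album_data shape that
# _parse_album expects, inferred from the keys read above; the ids and durations are invented.
# album_data = {'tracks': {'items': [
#     {'id': 'track123', 'available_markets': ['US'], 'duration_ms': 215000},
#     {'id': 'track456', 'available_markets': ['GB'], 'duration_ms': 180000},
# ]}}
# Spomato._parse_album(album_data) would keep only the 'US' track and return a one-row
# DataFrame with song_id='track123' and time=215.0 (seconds).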
@staticmethod
def _parse_user_playlist(data, market='US'):
"""Parses a user playlist data set from the Spotify API and returns the song information as a pandas DataFrame.
Parameters
----------
data : dictionary
Contains songs in a playlist from the Spotify API
market : str
A string representation of the Spotify market to filter on. Default is 'US'
Returns
-------
pandas.DataFrame
A dataframe of song ids and time for each song
"""
# iterate over each record in the playlist data and parse the track data
series_list = []
data = data['tracks']['items']
for item in data:
record = item['track']
songid = record['id']
markets = record['available_markets']
# time is stored in milliseconds, divide to convert to seconds.
time = record['duration_ms']/1000
# filter out any songs that are not in the specified market
if market in markets:
series = pd.Series([songid, time], index=['song_id', 'time'])
series_list.append(series)
if len(series_list) > 0:
song_df = pd.concat(series_list, axis=1).transpose()
else:
song_df = pd.DataFrame(columns=['song_id', 'time'])
return song_df
@staticmethod
def _parse_public_playlist(data, market='US'):
"""Parses a public playlist data set from the Spotify API and returns the song information as a pandas DataFrame.
Parameters
----------
data : dictionary
Contains songs in a playlist from the Spotify API
market : str
A string representation of the Spotify market to filter on. Default is 'US'
Returns
-------
pandas.DataFrame
A dataframe of song ids and time for each song
"""
# iterate over each record in the playlist data and parse the track data
series_list = []
data = data['items']
for item in data:
record = item['track']
songid = record['id']
markets = record['available_markets']
# time is stored in milliseconds, divide to convert to seconds.
time = record['duration_ms']/1000
# filter out any songs that are not in the specified market
if market in markets:
series = pd.Series([songid, time], index=['song_id', 'time'])
series_list.append(series)
if len(series_list) > 0:
song_df = pd.concat(series_list, axis=1).transpose()
else:
song_df = pd.DataFrame(columns=['song_id', 'time'])
return song_df
@staticmethod
def _parse_saved_tracks(data, market='US'):
"""Parses the saved songs data set of the user from the Spotify API and returns the song information as a
pandas DataFrame.
Parameters
----------
data : dictionary
Contains saved songs of the user from the Spotify API
market : str
A string representation of the Spotify market to filter on. Default is 'US'
Returns
-------
pandas.DataFrame
A dataframe of song ids and time for each song
"""
# iterate over each record in the saved track data and parse the individual track data
series_list = []
for item in data:
record = item['track']
songid = record['id']
markets = record['available_markets']
# time is stored in milliseconds, divide to convert to seconds.
time = record['duration_ms']/1000
# filter out any songs that are not in the specified market
if market in markets:
series = pd.Series([songid, time], index=['song_id', 'time'])
series_list.append(series)
if len(series_list) > 0:
song_df = | pd.concat(series_list, axis=1) | pandas.concat |
#! /usr/bin/env python
# Copyright (c) 2019 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
from typing import Tuple, Union
import pandas as pd
from ludwig.constants import SPLIT
def _split(data_df, split):
if SPLIT in data_df:
data_df[SPLIT] = pd.to_numeric(data_df[SPLIT])
if split:
if SPLIT in data_df:
training_set = data_df[data_df[SPLIT] == 0].drop(columns=[SPLIT])
val_set = data_df[data_df[SPLIT] == 1].drop(columns=[SPLIT])
test_set = data_df[data_df[SPLIT] == 2].drop(columns=[SPLIT])
return training_set, test_set, val_set
else:
raise ValueError("The dataset does not have splits, " "load with `split=False`")
return data_df
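# Hedged sketch (not from the original Ludwig file): how _split divides a frame whose SPLIT
# column uses 0/1/2 for train/validation/test. The toy frame below is invented for illustration.
# _df = pd.DataFrame({"text": ["a", "b", "c"], SPLIT: [0, 1, 2]})
# _train, _test, _val = _split(_df, split=True)   # one row each, SPLIT column dropped
# _full = _split(_df, split=False)                # returns the whole frame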
class CSVLoadMixin:
"""Reads a CSV file into a Pandas DataFrame."""
config: dict
processed_dataset_path: str
def load_processed_dataset(self, split) -> Union[pd.DataFrame, Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]]:
"""Loads the processed CSV into a dataframe.
:param split: Splits along 'split' column if present.
:returns: The preprocessed dataset, or a tuple of (train, validation, test) datasets.
"""
data_df = pd.read_csv(self.dataset_path)
return _split(data_df, split)
@property
def csv_filename(self):
return self.config["csv_filename"]
@property
def dataset_path(self):
return os.path.join(self.processed_dataset_path, self.csv_filename)
class ParquetLoadMixin:
"""Reads a Parquet file into a Pandas DataFrame."""
config: dict
processed_dataset_path: str
def load_processed_dataset(self, split) -> Union[pd.DataFrame, Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]]:
"""Loads the processed Parquet into a dataframe.
:param split: Splits along 'split' column if present
:returns: The preprocessed dataset, or a tuple of (train, validation, test) datasets.
"""
dataset_path = os.path.join(self.processed_dataset_path, self.parquet_filename)
data_df = | pd.read_parquet(dataset_path) | pandas.read_parquet |
import pandas as pd
from tqdm import tqdm
import newspaper
from newspaper import Config
config = Config()
config.memoize_articles = False
config.fetch_images = False
config.verbose = True
config.MAX_SUMMARY_SENT = 10
config.language = "vi"
vnexpress = newspaper.build('https://vnexpress.net', config=config)
zingvn = newspaper.build('https://news.zing.vn', config=config)
kenh14 = newspaper.build('https://kenh14.vn/', config=config)
# list_url_slate_paper = []
# for article in slate_paper.articles:
# list_url_slate_paper.append(article.url)
def extract_data(news, file_name):
print("crawling: ", file_name)
data = []
for article in tqdm(news.articles):
# count += 1
# if count > 10:
# break
if "#box_comment" in article.url:
continue
temp = {"link": article.url}
try:
article.download()
article.parse()
article.nlp()
temp["keyword"] = ";".join(article.keywords)
temp["summary"] = ";".join(article.summary.split("\n"))
temp["text"] = article.text
temp["title"] = article.title
temp["publish_date"] = article.publish_date
except Exception as e:
print(e)
pass
data.append(temp)
# print(data)
df_frame = | pd.DataFrame(data) | pandas.DataFrame |
import unittest
from unittest.mock import MagicMock
import haiku as hk
import numpy as np
import pandas as pd
from jax import numpy as jnp
from jax.nn import relu
import pandas_toolkit.nn
from pandas_toolkit.nn import _get_batch, _get_num_batches
from pandas_toolkit.nn.Model import Model
from pandas_toolkit.utils.custom_types import Batch
class TestGetNumBatches(unittest.TestCase):
def test_returns_1_when_batch_size_is_None(self):
batch_size = None
num_rows = MagicMock()
actual_batch_number = _get_num_batches(num_rows, batch_size)
expected_batch_number = 1
self.assertEqual(expected_batch_number, actual_batch_number)
def test_returns_correctly_when_batch_size_perfectly_divides_num_rows(self):
batch_size = 2
num_rows = 10
actual_batch_number = _get_num_batches(num_rows, batch_size)
expected_batch_number = 5
self.assertEqual(expected_batch_number, actual_batch_number)
def test_returns_correctly_when_batch_size_does_not_perfectly_divide_num_rows(self):
batch_size = 3
num_rows = 10
actual_batch_number = _get_num_batches(num_rows, batch_size)
expected_batch_number = 4
self.assertEqual(expected_batch_number, actual_batch_number)
class TestGetBatch(unittest.TestCase):
def test_returns_correctly(self):
df_train = pd.DataFrame({"x": [0, 1, 2], "y": [10, 11, 12]})
batch_size = 2
batch_number = 0
actual_batch = _get_batch(df_train, batch_number, batch_size, x_columns=["x"], y_columns=["y"])
expected_batch = Batch(x=jnp.array([[0], [1]]), y=jnp.array([[10], [11]]))
np.testing.assert_array_equal(expected_batch.x, actual_batch.x)
np.testing.assert_array_equal(expected_batch.y, actual_batch.y)
batch_number = 1
actual_batch = _get_batch(df_train, batch_number, batch_size, x_columns=["x"], y_columns=["y"])
expected_batch = Batch(x=jnp.array([[2]]), y=jnp.array([[12]]))
np.testing.assert_array_equal(expected_batch.x, actual_batch.x)
np.testing.assert_array_equal(expected_batch.y, actual_batch.y)
class TestWorkflow(unittest.TestCase):
def test_simple_relu_net(self):
# Train/Validation data
df_train = | pd.DataFrame({"x": [0, 1], "y": [0, 1]}) | pandas.DataFrame |
import copy
import logging
import pandas as pd
import numpy as np
from collections import Counter
from sklearn import preprocessing, utils
import sklearn.model_selection as ms
from scipy.sparse import isspmatrix
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
import os
import seaborn as sns
from abc import ABC, abstractmethod
# TODO: Move this to a common lib?
OUTPUT_DIRECTORY = './output'
if not os.path.exists(OUTPUT_DIRECTORY):
os.makedirs(OUTPUT_DIRECTORY)
if not os.path.exists('{}/images'.format(OUTPUT_DIRECTORY)):
os.makedirs('{}/images'.format(OUTPUT_DIRECTORY))
# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
def plot_pairplot(title, df, class_column_name=None):
plt = sns.pairplot(df, hue=class_column_name)
return plt
# Adapted from https://stats.stackexchange.com/questions/239973/a-general-measure-of-data-set-imbalance
def is_balanced(seq):
n = len(seq)
classes = [(clas, float(count)) for clas, count in Counter(seq).items()]
k = len(classes)
H = -sum([(count/n) * np.log((count/n)) for clas, count in classes])
return H/np.log(k) > 0.75
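# Hedged worked example (added, not in the original file): is_balanced normalises the Shannon
# entropy of the class counts by log(k) and calls the data balanced above 0.75.
# is_balanced([0] * 50 + [1] * 50)  -> True   (normalised entropy = 1.0)
# is_balanced([0] * 95 + [1] * 5)   -> False  (normalised entropy ~= 0.29)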
class DataLoader(ABC):
def __init__(self, path, verbose, seed):
self._path = path
self._verbose = verbose
self._seed = seed
self.features = None
self.classes = None
self.testing_x = None
self.testing_y = None
self.training_x = None
self.training_y = None
self.binary = False
self.balanced = False
self._data = pd.DataFrame()
def load_and_process(self, data=None, preprocess=True):
"""
Load data from the given path and perform any initial processing required. This will populate the
features and classes and should be called before any processing is done.
:return: Nothing
"""
if data is not None:
self._data = data
self.features = None
self.classes = None
self.testing_x = None
self.testing_y = None
self.training_x = None
self.training_y = None
else:
self._load_data()
self.log("Processing {} Path: {}, Dimensions: {}", self.data_name(), self._path, self._data.shape)
if self._verbose:
old_max_rows = pd.options.display.max_rows
pd.options.display.max_rows = 10
self.log("Data Sample:\n{}", self._data)
pd.options.display.max_rows = old_max_rows
if preprocess:
self.log("Will pre-process data")
self._preprocess_data()
self.get_features()
self.get_classes()
self.log("Feature dimensions: {}", self.features.shape)
self.log("Classes dimensions: {}", self.classes.shape)
self.log("Class values: {}", np.unique(self.classes))
class_dist = np.histogram(self.classes)[0]
class_dist = class_dist[np.nonzero(class_dist)]
self.log("Class distribution: {}", class_dist)
self.log("Class distribution (%): {}", (class_dist / self.classes.shape[0]) * 100)
self.log("Sparse? {}", isspmatrix(self.features))
if len(class_dist) == 2:
self.binary = True
self.balanced = is_balanced(self.classes)
self.log("Binary? {}", self.binary)
self.log("Balanced? {}", self.balanced)
def scale_standard(self):
self.features = StandardScaler().fit_transform(self.features)
if self.training_x is not None:
self.training_x = StandardScaler().fit_transform(self.training_x)
if self.testing_x is not None:
self.testing_x = StandardScaler().fit_transform(self.testing_x)
def build_train_test_split(self, test_size=0.3):
if not self.training_x and not self.training_y and not self.testing_x and not self.testing_y:
self.training_x, self.testing_x, self.training_y, self.testing_y = ms.train_test_split(
self.features, self.classes, test_size=test_size, random_state=self._seed, stratify=self.classes
)
def get_features(self, force=False):
if self.features is None or force:
self.log("Pulling features")
self.features = np.array(self._data.iloc[:, 0:-1])
return self.features
def get_classes(self, force=False):
if self.classes is None or force:
self.log("Pulling classes")
self.classes = np.array(self._data.iloc[:, -1])
return self.classes
def dump_test_train_val(self, test_size=0.2, random_state=123):
ds_train_x, ds_test_x, ds_train_y, ds_test_y = ms.train_test_split(self.features, self.classes,
test_size=test_size,
random_state=random_state,
stratify=self.classes)
pipe = Pipeline([('Scale', preprocessing.StandardScaler())])
train_x = pipe.fit_transform(ds_train_x, ds_train_y)
train_y = np.atleast_2d(ds_train_y).T
test_x = pipe.transform(ds_test_x)
test_y = np.atleast_2d(ds_test_y).T
train_x, validate_x, train_y, validate_y = ms.train_test_split(train_x, train_y,
test_size=test_size, random_state=random_state,
stratify=train_y)
test_y = pd.DataFrame(np.where(test_y == 0, -1, 1))
train_y = pd.DataFrame(np.where(train_y == 0, -1, 1))
validate_y = pd.DataFrame(np.where(validate_y == 0, -1, 1))
tst = pd.concat([pd.DataFrame(test_x), test_y], axis=1)
trg = pd.concat([pd.DataFrame(train_x), train_y], axis=1)
val = pd.concat([pd.DataFrame(validate_x), validate_y], axis=1)
tst.to_csv('data/{}_test.csv'.format(self.data_name()), index=False, header=False)
trg.to_csv('data/{}_train.csv'.format(self.data_name()), index=False, header=False)
val.to_csv('data/{}_validate.csv'.format(self.data_name()), index=False, header=False)
@abstractmethod
def _load_data(self):
pass
@abstractmethod
def data_name(self):
pass
@abstractmethod
def _preprocess_data(self):
pass
@abstractmethod
def class_column_name(self):
pass
@abstractmethod
def pre_training_adjustment(self, train_features, train_classes):
"""
Perform any adjustments to training data before training begins.
:param train_features: The training features to adjust
:param train_classes: The training classes to adjust
:return: The processed data
"""
return train_features, train_classes
def reload_from_hdf(self, hdf_path, hdf_ds_name, preprocess=True):
self.log("Reloading from HDF {}".format(hdf_path))
loader = copy.deepcopy(self)
df = pd.read_hdf(hdf_path, hdf_ds_name)
loader.load_and_process(data=df, preprocess=preprocess)
loader.build_train_test_split()
return loader
def log(self, msg, *args):
"""
If the learner has verbose set to true, log the message with the given parameters using string.format
:param msg: The log message
:param args: The arguments
:return: None
"""
if self._verbose:
logger.info(msg.format(*args))
class CreditDefaultData(DataLoader):
def __init__(self, path='data/default of credit card clients.xls', verbose=False, seed=1):
super().__init__(path, verbose, seed)
def _load_data(self):
self._data = pd.read_excel(self._path, header=1, index_col=0)
def data_name(self):
return 'CreditDefaultData'
def class_column_name(self):
return 'default payment next month'
def _preprocess_data(self):
pass
def pre_training_adjustment(self, train_features, train_classes):
"""
Perform any adjustments to training data before training begins.
:param train_features: The training features to adjust
:param train_classes: The training classes to adjust
:return: The processed data
"""
return train_features, train_classes
class CreditApprovalData(DataLoader):
def __init__(self, path='data/crx.data', verbose=False, seed=1):
super().__init__(path, verbose, seed)
def _load_data(self):
self._data = | pd.read_csv(self._path, header=None) | pandas.read_csv |
import pandas as pd
from resources.utilities import preprocess_sentence
import os
from settings import BASE_DIR
def apply_preprocessing(data):
"""
Description: function that applies preprocessing to the text in
the data
Input:
-data: pandas DataFrame, the data to be preprocessed
Output:
-data: pandas DataFrame, the preprocessed data
"""
# iterrows() yields copies, so assigning into `row` would leave `data` unchanged;
# write the cleaned text back to the DataFrame explicitly.
for i, row in data.iterrows():
data.at[i, "text"] = preprocess_sentence(row["text"])
return data
# Load the cleaned data from a .csv file
twitter_data = os.path.join(BASE_DIR, "corpora/raw/twitter/cleaned_twitter.csv")
data = pd.read_csv(twitter_data, encoding="ISO-8859-1")
# Set the data split percentages
training_perc = 0.6
dev_perc = 0.3
testing_perc = 0.1
# Assign the data for the positive and negative sentiments to proper dataframes
pos_data = data[data["target"] == "positive"][["text", "target"]]
neg_data = data[data["target"] == "negative"][["text", "target"]]
# Modify column names to conform to standard format
pos_data.columns = ["text", "sentiment"]
neg_data.columns = ["text", "sentiment"]
# Shuffle the positive data and split according to percentages
pos_data_shuffled = pos_data.sample(frac=1).reset_index(drop=True)
len_pos = len(pos_data_shuffled)
pos_training = pos_data_shuffled.loc[: int(len_pos * training_perc)]
pos_dev = pos_data_shuffled.loc[
int(len_pos * training_perc) : int(len_pos * (training_perc + dev_perc))
]
pos_testing = pos_data_shuffled.loc[int(len_pos * (training_perc + dev_perc)) :]
# Shuffle the negative data and split according to percentages
neg_data_shuffled = neg_data.sample(frac=1).reset_index(drop=True)
len_neg = len(neg_data_shuffled)
neg_training = neg_data_shuffled.loc[: int(len_neg * training_perc)]
neg_dev = neg_data_shuffled.loc[
int(len_neg * training_perc) : int(len_neg * (training_perc + dev_perc))
]
neg_testing = neg_data_shuffled.loc[int(len_neg * (training_perc + dev_perc)) :]
# Construct the development and testing data from their positive and negative
# components
dev_data = | pd.concat([pos_dev, neg_dev]) | pandas.concat |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# File : utils.py
# Modified : 17.02.2022
# By : <NAME> <<EMAIL>>
from collections import OrderedDict
import numpy as np
import os
from typing import List
import random
import cv2
from PIL import Image
import torch
import torchvision
from pathlib import Path
import torch.nn as nn
from torch import optim
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.utils.data import DataLoader
from efficientnet_pytorch import EfficientNet
from torchvision import transforms
from torch.utils.data import Dataset
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, roc_auc_score, f1_score
import wandb
training_transforms = transforms.Compose([#Microscope(),
#AdvancedHairAugmentation(),
transforms.RandomRotation(30),
#transforms.RandomResizedCrop(256, scale=(0.8, 1.0)),
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
#transforms.ColorJitter(brightness=32. / 255.,saturation=0.5,hue=0.01),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])])
testing_transforms = transforms.Compose([transforms.Resize(256),
transforms.CenterCrop(256),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])])
def seed_worker(worker_id):
worker_seed = torch.initial_seed() % 2**32
np.random.seed(worker_seed)
random.seed(worker_seed)
# Creating seeds to make results reproducible
def seed_everything(seed_value):
np.random.seed(seed_value)
random.seed(seed_value)
torch.manual_seed(seed_value)
os.environ['PYTHONHASHSEED'] = str(seed_value)
if torch.cuda.is_available():
torch.cuda.manual_seed(seed_value)
torch.cuda.manual_seed_all(seed_value)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
seed = 2022
seed_everything(seed)
def get_parameters(net, EXCLUDE_LIST) -> List[np.ndarray]:
parameters = []
for i, (name, tensor) in enumerate(net.state_dict().items()):
# print(f" [layer {i}] {name}, {type(tensor)}, {tensor.shape}, {tensor.dtype}")
# Check if this tensor should be included or not
exclude = False
for forbidden_ending in EXCLUDE_LIST:
if forbidden_ending in name:
exclude = True
if exclude:
continue
# Convert torch.Tensor to NumPy.ndarray
parameters.append(tensor.cpu().numpy())
return parameters
def set_parameters(net, parameters, EXCLUDE_LIST):
keys = []
for name in net.state_dict().keys():
# Check if this tensor should be included or not
exclude = False
for forbidden_ending in EXCLUDE_LIST:
if forbidden_ending in name:
exclude = True
if exclude:
continue
# Add to list of included keys
keys.append(name)
params_dict = zip(keys, parameters)
state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict})
net.load_state_dict(state_dict, strict=False)
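# Hedged usage sketch (not part of the original script): a get/set round trip that keeps
# BatchNorm buffers out of the exchanged weights, as is common in federated averaging.
# The tiny model and the EXCLUDE_LIST endings below are assumptions made only for illustration.
# _demo_net = nn.Sequential(nn.Linear(4, 8), nn.BatchNorm1d(8), nn.Linear(8, 1))
# _exclude = ["running_mean", "running_var", "num_batches_tracked"]
# _weights = get_parameters(_demo_net, _exclude)   # list of np.ndarray, BN buffers skipped
# set_parameters(_demo_net, _weights, _exclude)    # written back with strict=False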
class Net(nn.Module):
def __init__(self, arch, return_feats=False):
super(Net, self).__init__()
self.arch = arch
self.return_feats = return_feats
if 'fgdf' in str(arch.__class__):
self.arch.fc = nn.Linear(in_features=1280, out_features=500, bias=True)
if 'EfficientNet' in str(arch.__class__):
self.arch._fc = nn.Linear(in_features=self.arch._fc.in_features, out_features=500, bias=True)
#self.dropout1 = nn.Dropout(0.2)
else:
self.arch.fc = nn.Linear(in_features=arch.fc.in_features, out_features=500, bias=True)
self.output = nn.Linear(500, 1)
def forward(self, images):
"""
No sigmoid in forward because we are going to use BCEWithLogitsLoss
Which applies sigmoid for us when calculating a loss
"""
x = images
features = self.arch(x)
output = self.output(features)
if self.return_feats:
return features
return output
def load_model(model = 'efficientnet-b2', device="cuda"):
if "efficientnet" in model:
arch = EfficientNet.from_pretrained(model)
elif model == "googlenet":
arch = torchvision.models.googlenet(pretrained=True)
else:
arch = torchvision.models.resnet50(pretrained=True)
model = Net(arch=arch).to(device)
return model
def create_split(source_dir, n_b, n_m):
# Split synthetic dataset
input_images = [str(f) for f in sorted(Path(source_dir).rglob('*')) if os.path.isfile(f)]
ind_0, ind_1 = [], []
for i, f in enumerate(input_images):
if f.split('.')[0][-1] == '0':
ind_0.append(i)
else:
ind_1.append(i)
train_id_list, val_id_list = ind_0[:round(len(ind_0)*0.8)], ind_0[round(len(ind_0)*0.8):] #ind_0[round(len(ind_0)*0.6):round(len(ind_0)*0.8)] ,
train_id_1, val_id_1 = ind_1[:round(len(ind_1)*0.8)], ind_1[round(len(ind_1)*0.8):] #ind_1[round(len(ind_1)*0.6):round(len(ind_1)*0.8)] ,
train_id_list = np.append(train_id_list, train_id_1)
val_id_list = np.append(val_id_list, val_id_1)
return train_id_list, val_id_list #test_id_list
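# Hedged note (added for clarity; the filenames are invented): create_split assumes the class
# label is encoded as the last character of the file stem, e.g. 'synth_0001_0.jpg' -> benign
# (ind_0) and 'synth_0002_1.jpg' -> melanoma (ind_1); each class is then split 80/20.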
def load_isic_by_patient(partition, path='/workspace/melanoma_isic_dataset'):
# Load data
df = pd.read_csv(os.path.join(path,'train_concat.csv'))
train_img_dir = os.path.join(path,'train/train/')
df['image_name'] = [os.path.join(train_img_dir, df.iloc[index]['image_name'] + '.jpg') for index in range(len(df))]
df["patient_id"] = df["patient_id"].fillna('nan')
# df.loc[df['patient_id'].isnull()==True]['target'].unique() # 337 rows melanomas
"""
# EXP 6: same bias/ratio same size - different BIASES
bias_df = pd.read_csv("/workspace/flower/bias_pseudoannotations_real_train_ISIC20.csv")
bias_df['image_name'] = [os.path.join(train_img_dir, bias_df.iloc[index]['image_name']) for index in range(len(bias_df))]
#bias_df = pd.merge(bias_df, df, how='inner', on=["image_name"])
target_groups = bias_df.groupby('target', as_index=False) # keep column target
df_ben = target_groups.get_group(0) # 32533 benign
df_mal = target_groups.get_group(1) # 5105 melanoma
# EXP 6
if partition == 0:
#FRAMES
df_b = df_ben.groupby('black_frame').get_group(1) # 687 with frame
df_m = df_mal.groupby(['black_frame','ruler_mark']).get_group((1,0))[:323] # 2082 with frame
df = pd.concat([df_b, df_m]) # Use 1010 (32%mel) # TOTAL 2848 (75% mel)
train_split, valid_split = train_test_split(df, stratify=df.target, test_size = 0.20, random_state=42)
elif partition == 1:
# RULES
df_b = df_ben.groupby(['black_frame','ruler_mark']).get_group((0,1)).head(1125) # 4717 with rules and no frames
df_m = df_mal.groupby(['black_frame','ruler_mark']).get_group((0,1)).head(375) # 516 with rules and no frames
df = pd.concat([df_b, df_m]) # Use 1500 (25%mel) # TOTAL 5233 (10% mel)
train_split, valid_split = train_test_split(df, stratify=df.target, test_size = 0.20, random_state=42)
elif partition == 2:
# NONE
df_b = df_ben.groupby(['black_frame','ruler_mark']).get_group((0,0)).head(1125) # 27129 without frames or rulers
df_m = df_mal.groupby(['black_frame','ruler_mark']).get_group((0,0)).head(375) # 2507 without frames or rulers 14%
df = pd.concat([df_b, df_m]) # Use 1500 (25%mel) # TOTAL 29636 (8.4% mel)
train_split, valid_split = train_test_split(df, stratify=df.target, test_size = 0.20, random_state=42)
else:
#server
df_b = df_ben.groupby(['black_frame','ruler_mark']).get_group((0,0))[2000:5000] # 3000
df_m = df_mal.groupby(['black_frame','ruler_mark']).get_group((0,0))[500:1500] # 1000 (30% M) T=4000
valid_split = pd.concat([df_b, df_m])
validation_df=pd.DataFrame(valid_split)
testing_dataset = CustomDataset(df = validation_df, train = True, transforms = testing_transforms )
return testing_dataset
"""
# Split by Patient
patient_groups = df.groupby('patient_id') #37311
# Split by Patient and Class
melanoma_groups_list = [patient_groups.get_group(x) for x in patient_groups.groups if patient_groups.get_group(x)['target'].unique().all()==1] # 4188 - after adding ID na 4525
benign_groups_list = [patient_groups.get_group(x) for x in patient_groups.groups if 0 in patient_groups.get_group(x)['target'].unique()] # 2055 - 33123
np.random.shuffle(melanoma_groups_list)
np.random.shuffle(benign_groups_list)
# EXP 5: same bias/ratio different size - simulate regions
if partition == 0:
df_b = pd.concat(benign_groups_list[:270]) # 4253
df_m = pd.concat(melanoma_groups_list[:350]) # 1029 (19.5% melanomas) T=5282
df = pd.concat([df_b, df_m])
train_split, valid_split = train_test_split(df, stratify=df.target, test_size = 0.20, random_state=42)
elif partition == 1:
df_b = pd.concat(benign_groups_list[270:440]) # 2881
df_m = pd.concat(melanoma_groups_list[350:539]) # 845 (22.6% melanomas) T=3726
df = pd.concat([df_b, df_m])
train_split, valid_split = train_test_split(df, stratify=df.target, test_size = 0.20, random_state=42)
elif partition == 2:
df_b = pd.concat(benign_groups_list[440:490]) # 805
df_m = pd.concat(melanoma_groups_list[539:615]) # 194 (19.4% melanomas) T=999
df = pd.concat([df_b, df_m])
train_split, valid_split = train_test_split(df, stratify=df.target, test_size = 0.20, random_state=42)
elif partition == 3:
df_b = pd.concat(benign_groups_list[490:511]) # 341
df_m = pd.concat(melanoma_groups_list[615:640]) # 87 (20% melanomas) T=428
df = pd.concat([df_b, df_m])
train_split, valid_split = train_test_split(df, stratify=df.target, test_size = 0.20, random_state=42)
elif partition == 4:
df_b = pd.concat(benign_groups_list[515:520]) # 171
df_m = pd.concat(melanoma_groups_list[640:656]) # 47 (21.5% melanomas) T=218
df = pd.concat([df_b, df_m])
train_split, valid_split = train_test_split(df, stratify=df.target, test_size = 0.20, random_state=42)
else:
#server
df_b = pd.concat(benign_groups_list[520:720]) # 3531
df_m = pd.concat(melanoma_groups_list[700:1100]) # 1456 (29% M) T=4987
valid_split = pd.concat([df_b, df_m])
validation_df=pd.DataFrame(valid_split)
testing_dataset = CustomDataset(df = validation_df, train = True, transforms = testing_transforms )
return testing_dataset
"""
# EXP 4: same size (1.5k) different ratio b/m
if partition == 1:
df_b = pd.concat(benign_groups_list[:75]) # 1118
df_m = pd.concat(melanoma_groups_list[:90]) # 499 (30.8% melanomas) T=1617
df = pd.concat([df_b, df_m])
train_split, valid_split = train_test_split(df, stratify=df.target, test_size = 0.20, random_state=42)
elif partition == 2:
df_b = pd.concat(benign_groups_list[75:185]) # 1600
df_m = pd.concat(melanoma_groups_list[90:95]) # 17 (1% melanomas) T=1617
df = pd.concat([df_b, df_m])
train_split, valid_split = train_test_split(df, stratify=df.target, test_size = 0.20, random_state=42)
elif partition == 0:
df_b = pd.concat(benign_groups_list[185:191]) # 160
df_m = pd.concat(melanoma_groups_list[150:550]) # 1454 (90% melanomas) T=1614
df = pd.concat([df_b, df_m])
train_split, valid_split = train_test_split(df, stratify=df.target, test_size = 0.20, random_state=42)
else:
#server
df_b = pd.concat(benign_groups_list[500:700]) # 3630
df_m = pd.concat(melanoma_groups_list[600:1100]) # 1779 (33% M) T=5409
valid_split = pd.concat([df_b, df_m])
validation_df=pd.DataFrame(valid_split)
testing_dataset = CustomDataset(df = validation_df, train = True, transforms = testing_transforms )
return testing_dataset
# EXP 3
if partition == 2:
df_b = pd.concat(benign_groups_list[:90]) # 1348
df_m = pd.concat(melanoma_groups_list[:60]) # 172 (11.3% melanomas) T=1520
df = pd.concat([df_b, df_m])
train_split, valid_split = train_test_split(df, stratify=df.target, test_size = 0.20, random_state=42)
elif partition == 1:
df_b = pd.concat(benign_groups_list[90:150]) # 937
df_m = pd.concat(melanoma_groups_list[60:90]) # 99 (10% melanomas) T=1036
df = pd.concat([df_b, df_m])
train_split, valid_split = train_test_split(df, stratify=df.target, test_size = 0.20, random_state=42)
elif partition == 0:
df_b = pd.concat(benign_groups_list[150:170]) # 246
df_m = pd.concat(melanoma_groups_list[90:300]) # 626 (72% melanomas) T=872
df = pd.concat([df_b, df_m])
train_split, valid_split = train_test_split(df, stratify=df.target, test_size = 0.20, random_state=42)
else:
#server
df_b = pd.concat(benign_groups_list[170:370]) # 3343
df_m = pd.concat(melanoma_groups_list[300:1000]) # 2603
valid_split = pd.concat([df_b, df_m])
validation_df=pd.DataFrame(valid_split)
testing_dataset = CustomDataset(df = validation_df, train = True, transforms = testing_transforms )
return testing_dataset
#EXP 2
if partition == 2:
df_b_test = pd.concat(benign_groups_list[1800:]) # 4462
df_b_train = pd.concat(benign_groups_list[800:1800]) # 16033 - TOTAL 20495 samples
df_m_test = pd.concat(melanoma_groups_list[170:281]) # 340
df_m_train = pd.concat(melanoma_groups_list[281:800]) # 1970 - TOTAL: 2310 samples
elif partition == 1:
df_b_test = pd.concat(benign_groups_list[130:250]) # 1949
df_b_train = pd.concat(benign_groups_list[250:800]) # 8609 - TOTAL 10558 samples
df_m_test = pd.concat(melanoma_groups_list[1230:]) # 303
df_m_train = pd.concat(melanoma_groups_list[800:1230]) # 1407 - TOTAL 1710 samples
else:
df_b_test = pd.concat(benign_groups_list[:30]) # 519
df_b_train = pd.concat(benign_groups_list[30:130]) # 1551 - TOTAL: 2070 samples
df_m_test = pd.concat(melanoma_groups_list[:70]) # 191
df_m_train = pd.concat(melanoma_groups_list[70:170]) # 314 - TOTAL: 505 samples
train_split = pd.concat([df_b_train, df_m_train])
valid_split = pd.concat([df_b_test, df_m_test])
"""
train_df=pd.DataFrame(train_split)
validation_df=pd.DataFrame(valid_split)
num_examples = {"trainset" : len(train_df), "testset" : len(validation_df)}
return train_df, validation_df, num_examples
def load_isic_by_patient_server( path='/workspace/melanoma_isic_dataset'):
# Load data
df = pd.read_csv(os.path.join(path,'train_concat.csv'))
train_img_dir = os.path.join(path,'train/train/')
df['image_name'] = [os.path.join(train_img_dir, df.iloc[index]['image_name'] + '.jpg') for index in range(len(df))]
df["patient_id"] = df["patient_id"].fillna('nan')
# df.loc[df['patient_id'].isnull()==True]['target'].unique() # 337 rows melanomas
# Split by Patient
patient_groups = df.groupby('patient_id') #37311
melanoma_groups_list = [patient_groups.get_group(x) for x in patient_groups.groups if patient_groups.get_group(x)['target'].unique().all()==1] # 4188 - after adding na 4525
benign_groups_list = [patient_groups.get_group(x) for x in patient_groups.groups if 0 in patient_groups.get_group(x)['target'].unique()] # 2055 - 33123
np.random.shuffle(melanoma_groups_list)
np.random.shuffle(benign_groups_list)
df_b_test = pd.concat(benign_groups_list[1800:]) # 4462
df_b_train = pd.concat(benign_groups_list[800:1800]) # 16033 - TOTAL 20495 samples
df_m_test = pd.concat(melanoma_groups_list[170:281]) # 340
df_m_train = pd.concat(melanoma_groups_list[281:800]) # 1970 - TOTAL: 2310 samples
train_split1 = pd.concat([df_b_train, df_m_train])
valid_split1 = pd.concat([df_b_test, df_m_test])
df_b_test = pd.concat(benign_groups_list[130:250]) # 1949
df_b_train = pd.concat(benign_groups_list[250:800]) # 8609 - TOTAL 10558 samples
df_m_test = pd.concat(melanoma_groups_list[1230:]) # 303
df_m_train = pd.concat(melanoma_groups_list[800:1230]) # 1407 - TOTAL 1710 samples
train_split2 = pd.concat([df_b_train, df_m_train])
valid_split2 = pd.concat([df_b_test, df_m_test])
df_b_test = pd.concat(benign_groups_list[:30]) # 519
df_b_train = pd.concat(benign_groups_list[30:130]) # 1551 - TOTAL: 2070 samples
df_m_test = pd.concat(melanoma_groups_list[:70]) # 191
df_m_train = pd.concat(melanoma_groups_list[70:170]) # 314 - TOTAL: 505 samples
train_split3 = pd.concat([df_b_train, df_m_train])
valid_split3 = pd.concat([df_b_test, df_m_test])
train_split = pd.concat([train_split1, train_split2, train_split3])
valid_split = pd.concat([valid_split1, valid_split2, valid_split3])
training_df = pd.DataFrame(train_split) # 25967b 4137m
validation_df = pd.DataFrame(valid_split) # 6575b 969m
num_examples = {"trainset" : len(training_df), "testset" : len(validation_df)}
return training_df, validation_df, num_examples
def load_isic_data(path='/workspace/melanoma_isic_dataset'):
# ISIC Dataset
df = pd.read_csv(os.path.join(path, 'train_concat.csv'))
train_img_dir = os.path.join(path, 'train/train/')
df['image_name'] = [os.path.join(train_img_dir, df.iloc[index]['image_name'] + '.jpg') for index in range(len(df))]
train_split, valid_split = train_test_split (df, stratify=df.target, test_size = 0.20, random_state=42)
train_df=pd.DataFrame(train_split)
validation_df=pd.DataFrame(valid_split)
training_dataset = CustomDataset(df = train_df, train = True, transforms = training_transforms )
testing_dataset = CustomDataset(df = validation_df, train = True, transforms = testing_transforms )
num_examples = {"trainset" : len(training_dataset), "testset" : len(testing_dataset)}
return training_dataset, testing_dataset, num_examples
def load_synthetic_data(data_path, n_imgs):
# Synthetic Dataset
input_images = [str(f) for f in sorted(Path(data_path).rglob('*')) if os.path.isfile(f)]
y = [0 if f.split('.jpg')[0][-1] == '0' else 1 for f in input_images]
n_b, n_m = [int(i) for i in n_imgs.split(',') ]
train_id_list, val_id_list = create_split(data_path, n_b , n_m)
train_img = [input_images[int(i)] for i in train_id_list]
train_gt = [y[int(i)] for i in train_id_list]
test_img = [input_images[int(i)] for i in val_id_list]
test_gt = [y[int(i)] for i in val_id_list]
#train_img, test_img, train_gt, test_gt = train_test_split(input_images, y, stratify=y, test_size=0.2, random_state=3)
synt_train_df = | pd.DataFrame({'image_name': train_img, 'target': train_gt}) | pandas.DataFrame |
import os
import pandas as pd
# https://github.com/CSSEGISandData/COVID-19.git
REPOSITORY = "https://raw.githubusercontent.com/CSSEGISandData"
MAIN_FOLDER = "COVID-19/master/csse_covid_19_data/csse_covid_19_time_series"
CONFIRMED_FILE = "time_series_covid19_confirmed_global.csv"
DEATHS_FILE = "time_series_covid19_deaths_global.csv"
RECOVERED_FILE = "time_series_covid19_recovered_global.csv"
CONFIRMED_PATH = os.path.join(REPOSITORY, MAIN_FOLDER, CONFIRMED_FILE)
DEATHS_PATH = os.path.join(REPOSITORY, MAIN_FOLDER, DEATHS_FILE)
RECOVERED_PATH = os.path.join(REPOSITORY, MAIN_FOLDER, RECOVERED_FILE)
def group_data_by_country(df):
df = df.drop(columns=["Lat", "Long"])
df_bycountry = df.groupby("Country/Region").sum()
# summing for all country
df_bycountry.loc["Total"] = df_bycountry.sum(axis=0)
return df_bycountry
def get_data_normalized(df):
# dividing by the sum
maximums = df.iloc[:, -1]
df_normalized = df.div(maximums.to_numpy(), axis=0)
return df_normalized
def get_data_for_sir(df_death, df_recovered, df_confirmed):
df_recovered_or_passed = df_recovered + df_death
df_infected = df_confirmed - df_recovered_or_passed
return df_recovered_or_passed, df_infected
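# Hedged worked example (numbers invented): for one country/day with confirmed=100, deaths=5
# and recovered=60, the removed compartment is 5 + 60 = 65 and the infected compartment is
# 100 - 65 = 35, which is the decomposition an SIR model needs.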
def extract_process_data():
df_confirmed = pd.read_csv(CONFIRMED_PATH)
df_deaths = pd.read_csv(DEATHS_PATH)
df_recovered = pd.read_csv(RECOVERED_PATH)
df_confirmed_by_country = group_data_by_country(df_confirmed)
df_deaths_by_country = group_data_by_country(df_deaths)
df_recovered_by_country = group_data_by_country(df_recovered)
df_recovered_or_passed_by_country, df_infected_by_country = get_data_for_sir(
df_deaths_by_country, df_recovered_by_country, df_confirmed_by_country
)
return (
add_datetime(df_confirmed_by_country),
add_datetime(df_deaths_by_country),
add_datetime(df_recovered_by_country),
add_datetime(df_recovered_or_passed_by_country),
add_datetime(df_infected_by_country),
)
def add_datetime(df):
df.loc["Time"] = pd.period_range(df.columns[0], df.columns[-1], freq="D")
return df
def data_gouv_vue_ensemble():
"""
Data on the COVID-19 epidemic in France: overview
https://www.data.gouv.fr/fr/datasets/donnees-relatives-a-lepidemie-de-covid-19-en-france-vue-densemble/#_
Columns:
date
total_cas_confirmes
total_deces_hopital
total_deces_ehpad
total_cas_confirmes_ehpad
total_cas_possibles_ehpad
patients_reanimation
patients_hospitalises
total_patients_gueris
nouveaux_patients_hospitalises
nouveaux_patients_reanimation
"""
url_stable = (
"https://www.data.gouv.fr/fr/datasets/r/d3a98a30-893f-47f7-96c5-2f4bcaaa0d71"
)
df = pd.read_csv(url_stable)
df.index = pd.to_datetime(df["date"])
df = df.drop(columns=["date"])
df = df.sort_index()
return df
def data_gouv_taux_incidence():
"""
Epidemic activity indicators:
COVID-19 incidence rate by metropolitan area
https://www.data.gouv.fr/fr/datasets/indicateurs-de-lactivite-epidemique-taux-dincidence-de-lepidemie-de-covid-19-par-metropole/
Columns:
epci2020: EPCI code
semaine_glissante: rolling 7-day window
clage_65:
0 if the incidence rate covers all age groups
65 if the incidence rate covers people aged over 65
ti: number of new positive cases per 100,000 inhabitants over a rolling 7-day window
"""
url_stable = (
"https://www.data.gouv.fr/fr/datasets/r/61533034-0f2f-4b16-9a6d-28ffabb33a02"
)
df_main = pd.read_csv(url_stable)
df_main = df_main.rename(columns={"epci2020": "EPCI"})
# Mapping between EPCI codes and metropolitan area names
url_epci = "doc/metropole-epci.csv"
df_epci = pd.read_csv(url_epci, sep=";")
df = pd.merge(df_main, df_epci, how="left", on="EPCI")
df_65 = df.loc[df["clage_65"] == 65]
df_65["semaine_glissante"] = [i[11:] for i in df_65["semaine_glissante"]]
df_65.index = pd.to_datetime(df_65["semaine_glissante"])
df_65 = df_65.drop(columns=["semaine_glissante", "clage_65", "EPCI"])
df_65 = df_65.pivot(columns="Metropole", values="ti")
df_0 = df.loc[df["clage_65"] == 0]
df_0["semaine_glissante"] = [i[11:] for i in df_0["semaine_glissante"]]
df_0.index = pd.to_datetime(df_0["semaine_glissante"])
df_0 = df_0.drop(columns=["semaine_glissante", "clage_65", "EPCI"])
df_0 = df_0.pivot(columns="Metropole", values="ti")
return df, df_65, df_0
def data_gouv_hospital():
"""
Data on new intensive-care admissions by region:
the number of new patients admitted to intensive care in the last 24 hours.
Columns:
sexe:
0: women + men
1: men
2: women
dep: department
hosp: number of people currently hospitalised
rea: number of people currently in intensive or critical care
rad: cumulative number of people discharged home
dc: cumulative number of people who died in hospital
"""
url_stable = (
"https://www.data.gouv.fr/fr/datasets/r/63352e38-d353-4b54-bfd1-f1b3ee1cabd7"
)
df_main = pd.read_csv(url_stable, sep=";")
df_main.index = pd.to_datetime(df_main["jour"])
df_main = df_main.loc[df_main["sexe"] == 0]
df_hosp = df_main.drop(columns=["sexe", "jour", "rea", "rad", "dc"])
df_hosp = df_hosp.pivot(columns="dep", values="hosp")
df_rea = df_main.drop(columns=["sexe", "jour", "hosp", "rad", "dc"])
df_rea = df_rea.pivot(columns="dep", values="rea")
df_dc = df_main.drop(columns=["sexe", "jour", "rea", "rad", "hosp"])
df_dc = df_dc.pivot(columns="dep", values="dc")
return df_hosp, df_rea, df_dc
def data_gouv_vaccination():
"""
https://www.data.gouv.fr/fr/datasets/donnees-relatives-aux-personnes-vaccinees-contre-la-covid-19-1/#_
Columns:
reg
sexe:
0 : men + women + not specified
1 : men
2 : women
n_dose1
n_dose2
n_cum_dose1
n_cum_dose2
"""
url_regional = (
"https://www.data.gouv.fr/fr/datasets/r/96db2c1a-8c0c-413c-9a07-f6f62f3d4daf"
)
url_national = (
"https://www.data.gouv.fr/fr/datasets/r/349ca785-cf12-4f4d-9a0a-846d53dce996"
)
df_region = | pd.read_csv(url_regional, sep=";") | pandas.read_csv |
import os
import random
import numpy as np
import pandas as pd
import seaborn as sns
import sklearn
import torch
from sklearn.metrics import pairwise_distances
from sklearn.model_selection import train_test_split
from torch.utils.data import TensorDataset
import matplotlib.pyplot as plt
from scripts.ssc.evaluation.mldl_copied import CompPerformMetrics
from src.datasets.datasets import SwissRoll, SwissRoll_manifold
from src.evaluation.eval import Multi_Evaluation
from src.models.COREL.eval_engine import get_latentspace_representation
from src.models.WitnessComplexAE.wc_ae import WitnessComplexAutoencoder
from src.models.autoencoder.autoencoders import Autoencoder_MLP_topoae
def update_dict(dict, ks, metric, result):
for i, k in enumerate(ks):
dict.update({metric+'_k{}'.format(k): result[metric][i]})
return dict
def plot_dist_comparison(Z_manifold, Z_latent, labels, path_to_save = None,name = None):
print('normalize x,y')
Z_manifold[:, 0] = (Z_manifold[:,0]-Z_manifold[:,0].min())/(Z_manifold[:,0].max()-Z_manifold[:,0].min())
Z_manifold[:, 1] = (Z_manifold[:,1]-Z_manifold[:,1].min())/(Z_manifold[:,1].max()-Z_manifold[:,1].min())
Z_latent[:, 0] = (Z_latent[:,0]-Z_latent[:,0].min())/(Z_latent[:,0].max()-Z_latent[:,0].min())
Z_latent[:, 1] = (Z_latent[:,1]-Z_latent[:,1].min())/(Z_latent[:,1].max()-Z_latent[:,1].min())
manifold = pd.DataFrame({'x': Z_manifold[:, 0], 'y': Z_manifold[:, 1],'label': labels})
latents = pd.DataFrame({'x': Z_latent[:, 0], 'y': Z_latent[:, 1],'label': labels})
print('compute distances')
pwd_Z = pairwise_distances(Z_latent, Z_latent, n_jobs=2)
pwd_Ztrue = pairwise_distances(Z_manifold, Z_manifold, n_jobs=2)
print('normalize distances')
#normalize distances
pwd_Ztrue = (pwd_Ztrue-pwd_Ztrue.min())/(pwd_Ztrue.max()-pwd_Ztrue.min())
pwd_Z = (pwd_Z-pwd_Z.min())/(pwd_Z.max()-pwd_Z.min())
print('flatten')
#flatten
pwd_Ztrue = pwd_Ztrue.flatten()
pwd_Z = pwd_Z.flatten()
ind = random.sample(range(len(pwd_Z)), 2**12)
distances = pd.DataFrame({'Distances on $\mathcal{M}$': pwd_Ztrue[ind], 'Distances in $\mathcal{Z}$': pwd_Z[ind]})
print('plot')
#plot
fig, ax = plt.subplots(1,3, figsize=(3*10, 10))
sns.scatterplot(x = 'Distances on $\mathcal{M}$', y = 'Distances in $\mathcal{Z}$',data = distances, ax = ax[1], edgecolor = None,alpha=0.3)
#ax[0].set(xlabel='Distances on $\mathcal{M}$', ylabel='Distances in $\mathcal{Z}$',fontsize=25)
ax[1].xaxis.label.set_size(20)
ax[1].yaxis.label.set_size(20)
ax[1].set_title('Comparison of pairwise distances',fontsize=24,pad=20)
sns.scatterplot(y = 'x', x = 'y', hue='label', data = manifold,ax = ax[0],palette=plt.cm.viridis, marker=".", s=80,
edgecolor="none", legend=False)
ax[0].set_title('True manifold ($\mathcal{M}$)',fontsize=24,pad=20)
ax[0].set(xlabel="", ylabel="")
ax[0].set_yticks([])
sns.scatterplot(x = 'x', y = 'y',hue='label', data = latents,ax = ax[2],palette=plt.cm.viridis, marker=".", s=80,
edgecolor="none", legend=False)
ax[2].set_title('Latent space ($\mathcal{Z}$)',fontsize=24,pad=20)
ax[2].set(xlabel="", ylabel="")
ax[2].set_yticks([])
fig.tight_layout(pad=5)
if path_to_save != None and name != None:
print('save plot')
fig.savefig(os.path.join(path_to_save,'{}_4.pdf'.format(name)),dpi = 100)
plt.show()
plt.close()
return (np.square(pwd_Ztrue - pwd_Z)).mean()
def plot_dist_comparison2(Z_manifold, Z_latent, labels, path_to_save = None,name = None):
print('normalize x,y')
Z_manifold[:, 0] = (Z_manifold[:,0]-Z_manifold[:,0].min())/(Z_manifold[:,0].max()-Z_manifold[:,0].min())
Z_manifold[:, 1] = (Z_manifold[:,1]-Z_manifold[:,1].min())/(Z_manifold[:,1].max()-Z_manifold[:,1].min())
Z_latent[:, 0] = (Z_latent[:,0]-Z_latent[:,0].min())/(Z_latent[:,0].max()-Z_latent[:,0].min())
Z_latent[:, 1] = (Z_latent[:,1]-Z_latent[:,1].min())/(Z_latent[:,1].max()-Z_latent[:,1].min())
manifold = pd.DataFrame({'x': Z_manifold[:, 0], 'y': Z_manifold[:, 1],'label': labels})
latents = pd.DataFrame({'x': Z_latent[:, 0], 'y': Z_latent[:, 1],'label': labels})
print('compute distances')
pwd_Z = pairwise_distances(Z_latent, Z_latent, n_jobs=2)
pwd_Ztrue = pairwise_distances(Z_manifold, Z_manifold, n_jobs=2)
print('normalize distances')
#normalize distances
pwd_Ztrue = (pwd_Ztrue-pwd_Ztrue.min())/(pwd_Ztrue.max()-pwd_Ztrue.min())
pwd_Z = (pwd_Z-pwd_Z.min())/(pwd_Z.max()-pwd_Z.min())
print('flatten')
#flatten
pwd_Ztrue = pwd_Ztrue.flatten()
pwd_Z = pwd_Z.flatten()
ind = random.sample(range(len(pwd_Z)), 2**12)
distances = pd.DataFrame({'Distances on $\mathcal{M}$': pwd_Ztrue[ind], 'Distances in $\mathcal{Z}$': pwd_Z[ind]})
print('plot')
#plot
fig, ax = plt.subplots(2,1, figsize=(10, 20))
sns.scatterplot(x = 'Distances on $\mathcal{M}$', y = 'Distances in $\mathcal{Z}$',data = distances, ax = ax[1], edgecolor = None,alpha=0.3)
#ax[0].set(xlabel='Distances on $\mathcal{M}$', ylabel='Distances in $\mathcal{Z}$',fontsize=25)
ax[1].xaxis.label.set_size(20)
ax[1].yaxis.label.set_size(20)
ax[1].set_title('Comparison of pairwise distances',fontsize=24,pad=20)
lims = [max(0, 0), min(1, 1)]
ax[1].plot(lims, lims, '--',linewidth=5, color = 'black')
sns.scatterplot(x = 'x', y = 'y',hue='label', data = latents,ax = ax[0],palette=plt.cm.viridis, marker=".", s=80,
edgecolor="none", legend=False)
ax[0].set_title('Latent space ($\mathcal{Z}$)',fontsize=24,pad=20)
ax[0].set(xlabel="", ylabel="")
ax[0].set_yticks([])
ax[0].set_xticks([])
fig.tight_layout(pad=5)
if path_to_save != None and name != None:
print('save plot')
fig.savefig(os.path.join(path_to_save,'{}_5.pdf'.format(name)),dpi = 100)
plt.show()
plt.close()
return (np.square(pwd_Ztrue - pwd_Z)).mean()
if __name__ == "__main__":
# create df and set path to save
df_tot = | pd.DataFrame() | pandas.DataFrame |
import logging
from typing import Any, Union, List, Tuple, Dict
import numpy as np
import pandas as pd
from pandas import DataFrame, Series
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler
from ctrainlib.fplib import filter_fingerprints, search_fingerprint_thresholds, get_data_from_fingerprints
from ctrainlib.models import CVClassifier, NestedClusterCVClassifier, Classifier, Regressor
from ctrainlib.rdkit_support import mols_to_sdf, get_mols_from_smiles
def do_training(estimator: Any,
name: str,
est_options: Dict[str, Any],
thresholds: List[float],
categories: List[str],
x_data: DataFrame,
y_data: Series,
cv_clf: int,
cluster_clf: int,
fp_filter: Union[str, float],
feature_scaling: List[str],
random_seed: int,
smiles: List[str],
regression: bool) -> Union[Classifier, Regressor]:
"""
Does the complete training, with optional fingerprint filtering, feature scaling, and either plain
cross validation or nested cluster cross validation.
Parameters
----------
estimator : Any
Scikit-learn like estimator class to use for training
name : str
Name of the resulting model
est_options : Dict[str, Any]
Additional estimator options passed to the estimator constructor
thresholds : List[float]
List of thresholds to build the classes from
categories : List[str]
Names for the generated classes
x_data : DataFrame
Descriptors and fingerprints that should be used for training
y_data : Series
Series containing the training values
cv_clf : int
If provided, a ``cv_clf``-fold cross validation is performed for training
cluster_clf : int
If provided, a ``cluster_clf``-fold nested cluster cross validation is performed for training.
For clustering, `KMeans` is used.
fp_filter : Union[str, float]
A float value between 0.0 and 1.0 to use as threshold for fingerprint bit variance filtering,
or the string "``auto``" to search for the best variance threshold.
feature_scaling : List[str]
A list of columns to **NOT** use for feature_scaling. If all columns in x_data should be scaled,
then set ``feature_scaling`` to an empty list. If None, no feature scaling is performed.
random_seed : int
Random seed to use for all actions that require randomness (eg. KMeans clustering, training a
RandomForestClassifier or splitting x_data into several folds for cross validation)
smiles : List[str]
SMILES for exporting clustering results
regression : bool
True if estimator is a regressor and not a classifier
Returns
-------
Union[Classifier, Regressor]
A Classifier or Regressor instance
Raises
------
Exception
All exceptions that occur during training are raised
"""
if not regression:
# Segment the values. We only have the internal thresholds and cut() wants the min/max values
# so create the full bins. Need to be careful that the min/max are indeed smaller/larger than
# the bounding values.
low = min(y_data.min(), thresholds[0]) - 0.00001
high = max(y_data.max(), thresholds[-1]) + 0.00001
bins = [low] + thresholds + [high]
train_categories = [str(x) for x in range(len(categories))]
y_data = | pd.cut(y_data, bins=bins, include_lowest=True, right=False, labels=train_categories) | pandas.cut |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
import datetime as DT
from typing import List, Tuple, Union
from pathlib import Path
# pip install pandas
import pandas as pd
# pip install selenium
from selenium import webdriver
from selenium.webdriver.firefox.options import Options
from selenium.common.exceptions import NoSuchElementException
# TODO: Using WebDriverWait instead implicitly_wait
# TODO: Using logging instead print
# TODO: price must be decimal
def parse(url: str) -> List[Tuple[str, str, str]]:
options = Options()
options.add_argument('--headless')
items = []
driver = webdriver.Firefox(options=options)
driver.implicitly_wait(10)
try:
while True:
print('Load:', url)
driver.get(url)
for item_el in driver.find_elements_by_css_selector(".goods-tile"):
name = item_el.find_element_by_css_selector('.goods-tile__title').text
# Not every product has a price
try:
price = item_el.find_element_by_css_selector('.goods-tile__price-value').text
except NoSuchElementException:
price = '-'
nal = item_el.find_element_by_css_selector('.goods-tile__availability').text
row = name, price, nal
print(row)
items.append(row)
# If there is a button to go to the next page, continue the loop; otherwise stop
try:
a_next_page = driver.find_element_by_css_selector('a.pagination__direction_type_forward[href]')
url = a_next_page.get_attribute('href')
except NoSuchElementException:
break
finally:
driver.quit()
return items
def save_goods(
file_name: Union[str, Path],
items: List[Tuple[str, str, str]],
encoding='utf-8'
):
df = | pd.DataFrame(items, columns=['Name', 'Price', 'Nal']) | pandas.DataFrame |
import os
import torch.nn as nn
import cv2
import json
import pandas as pd
import warnings
import matplotlib.pyplot as plt
import mmcv
import numpy as np
import torch
from mmcv.ops import RoIAlign, RoIPool
from mmcv.parallel import collate, scatter
from mmcv.runner import load_checkpoint
from mmdet.core import get_classes
from mmdet.datasets.pipelines import Compose
from mmdet.models import build_detector
import pycocotools.mask as maskUtils
from skimage import measure
def init_detector(config, checkpoint=None, device='cuda:0'):
"""Initialize a detector from config file.
Args:
config (str or :obj:`mmcv.Config`): Config file path or the config
object.
checkpoint (str, optional): Checkpoint path. If left as None, the model
will not load any weights.
Returns:
nn.Module: The constructed detector.
"""
if isinstance(config, str):
config = mmcv.Config.fromfile(config)
elif not isinstance(config, mmcv.Config):
raise TypeError('config must be a filename or Config object, '
f'but got {type(config)}')
config.model.pretrained = None
model = build_detector(config.model, test_cfg=config.test_cfg)
if checkpoint is not None:
map_loc = 'cpu' if device == 'cpu' else None
checkpoint = load_checkpoint(model, checkpoint, map_location=map_loc)
if 'CLASSES' in checkpoint['meta']:
model.CLASSES = checkpoint['meta']['CLASSES']
else:
warnings.simplefilter('once')
warnings.warn('Class names are not saved in the checkpoint\'s '
'meta data, use COCO classes by default.')
model.CLASSES = get_classes('coco')
model.cfg = config # save the config in the model for convenience
model.to(device)
model.eval()
return model
class LoadImage(object):
"""A simple pipeline to load image."""
def __call__(self, results):
"""Call function to load images into results.
Args:
results (dict): A result dict contains the file name
of the image to be read.
Returns:
dict: ``results`` will be returned containing loaded image.
"""
if isinstance(results['img'], str):
results['filename'] = results['img']
results['ori_filename'] = results['img']
else:
results['filename'] = None
results['ori_filename'] = None
img = mmcv.imread(results['img'])
results['img'] = img
results['img_fields'] = ['img']
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
return results
def inference_detector(model, img):
"""Inference image(s) with the detector.
Args:
model (nn.Module): The loaded detector.
imgs (str/ndarray or list[str/ndarray]): Either image files or loaded
images.
Returns:
If imgs is a str, a generator will be returned, otherwise return the
detection results directly.
"""
cfg = model.cfg
device = next(model.parameters()).device # model device
# prepare data
if isinstance(img, np.ndarray):
# directly add img
data = dict(img=img)
cfg = cfg.copy()
# set loading pipeline type
cfg.data.test.pipeline[0].type = 'LoadImageFromWebcam'
else:
# add information into dict
data = dict(img_info=dict(filename=img), img_prefix=None)
# build the data pipeline
test_pipeline = Compose(cfg.data.test.pipeline)
data = test_pipeline(data)
data = collate([data], samples_per_gpu=1)
if next(model.parameters()).is_cuda:
# scatter to specified GPU
data = scatter(data, [device])[0]
else:
# Use torchvision ops for CPU mode instead
for m in model.modules():
if isinstance(m, (RoIPool, RoIAlign)):
if not m.aligned:
# aligned=False is not implemented on CPU
# set use_torchvision on-the-fly
m.use_torchvision = True
warnings.warn('We set use_torchvision=True in CPU mode.')
# just get the actual data from DataContainer
data['img_metas'] = data['img_metas'][0].data
# forward the model
with torch.no_grad():
result = model(return_loss=False, rescale=True, **data)[0]
return result
async def async_inference_detector(model, img):
"""Async inference image(s) with the detector.
Args:
model (nn.Module): The loaded detector.
        img (str or np.ndarray): Either an image file path or a loaded image.
Returns:
Awaitable detection results.
"""
cfg = model.cfg
device = next(model.parameters()).device # model device
# build the data pipeline
test_pipeline = Compose(cfg.data.test.pipeline)
# prepare data
data = dict(img_info=dict(filename=img), img_prefix=None)
data = test_pipeline(data)
data = scatter(collate([data], samples_per_gpu=1), [device])[0]
# We don't restore `torch.is_grad_enabled()` value during concurrent
# inference since execution can overlap
torch.set_grad_enabled(False)
result = await model.aforward_test(rescale=True, **data)
return result
def show_result_pyplot(model, img, result, score_thr=0.3, fig_size=(15, 10)):
"""Visualize the detection results on the image.
Args:
model (nn.Module): The loaded detector.
img (str or np.ndarray): Image filename or loaded image.
result (tuple[list] or list): The detection result, can be either
(bbox, segm) or just bbox.
score_thr (float): The threshold to visualize the bboxes and masks.
fig_size (tuple): Figure size of the pyplot figure.
"""
if hasattr(model, 'module'):
model = model.module
img = model.show_result(img, result, score_thr=score_thr, show=False)
plt.figure(figsize=fig_size)
plt.imshow(mmcv.bgr2rgb(img))
plt.show()
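# Illustrative end-to-end use of the helpers above (added example, not part of
# the original module). The config/checkpoint paths and the image name are
# placeholders and must point at real mmdetection files.
def _demo_inference(config_file='configs/faster_rcnn_r50_fpn_1x_coco.py',
                    checkpoint_file='checkpoints/faster_rcnn_r50_fpn_1x_coco.pth',
                    image='demo.jpg'):
    model = init_detector(config_file, checkpoint_file, device='cuda:0')
    result = inference_detector(model, image)
    show_result_pyplot(model, image, result, score_thr=0.3)
    return result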
def json_generation(file_name, img, result):
with open('Result_json/'+ os.path.splitext(file_name)[0]+'.json', 'w') as fp:
fp.write("{")
fp.write('"shape"')
fp.write(':')
fp.write('[\n')
CLASSES = ('lawn, flower_garden', 'forest', 'river', 'road', 'pavement',
'parking_lot', 'crosswalk', 'hiking_trail', 'trail', 'flower_bed')
segm_json_results = []
det, seg = result[0], result[1]
for label in range(len(det)):
bboxes = det[label]
if isinstance(seg, tuple):
segms = seg[0][label]
mask_score = seg[1][label]
else:
segms = seg[label]
mask_score = [bbox[4] for bbox in bboxes]
for i in range(bboxes.shape[0]):
data = dict()
data['label'] = CLASSES[label]
data['points'] = binary_to_polygon(segms[i])
data['shape_type'] = "polygon"
segm_json_results.append(data)
# writing JSON object
with open('Result_json/'+os.path.splitext(file_name)[0]+'.json', 'a') as f:
| pd.Series(data) | pandas.Series |
import sys, warnings, operator
import json
import time
import types
import numbers
import inspect
import itertools
import string
import unicodedata
import datetime as dt
from collections import defaultdict, OrderedDict
from contextlib import contextmanager
from distutils.version import LooseVersion as _LooseVersion
from functools import partial
from threading import Thread, Event
from types import FunctionType
import numpy as np
import param
# Python3 compatibility
if sys.version_info.major >= 3:
import builtins as builtins # noqa (compatibility)
if sys.version_info.minor > 3:
from collections.abc import Iterable # noqa (compatibility)
else:
from collections import Iterable # noqa (compatibility)
basestring = str
unicode = str
long = int
cmp = lambda a, b: (a>b)-(a<b)
generator_types = (zip, range, types.GeneratorType)
RecursionError = RecursionError if sys.version_info.minor > 4 else RuntimeError # noqa
_getargspec = inspect.getfullargspec
get_keywords = operator.attrgetter('varkw')
LooseVersion = _LooseVersion
else:
import __builtin__ as builtins # noqa (compatibility)
from collections import Iterable # noqa (compatibility)
basestring = basestring
unicode = unicode
from itertools import izip
generator_types = (izip, xrange, types.GeneratorType) # noqa
RecursionError = RuntimeError
_getargspec = inspect.getargspec
get_keywords = operator.attrgetter('keywords')
class LooseVersion(_LooseVersion):
"""
Subclassed to avoid unicode issues in python2
"""
def __init__ (self, vstring=None):
if isinstance(vstring, unicode):
vstring = str(vstring)
self.parse(vstring)
def __cmp__(self, other):
if isinstance(other, unicode):
other = str(other)
if isinstance(other, basestring):
other = LooseVersion(other)
return cmp(self.version, other.version)
numpy_version = LooseVersion(np.__version__)
param_version = LooseVersion(param.__version__)
datetime_types = (np.datetime64, dt.datetime, dt.date, dt.time)
timedelta_types = (np.timedelta64, dt.timedelta,)
arraylike_types = (np.ndarray,)
masked_types = ()
try:
import pandas as pd
except ImportError:
pd = None
if pd:
pandas_version = LooseVersion(pd.__version__)
try:
if pandas_version >= '0.24.0':
from pandas.core.dtypes.dtypes import DatetimeTZDtype as DatetimeTZDtypeType
from pandas.core.dtypes.generic import ABCSeries, ABCIndexClass
elif pandas_version > '0.20.0':
from pandas.core.dtypes.dtypes import DatetimeTZDtypeType
from pandas.core.dtypes.generic import ABCSeries, ABCIndexClass
else:
from pandas.types.dtypes import DatetimeTZDtypeType
from pandas.types.dtypes.generic import ABCSeries, ABCIndexClass
pandas_datetime_types = (pd.Timestamp, DatetimeTZDtypeType, pd.Period)
pandas_timedelta_types = (pd.Timedelta,)
datetime_types = datetime_types + pandas_datetime_types
timedelta_types = timedelta_types + pandas_timedelta_types
arraylike_types = arraylike_types + (ABCSeries, ABCIndexClass)
if pandas_version > '0.23.0':
from pandas.core.dtypes.generic import ABCExtensionArray
arraylike_types = arraylike_types + (ABCExtensionArray,)
if pandas_version > '1.0':
from pandas.core.arrays.masked import BaseMaskedArray
masked_types = (BaseMaskedArray,)
except Exception as e:
        param.main.param.warning('pandas could not register all extension types; '
                                 'imports failed with the following error: %s' % e)
try:
import cftime
cftime_types = (cftime.datetime,)
datetime_types += cftime_types
except:
cftime_types = ()
_STANDARD_CALENDARS = set(['standard', 'gregorian', 'proleptic_gregorian'])
class VersionError(Exception):
"Raised when there is a library version mismatch."
def __init__(self, msg, version=None, min_version=None, **kwargs):
self.version = version
self.min_version = min_version
super(VersionError, self).__init__(msg, **kwargs)
class Config(param.ParameterizedFunction):
"""
Set of boolean configuration values to change HoloViews' global
behavior. Typically used to control warnings relating to
deprecations or set global parameter such as style 'themes'.
"""
future_deprecations = param.Boolean(default=False, doc="""
Whether to warn about future deprecations""")
image_rtol = param.Number(default=10e-4, doc="""
The tolerance used to enforce regular sampling for regular,
gridded data where regular sampling is expected. Expressed as the
maximal allowable sampling difference between sample
locations.""")
no_padding = param.Boolean(default=False, doc="""
Disable default padding (introduced in 1.13.0).""")
warn_options_call = param.Boolean(default=True, doc="""
Whether to warn when the deprecated __call__ options syntax is
used (the opts method should now be used instead). It is
recommended that users switch this on to update any uses of
__call__ as it will be deprecated in future.""")
default_cmap = param.String(default='kbc_r', doc="""
Global default colormap. Prior to HoloViews 1.14.0, the default
value was 'fire' which can be set for backwards compatibility.""")
default_gridded_cmap = param.String(default='kbc_r', doc="""
Global default colormap for gridded elements (i.e. Image, Raster
and QuadMesh). Can be set to 'fire' to match raster defaults
prior to HoloViews 1.14.0 while allowing the default_cmap to be
the value of 'kbc_r' used in HoloViews >= 1.14.0""")
default_heatmap_cmap = param.String(default='kbc_r', doc="""
Global default colormap for HeatMap elements. Prior to HoloViews
1.14.0, the default value was the 'RdYlBu_r' colormap.""")
def __call__(self, **params):
self.param.set_param(**params)
return self
config = Config()
class HashableJSON(json.JSONEncoder):
"""
Extends JSONEncoder to generate a hashable string for as many types
of object as possible including nested objects and objects that are
not normally hashable. The purpose of this class is to generate
unique strings that once hashed are suitable for use in memoization
and other cases where deep equality must be tested without storing
the entire object.
By default JSONEncoder supports booleans, numbers, strings, lists,
tuples and dictionaries. In order to support other types such as
sets, datetime objects and mutable objects such as pandas Dataframes
or numpy arrays, HashableJSON has to convert these types to
datastructures that can normally be represented as JSON.
Support for other object types may need to be introduced in
future. By default, unrecognized object types are represented by
their id.
One limitation of this approach is that dictionaries with composite
keys (e.g. tuples) are not supported due to the JSON spec.
"""
string_hashable = (dt.datetime,)
repr_hashable = ()
def default(self, obj):
if isinstance(obj, set):
return hash(frozenset(obj))
elif isinstance(obj, np.ndarray):
return obj.tolist()
if pd and isinstance(obj, (pd.Series, pd.DataFrame)):
return obj.to_csv(header=True).encode('utf-8')
elif isinstance(obj, self.string_hashable):
return str(obj)
elif isinstance(obj, self.repr_hashable):
return repr(obj)
try:
return hash(obj)
except:
return id(obj)
def merge_option_dicts(old_opts, new_opts):
"""
Update the old_opts option dictionary with the options defined in
new_opts. Instead of a shallow update as would be performed by calling
old_opts.update(new_opts), this updates the dictionaries of all option
types separately.
Given two dictionaries
old_opts = {'a': {'x': 'old', 'y': 'old'}}
and
new_opts = {'a': {'y': 'new', 'z': 'new'}, 'b': {'k': 'new'}}
this returns a dictionary
{'a': {'x': 'old', 'y': 'new', 'z': 'new'}, 'b': {'k': 'new'}}
"""
merged = dict(old_opts)
for option_type, options in new_opts.items():
if option_type not in merged:
merged[option_type] = {}
merged[option_type].update(options)
return merged
def merge_options_to_dict(options):
"""
Given a collection of Option objects or partial option dictionaries,
merge everything to a single dictionary.
"""
merged_options = {}
for obj in options:
if isinstance(obj,dict):
new_opts = obj
else:
new_opts = {obj.key: obj.kwargs}
merged_options = merge_option_dicts(merged_options, new_opts)
return merged_options
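# Worked example of the two merge helpers above (added for illustration; the
# dictionaries mirror the docstring of merge_option_dicts):
def _demo_merge_option_dicts():
    old_opts = {'a': {'x': 'old', 'y': 'old'}}
    new_opts = {'a': {'y': 'new', 'z': 'new'}, 'b': {'k': 'new'}}
    merged = merge_option_dicts(old_opts, new_opts)
    assert merged == {'a': {'x': 'old', 'y': 'new', 'z': 'new'}, 'b': {'k': 'new'}}
    return merged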
def deprecated_opts_signature(args, kwargs):
"""
Utility to help with the deprecation of the old .opts method signature
Returns whether opts.apply_groups should be used (as a bool) and the
corresponding options.
"""
from .options import Options
groups = set(Options._option_groups)
opts = {kw for kw in kwargs if kw != 'clone'}
apply_groups = False
options = None
new_kwargs = {}
if len(args) > 0 and isinstance(args[0], dict):
apply_groups = True
if (not set(args[0]).issubset(groups) and
all(isinstance(v, dict) and not set(v).issubset(groups)
for v in args[0].values())):
apply_groups = False
elif set(args[0].keys()) <= groups:
new_kwargs = args[0]
else:
options = args[0]
elif opts and opts.issubset(set(groups)):
apply_groups = True
elif kwargs.get('options', None) is not None:
apply_groups = True
elif not args and not kwargs:
apply_groups = True
return apply_groups, options, new_kwargs
class periodic(Thread):
"""
Run a callback count times with a given period without blocking.
If count is None, will run till timeout (which may be forever if None).
"""
def __init__(self, period, count, callback, timeout=None, block=False):
if isinstance(count, int):
if count < 0: raise ValueError('Count value must be positive')
        elif count is not None:
raise ValueError('Count value must be a positive integer or None')
if block is False and count is None and timeout is None:
raise ValueError('When using a non-blocking thread, please specify '
'either a count or a timeout')
super(periodic, self).__init__()
self.period = period
self.callback = callback
self.count = count
self.counter = 0
self.block = block
self.timeout = timeout
self._completed = Event()
self._start_time = None
@property
def completed(self):
return self._completed.is_set()
def start(self):
self._start_time = time.time()
if self.block is False:
super(periodic,self).start()
else:
self.run()
def stop(self):
self.timeout = None
self._completed.set()
def __repr__(self):
return 'periodic(%s, %s, %s)' % (self.period,
self.count,
callable_name(self.callback))
def __str__(self):
return repr(self)
def run(self):
while not self.completed:
if self.block:
time.sleep(self.period)
else:
self._completed.wait(self.period)
self.counter += 1
try:
self.callback(self.counter)
except Exception:
self.stop()
if self.timeout is not None:
dt = (time.time() - self._start_time)
if dt > self.timeout:
self.stop()
if self.counter == self.count:
self.stop()
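# Example of driving the periodic helper above (added for illustration, not part
# of the original module): run a callback five times, once every 0.1 seconds,
# blocking until all runs have completed.
def _demo_periodic():
    ticks = []
    p = periodic(0.1, 5, lambda count: ticks.append(count), block=True)
    p.start()
    return ticks  # expected: [1, 2, 3, 4, 5]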
def deephash(obj):
"""
Given an object, return a hash using HashableJSON. This hash is not
architecture, Python version or platform independent.
"""
try:
return hash(json.dumps(obj, cls=HashableJSON, sort_keys=True))
except:
return None
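# Minimal sketch of deephash/HashableJSON usage (added for illustration):
# structurally identical objects hash alike even when they contain members,
# such as ndarrays, that are not normally hashable.
def _demo_deephash():
    a = {'name': 'model', 'weights': np.arange(3)}
    b = {'name': 'model', 'weights': np.arange(3)}
    assert deephash(a) == deephash(b)
    return deephash(a)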
def tree_attribute(identifier):
"""
Predicate that returns True for custom attributes added to AttrTrees
that are not methods, properties or internal attributes.
These custom attributes start with a capitalized character when
applicable (not applicable to underscore or certain unicode characters)
"""
if identifier[0].upper().isupper() is False and identifier[0] != '_':
return True
else:
return identifier[0].isupper()
def argspec(callable_obj):
"""
Returns an ArgSpec object for functions, staticmethods, instance
methods, classmethods and partials.
Note that the args list for instance and class methods are those as
seen by the user. In other words, the first argument which is
conventionally called 'self' or 'cls' is omitted in these cases.
"""
if (isinstance(callable_obj, type)
and issubclass(callable_obj, param.ParameterizedFunction)):
# Parameterized function.__call__ considered function in py3 but not py2
spec = _getargspec(callable_obj.__call__)
args = spec.args[1:]
elif inspect.isfunction(callable_obj): # functions and staticmethods
spec = _getargspec(callable_obj)
args = spec.args
elif isinstance(callable_obj, partial): # partials
arglen = len(callable_obj.args)
spec = _getargspec(callable_obj.func)
args = [arg for arg in spec.args[arglen:] if arg not in callable_obj.keywords]
elif inspect.ismethod(callable_obj): # instance and class methods
spec = _getargspec(callable_obj)
args = spec.args[1:]
else: # callable objects
return argspec(callable_obj.__call__)
return inspect.ArgSpec(args=args,
varargs=spec.varargs,
keywords=get_keywords(spec),
defaults=spec.defaults)
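# Quick illustration of argspec (added example): partials hide the positional
# arguments they already bind, so only user-visible arguments are reported.
def _demo_argspec():
    def f(a, b, c=1):
        return a + b + c
    spec_full = argspec(f)                 # args=['a', 'b', 'c'], defaults=(1,)
    spec_partial = argspec(partial(f, 2))  # args=['b', 'c']
    return spec_full, spec_partial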
def validate_dynamic_argspec(callback, kdims, streams):
"""
Utility used by DynamicMap to ensure the supplied callback has an
appropriate signature.
If validation succeeds, returns a list of strings to be zipped with
the positional arguments, i.e. kdim values. The zipped values can then
be merged with the stream values to pass everything to the Callable
as keywords.
If the callbacks use *args, None is returned to indicate that kdim
values must be passed to the Callable by position. In this
situation, Callable passes *args and **kwargs directly to the
callback.
If the callback doesn't use **kwargs, the accepted keywords are
validated against the stream parameter names.
"""
argspec = callback.argspec
name = callback.name
kdims = [kdim.name for kdim in kdims]
stream_params = stream_parameters(streams)
defaults = argspec.defaults if argspec.defaults else []
all_posargs = argspec.args[:-len(defaults)] if defaults else argspec.args
# Filter out any posargs for streams
posargs = [arg for arg in all_posargs if arg not in stream_params]
kwargs = argspec.args[-len(defaults):]
if argspec.keywords is None:
unassigned_streams = set(stream_params) - set(argspec.args)
if unassigned_streams:
unassigned = ','.join(unassigned_streams)
raise KeyError('Callable {name!r} missing keywords to '
'accept stream parameters: {unassigned}'.format(name=name,
unassigned=unassigned))
if len(posargs) > len(kdims) + len(stream_params):
raise KeyError('Callable {name!r} accepts more positional arguments than '
'there are kdims and stream parameters'.format(name=name))
if kdims == []: # Can be no posargs, stream kwargs already validated
return []
if set(kdims) == set(posargs): # Posargs match exactly, can all be passed as kwargs
return kdims
elif len(posargs) == len(kdims): # Posargs match kdims length, supplying names
if argspec.args[:len(kdims)] != posargs:
raise KeyError('Unmatched positional kdim arguments only allowed at '
'the start of the signature of {name!r}'.format(name=name))
return posargs
elif argspec.varargs: # Posargs missing, passed to Callable directly
return None
elif set(posargs) - set(kdims):
raise KeyError('Callable {name!r} accepts more positional arguments {posargs} '
'than there are key dimensions {kdims}'.format(name=name,
posargs=posargs,
kdims=kdims))
elif set(kdims).issubset(set(kwargs)): # Key dims can be supplied by keyword
return kdims
elif set(kdims).issubset(set(posargs+kwargs)):
return kdims
elif argspec.keywords:
return kdims
else:
raise KeyError('Callback {name!r} signature over {names} does not accommodate '
'required kdims {kdims}'.format(name=name,
names=list(set(posargs+kwargs)),
kdims=kdims))
def callable_name(callable_obj):
"""
Attempt to return a meaningful name identifying a callable or generator
"""
try:
if (isinstance(callable_obj, type)
and issubclass(callable_obj, param.ParameterizedFunction)):
return callable_obj.__name__
elif (isinstance(callable_obj, param.Parameterized)
and 'operation' in callable_obj.param):
return callable_obj.operation.__name__
elif isinstance(callable_obj, partial):
return str(callable_obj)
elif inspect.isfunction(callable_obj): # functions and staticmethods
return callable_obj.__name__
elif inspect.ismethod(callable_obj): # instance and class methods
meth = callable_obj
if sys.version_info < (3,0):
owner = meth.im_class if meth.im_self is None else meth.im_self
if meth.__name__ == '__call__':
return type(owner).__name__
return '.'.join([owner.__name__, meth.__name__])
else:
return meth.__func__.__qualname__.replace('.__call__', '')
elif isinstance(callable_obj, types.GeneratorType):
return callable_obj.__name__
else:
return type(callable_obj).__name__
except Exception:
return str(callable_obj)
def process_ellipses(obj, key, vdim_selection=False):
"""
Helper function to pad a __getitem__ key with the right number of
empty slices (i.e. :) when the key contains an Ellipsis (...).
If the vdim_selection flag is true, check if the end of the key
contains strings or Dimension objects in obj. If so, extra padding
will not be applied for the value dimensions (i.e. the resulting key
will be exactly one longer than the number of kdims). Note: this
flag should not be used for composite types.
"""
if getattr(getattr(key, 'dtype', None), 'kind', None) == 'b':
return key
wrapped_key = wrap_tuple(key)
ellipse_count = sum(1 for k in wrapped_key if k is Ellipsis)
if ellipse_count == 0:
return key
elif ellipse_count != 1:
raise Exception("Only one ellipsis allowed at a time.")
dim_count = len(obj.dimensions())
index = wrapped_key.index(Ellipsis)
head = wrapped_key[:index]
tail = wrapped_key[index+1:]
padlen = dim_count - (len(head) + len(tail))
if vdim_selection:
# If the end of the key (i.e. the tail) is in vdims, pad to len(kdims)+1
if wrapped_key[-1] in obj.vdims:
            padlen = (len(obj.kdims) + 1) - len(head + tail)
return head + ((slice(None),) * padlen) + tail
def bytes_to_unicode(value):
"""
Safely casts bytestring to unicode
"""
if isinstance(value, bytes):
return unicode(value.decode('utf-8'))
return value
def get_method_owner(method):
"""
Gets the instance that owns the supplied method
"""
if isinstance(method, partial):
method = method.func
return method.__self__ if sys.version_info.major >= 3 else method.im_self
def capitalize_unicode_name(s):
"""
Turns a string such as 'capital delta' into the shortened,
capitalized version, in this case simply 'Delta'. Used as a
transform in sanitize_identifier.
"""
index = s.find('capital')
if index == -1: return s
tail = s[index:].replace('capital', '').strip()
tail = tail[0].upper() + tail[1:]
return s[:index] + tail
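# Example from the docstring above, kept as an executable check (added):
def _demo_capitalize_unicode_name():
    assert capitalize_unicode_name('capital delta') == 'Delta'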
class sanitize_identifier_fn(param.ParameterizedFunction):
"""
Sanitizes group/label values for use in AttrTree attribute
access. Depending on the version parameter, either sanitization
    appropriate for Python 2 (no unicode identifiers allowed) or
Python 3 (some unicode allowed) is used.
Note that if you are using Python 3, you can switch to version 2
for compatibility but you cannot enable relaxed sanitization if
you are using Python 2.
Special characters are sanitized using their (lowercase) unicode
name using the unicodedata module. For instance:
>>> unicodedata.name(u'$').lower()
'dollar sign'
As these names are often very long, this parameterized function
allows filtered, substitutions and transforms to help shorten these
names appropriately.
"""
version = param.ObjectSelector(sys.version_info.major, objects=[2,3], doc="""
The sanitization version. If set to 2, more aggressive
sanitization appropriate for Python 2 is applied. Otherwise,
if set to 3, more relaxed, Python 3 sanitization is used.""")
capitalize = param.Boolean(default=True, doc="""
Whether the first letter should be converted to
uppercase. Note, this will only be applied to ASCII characters
in order to make sure paths aren't confused with method
names.""")
eliminations = param.List(['extended', 'accent', 'small', 'letter', 'sign', 'digit',
'latin', 'greek', 'arabic-indic', 'with', 'dollar'], doc="""
Lowercase strings to be eliminated from the unicode names in
        order to shorten the sanitized name (lowercase). Redundant
strings should be removed but too much elimination could cause
two unique strings to map to the same sanitized output.""")
substitutions = param.Dict(default={'circumflex':'power',
'asterisk':'times',
'solidus':'over'}, doc="""
Lowercase substitutions of substrings in unicode names. For
instance the ^ character has the name 'circumflex accent' even
though it is more typically used for exponentiation. Note that
substitutions occur after filtering and that there should be no
ordering dependence between substitutions.""")
transforms = param.List(default=[capitalize_unicode_name], doc="""
List of string transformation functions to apply after
filtering and substitution in order to further compress the
unicode name. For instance, the default capitalize_unicode_name
function will turn the string "capital delta" into "Delta".""")
disallowed = param.List(default=['trait_names', '_ipython_display_',
'_getAttributeNames'], doc="""
An explicit list of name that should not be allowed as
attribute names on Tree objects.
By default, prevents IPython from creating an entry called
Trait_names due to an inconvenient getattr check (during
tab-completion).""")
disable_leading_underscore = param.Boolean(default=False, doc="""
Whether leading underscores should be allowed to be sanitized
with the leading prefix.""")
aliases = param.Dict(default={}, doc="""
A dictionary of aliases mapping long strings to their short,
sanitized equivalents""")
prefix = 'A_'
_lookup_table = param.Dict(default={}, doc="""
Cache of previously computed sanitizations""")
@param.parameterized.bothmethod
def add_aliases(self_or_cls, **kwargs):
"""
Conveniently add new aliases as keyword arguments. For instance
you can add a new alias with add_aliases(short='Longer string')
"""
self_or_cls.aliases.update({v:k for k,v in kwargs.items()})
@param.parameterized.bothmethod
def remove_aliases(self_or_cls, aliases):
"""
Remove a list of aliases.
"""
for k,v in self_or_cls.aliases.items():
if v in aliases:
self_or_cls.aliases.pop(k)
@param.parameterized.bothmethod
def allowable(self_or_cls, name, disable_leading_underscore=None):
disabled_reprs = ['javascript', 'jpeg', 'json', 'latex',
'latex', 'pdf', 'png', 'svg', 'markdown']
disabled_ = (self_or_cls.disable_leading_underscore
if disable_leading_underscore is None
else disable_leading_underscore)
if disabled_ and name.startswith('_'):
return False
isrepr = any(('_repr_%s_' % el) == name for el in disabled_reprs)
return (name not in self_or_cls.disallowed) and not isrepr
@param.parameterized.bothmethod
def prefixed(self, identifier, version):
"""
Whether or not the identifier will be prefixed.
Strings that require the prefix are generally not recommended.
"""
invalid_starting = ['Mn', 'Mc', 'Nd', 'Pc']
if identifier.startswith('_'): return True
return((identifier[0] in string.digits) if version==2
else (unicodedata.category(identifier[0]) in invalid_starting))
@param.parameterized.bothmethod
def remove_diacritics(self_or_cls, identifier):
"""
Remove diacritics and accents from the input leaving other
unicode characters alone."""
chars = ''
for c in identifier:
replacement = unicodedata.normalize('NFKD', c).encode('ASCII', 'ignore')
if replacement != '':
chars += bytes_to_unicode(replacement)
else:
chars += c
return chars
@param.parameterized.bothmethod
def shortened_character_name(self_or_cls, c, eliminations=[], substitutions={}, transforms=[]):
"""
Given a unicode character c, return the shortened unicode name
(as a list of tokens) by applying the eliminations,
substitutions and transforms.
"""
name = unicodedata.name(c).lower()
# Filtering
for elim in eliminations:
name = name.replace(elim, '')
# Substitution
for i,o in substitutions.items():
name = name.replace(i, o)
for transform in transforms:
name = transform(name)
return ' '.join(name.strip().split()).replace(' ','_').replace('-','_')
def __call__(self, name, escape=True, version=None):
if name in [None, '']:
return name
elif name in self.aliases:
return self.aliases[name]
elif name in self._lookup_table:
return self._lookup_table[name]
name = bytes_to_unicode(name)
version = self.version if version is None else version
if not self.allowable(name):
raise AttributeError("String %r is in the disallowed list of attribute names: %r" % (name, self.disallowed))
if version == 2:
name = self.remove_diacritics(name)
if self.capitalize and name and name[0] in string.ascii_lowercase:
name = name[0].upper()+name[1:]
sanitized = (self.sanitize_py2(name) if version==2 else self.sanitize_py3(name))
if self.prefixed(name, version):
sanitized = self.prefix + sanitized
self._lookup_table[name] = sanitized
return sanitized
def _process_underscores(self, tokens):
"Strip underscores to make sure the number is correct after join"
groups = [[str(''.join(el))] if b else list(el)
for (b,el) in itertools.groupby(tokens, lambda k: k=='_')]
flattened = [el for group in groups for el in group]
processed = []
for token in flattened:
if token == '_': continue
if token.startswith('_'):
token = str(token[1:])
if token.endswith('_'):
token = str(token[:-1])
processed.append(token)
return processed
def sanitize_py2(self, name):
# This fix works but masks an issue in self.sanitize (py2)
prefix = '_' if name.startswith('_') else ''
valid_chars = string.ascii_letters+string.digits+'_'
return prefix + str('_'.join(self.sanitize(name, lambda c: c in valid_chars)))
def sanitize_py3(self, name):
if not name.isidentifier():
return '_'.join(self.sanitize(name, lambda c: ('_'+c).isidentifier()))
else:
return name
def sanitize(self, name, valid_fn):
"Accumulate blocks of hex and separate blocks by underscores"
invalid = {'\a':'a','\b':'b', '\v':'v','\f':'f','\r':'r'}
for cc in filter(lambda el: el in name, invalid.keys()):
raise Exception("Please use a raw string or escape control code '\%s'"
% invalid[cc])
sanitized, chars = [], ''
for split in name.split():
for c in split:
if valid_fn(c): chars += str(c) if c=='_' else c
else:
short = self.shortened_character_name(c, self.eliminations,
self.substitutions,
self.transforms)
sanitized.extend([chars] if chars else [])
if short != '':
sanitized.append(short)
chars = ''
if chars:
sanitized.extend([chars])
chars=''
return self._process_underscores(sanitized + ([chars] if chars else []))
sanitize_identifier = sanitize_identifier_fn.instance()
group_sanitizer = sanitize_identifier_fn.instance()
label_sanitizer = sanitize_identifier_fn.instance()
dimension_sanitizer = sanitize_identifier_fn.instance(capitalize=False)
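# Illustrative behaviour of the sanitizer instances defined above (added
# example): accents are stripped before identifier-level sanitization.
def _demo_remove_diacritics():
    assert sanitize_identifier.remove_diacritics(u'café') == u'cafe'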
def isscalar(val):
"""
Value is scalar or None
"""
return val is None or np.isscalar(val) or isinstance(val, datetime_types)
def isnumeric(val):
if isinstance(val, (basestring, bool, np.bool_)):
return False
try:
float(val)
return True
except:
return False
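# Spot-check of the two predicates above (added for illustration): note that
# booleans and strings are rejected by isnumeric even though float() would
# accept some of them.
def _demo_scalar_checks():
    assert isscalar(None) and isscalar(3.5) and not isscalar([1, 2])
    assert isnumeric(7) and isnumeric(3.5)
    assert not isnumeric(True) and not isnumeric('3.5')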
def asarray(arraylike, strict=True):
"""
Converts arraylike objects to NumPy ndarray types. Errors if
object is not arraylike and strict option is enabled.
"""
if isinstance(arraylike, np.ndarray):
return arraylike
elif isinstance(arraylike, list):
return np.asarray(arraylike, dtype=object)
elif not isinstance(arraylike, np.ndarray) and isinstance(arraylike, arraylike_types):
return arraylike.values
elif hasattr(arraylike, '__array__'):
return np.asarray(arraylike)
elif strict:
raise ValueError('Could not convert %s type to array' % type(arraylike))
return arraylike
nat_as_integer = np.datetime64('NAT').view('i8')
def isnat(val):
"""
Checks if the value is a NaT. Should only be called on datetimelike objects.
"""
if (isinstance(val, (np.datetime64, np.timedelta64)) or
(isinstance(val, np.ndarray) and val.dtype.kind == 'M')):
if numpy_version >= '1.13':
return np.isnat(val)
else:
return val.view('i8') == nat_as_integer
elif pd and val is pd.NaT:
return True
elif pd and isinstance(val, pandas_datetime_types+pandas_timedelta_types):
return | pd.isna(val) | pandas.isna |
"""
data_prep.py - Extract data from date range and create models
Usage:
data_prep.py [options]
data_prep.py -h | --help
Options:
-h --help Show this message.
--output_folder=OUT Output folder for the data and reports to be saved.
"""
from __future__ import print_function
import pandas as pd
import numpy as np
import re
import os
import docopt
import sys
import pickle
import os.path
from datetime import datetime, date, time
from dateutil.parser import parse
from time import strftime
import pyarrow
import json
import git
from tqdm import tqdm
from covidify.config import REPO, TMP_FOLDER, TMP_GIT, DATA
args = docopt.docopt(__doc__)
out = args['--output_folder']
def clean_sheet_names(new_ranges):
    # Remove all sheets that don't have a numeric header
numeric_sheets = [x for x in new_ranges if re.search(r'\d', x)]
return numeric_sheets
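# Illustrative call (added example; the filenames are made up): only the
# date-stamped daily report sheets survive the filter.
def _demo_clean_sheet_names():
    return clean_sheet_names(['01-22-2020.csv', 'README.md', '.gitignore'])  # ['01-22-2020.csv']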
def clone_repo(TMP_FOLDER, REPO):
print('Cloning Data Repo...')
git.Git(TMP_FOLDER).clone(REPO)
# Create Tmp Folder
if not os.path.isdir(TMP_FOLDER):
print('Creating folder...')
print('...', TMP_FOLDER)
os.mkdir(TMP_FOLDER)
#Check if repo exists
#git pull if it does
if not os.path.isdir(TMP_GIT):
clone_repo(TMP_FOLDER, REPO)
else:
try:
print('git pull from', REPO)
rep = git.Repo(TMP_GIT)
rep.remotes.origin.pull()
except:
print('Could not pull from', REPO)
sys.exit()
sheets = os.listdir(DATA)
# Clean the result to the sheet tabs we want
print('Getting sheets...')
cleaned_sheets = clean_sheet_names(sheets)
def clean_last_updated(last_update):
'''
convert date and time in YYYYMMDD HMS format
'''
date = parse(str(last_update).split(' ')[0]).strftime("%Y-%m-%d")
time = parse(str(last_update).split(' ')[1]).strftime('%H:%M:%S')
parsed_date = str(date) + ' ' + str(time)
return parsed_date
def get_date(last_update):
return parse(str(last_update).split(' ')[0]).strftime("%Y-%m-%d")
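# Quick check of the two date helpers above (added example; the input mimics the
# raw 'Last Update' format in the daily CSVs):
def _demo_date_helpers():
    assert clean_last_updated('1/31/2020 23:59') == '2020-01-31 23:59:00'
    assert get_date('1/31/2020 23:59') == '2020-01-31'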
def get_csv_date(file):
return get_date(file.split('.')[0] + ' ')
def drop_duplicates(df_raw):
'''
Take the max date value for each province for a given date
'''
days_list = []
for datetime in df_raw.date.unique():
tmp_df = df_raw[df_raw.date == datetime]
tmp_df = tmp_df.sort_values(['Last Update']).drop_duplicates('Province/State', keep='last')
days_list.append(tmp_df)
return days_list
keep_cols = ['Confirmed', 'Country/Region', 'Deaths', 'Last Update', 'Province/State', 'Recovered']
numeric_cols = ['Confirmed', 'Deaths', 'Recovered']
def get_data(cleaned_sheets):
all_csv = []
# Import all CSV's
    for file in tqdm(sorted(cleaned_sheets), desc='... importing data: '):
if 'csv' in file:
# print('...', file)
tmp_df = pd.read_csv(os.path.join(DATA, file), index_col=None, header=0, parse_dates=['Last Update'])
tmp_df = tmp_df[keep_cols]
tmp_df[numeric_cols] = tmp_df[numeric_cols].fillna(0)
tmp_df[numeric_cols] = tmp_df[numeric_cols].astype(int)
tmp_df['Province/State'].fillna(tmp_df['Country/Region'], inplace=True) #If no region given, fill it with country
tmp_df['Last Update'] = tmp_df['Last Update'].apply(clean_last_updated)
tmp_df['date'] = tmp_df['Last Update'].apply(get_date)
tmp_df['file_date'] = get_csv_date(file)
all_csv.append(tmp_df)
# concatenate all csv's into one df
df_raw = pd.concat(all_csv, axis=0, ignore_index=True, sort=True)
df_raw = df_raw.sort_values(by=['Last Update'])
frames = drop_duplicates(df_raw)
tmp = | pd.concat(frames, axis=0, ignore_index=True, sort=True) | pandas.concat |
"""
Class Features
Name: lib_data_io_ascii
Author(s): <NAME> (<EMAIL>)
Date: '20200401'
Version: '3.0.0'
"""
#######################################################################################
# Libraries
import logging
import os
import itertools
from abc import ABC
from copy import deepcopy
import numpy as np
import pandas as pd
from hmc.algorithm.default.lib_default_args import logger_name
# Logging
log_stream = logging.getLogger(logger_name)
# Debug
# import matplotlib.pylab as plt
#######################################################################################
# -------------------------------------------------------------------------------------
# Super class to wrap dataframe behaviour
class DFrameCustom(pd.DataFrame, ABC):
pass
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to read state point file
def read_state_point(file_name, file_time, var_name='state', file_time_start=None, file_time_end=None, file_time_frequency='H',
file_columns_type=None, file_columns_name=None, list_columns_excluded=None):
if file_columns_type is None:
file_columns_type = {0: 'dset'}
file_type = list(file_columns_type.values())
if file_time_start == file_time_end:
time_range = pd.DatetimeIndex([file_time_end])
time_n = time_range.__len__()
else:
log_stream.error(' ===> Time steps conditions are not supported!')
raise NotImplementedError('Case not implemented')
if isinstance(file_name, list):
file_name = file_name[0]
dframe_summary = {}
if os.path.exists(file_name):
file_table = pd.read_table(file_name, header=None)
file_row_values = file_table.values.tolist()
id_tot = 0
data_obj = {}
for name_id, name_step in enumerate(file_columns_name):
for type_id, type_step in enumerate(file_type):
file_row_tmp = file_row_values[id_tot]
file_row_step = file_row_tmp[0].strip().split()
if type_step not in list_columns_excluded:
if type_step == 'dam_index':
row_data = [int(elem) for elem in file_row_step]
else:
row_data = [float(elem) for elem in file_row_step]
if type_step not in list(data_obj.keys()):
data_obj[type_step] = {}
data_obj[type_step][name_step] = row_data
id_tot += 1
for var_id, (var_key, var_ts) in enumerate(data_obj.items()):
for var_pivot, var_data in var_ts.items():
dframe_pnt = DFrameCustom(index=time_range)
dframe_pnt.name = var_name
dframe_tmp = pd.DataFrame(index=time_range, data=var_data, columns=[var_pivot])
dframe_tmp.index.name = 'Time'
series_filled = dframe_tmp.iloc[:, 0]
dframe_pnt[var_pivot] = series_filled
if var_key not in list(dframe_summary.keys()):
dframe_summary[var_key] = dframe_pnt
else:
dframe_tmp = dframe_summary[var_key]
dframe_join = dframe_tmp.join(dframe_pnt, how='right')
dframe_join.name = var_name
dframe_summary[var_key] = dframe_join
else:
dframe_summary = None
return dframe_summary
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to read outcome point file
def read_outcome_point(file_name, file_time, file_columns=None, file_map=None, file_ancillary=None):
if file_columns is None:
file_columns = {0: 'dset'}
if not isinstance(file_name, list):
file_name = [file_name]
data_obj = {}
time_step_expected = []
time_step_exists = []
for file_n, (file_step, time_step) in enumerate(zip(file_name, file_time)):
time_step_expected.append(time_step)
if os.path.exists(file_step):
file_size = os.path.getsize(file_step)
if file_size > 0:
file_table = pd.read_table(file_step, header=None)
time_step_exists.append(time_step)
for row_id, row_value in zip(file_table.index, file_table.values):
if row_value.__len__() == 1:
row_value = row_value[0]
else:
raise NotImplementedError(' ===> Length list not allowed')
if row_id not in list(data_obj.keys()):
data_obj[row_id] = [row_value]
else:
row_tmp = data_obj[row_id]
row_tmp.append(row_value)
data_obj[row_id] = row_tmp
else:
log_stream.warning(' ===> Size of ' + file_step + ' is equal to zero. File is empty.')
data_obj = None
if data_obj is not None:
data_var = {}
for data_id, (data_ref, data_ts) in enumerate(data_obj.items()):
if file_ancillary is not None:
data_name = list(file_ancillary.keys())[data_id]
else:
data_name = data_ref
for tag_columns in file_columns.values():
if tag_columns not in list(data_var.keys()):
data_var[tag_columns] = {}
data_var[tag_columns][data_name] = {}
data_var[tag_columns][data_name] = data_ts
time_n = time_step_expected.__len__()
var_data_expected = [-9999.0] * time_n
dframe_summary = {}
dframe_merged = pd.DataFrame(index=time_step_expected)
for var_id, (var_key, var_ts) in enumerate(data_var.items()):
for var_pivot, var_data_defined in var_ts.items():
if file_map is not None:
if var_pivot in list(file_ancillary.keys()):
for map_key, map_fields in file_map.items():
var_data_ancillary = file_ancillary[var_pivot][map_key]
var_lim_min = map_fields['limits'][0]
var_lim_max = map_fields['limits'][1]
if map_fields['type'] == 'constant':
assert np.isscalar(var_data_ancillary)
else:
log_stream.error(' ===> Map key "' + map_key + '" type is not allowed.')
raise NotImplementedError('Case not implemented yet')
if map_key == 'section_baseflow':
var_data_tmp = deepcopy(var_data_defined)
var_data_defined = []
for value_tmp in var_data_tmp:
value_step = value_tmp + var_data_ancillary
if (var_lim_min is not None) and (value_step < var_lim_min):
value_step = value_tmp
if (var_lim_max is not None) and (value_step > var_lim_max):
value_step = var_lim_max
var_data_defined.append(value_step)
dframe_expected = | pd.DataFrame(index=time_step_expected, data=var_data_expected, columns=[var_pivot]) | pandas.DataFrame |
import os
from tensorflow.python.platform import gfile
import tensorflow as tf
import re
import random
import datetime as dt
import moviepy.editor as mpy
import pandas as pd
import json
import matplotlib
matplotlib.use("TkAgg")  # select the backend before pyplot is imported
from matplotlib import pyplot as plt
import numpy as np
import seaborn as sn
from moviepy.editor import VideoFileClip
import glob as glob
from PIL import Image
from settings import FLAGS
import cv2 as cv2
from .video import compute_dense_optical_flow_for_batch
def get_subdirectory_files(dir, depth=1):
depth_string = dir
for i in range(depth):
depth_string += '*/*'
return glob.glob(depth_string)
def files_from_directory(dir_str, file_type):
file_paths = gfile.Glob(os.path.join(dir_str, file_type))
return [os.path.basename(i) for i in file_paths]
def file_paths_from_directory(dir_str, file_type):
file_paths = gfile.Glob(os.path.join(dir_str, file_type))
return file_paths
def get_filename_and_filetype_from_path(path):
"""extracts and returns both filename (e.g. 'video_1') and filetype (e.g. 'mp4') from a given absolute path"""
filename = os.path.basename(path)
video_id, filetype = filename.split(".")
return video_id, filetype
def get_metadata_dict_as_bytes(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def get_video_id_from_path(path_str, type=None):
video_name = os.path.basename(path_str)
if type == 'activity_net':
p = re.compile('^([a-zA-Z0-9_-]+_[0-9]+)_\d{3}')
elif type == 'youtube8m':
p = re.compile('^([a-zA-Z0-9_-]+)_[0-9]+x[0-9]+')
elif type == 'UCF101':
p = re.compile('^([a-zA-Z0-9_-]+)_[0-9]+x[0-9]+')
elif type == 'flyingshapes':
video_id = video_name.split('_')[0]
return video_id
elif type == '20bn_valid' or type == '20bn_train':
return video_name.replace('.avi', '').replace('.mp4', '').split('_')[0]
else: #just return filename without extension
return video_name.replace('.avi', '').replace('.mp4', '')
video_id = p.match(video_name).group(1)
return video_id
def shuffle_files_in_list(paths_list, seed=5):
"""
generates a list of randomly shuffled paths of the files contained in the provided directories
:param paths_list: list with different path locations containing the files
:return: returns two lists, one with the content of all the given directories in the provided order and another
containing the same list randomly shuffled
"""
assert paths_list is not None
all_files = []
for path_entry in paths_list:
print(path_entry)
all_files.extend(file_paths_from_directory(path_entry, '*.avi'))
random.seed(a=seed)
return all_files, random.sample(all_files, len(all_files))
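# Usage sketch (added for illustration): the directory below is a placeholder
# and must point at a folder containing .avi clips; the same seed always yields
# the same shuffled order.
def _demo_shuffle_files_in_list(video_dir='/data/videos/train'):
  ordered, shuffled = shuffle_files_in_list([video_dir], seed=5)
  return ordered, shuffled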
def shuffle_files_in_list_from_categories(paths_list, categories, metadata_path, type='youtube8m', seed=5):
"""
generates a list of randomly shuffled paths of the files contained in the provided directories which match at least one
of the given categories from the 'categories' list. metadata (json file) must be provided to determine a file's category.
:param paths_list: list with different path locations containing the files
:param categories: list that works as a filter, e.g. ['Train'] only gives files that are of category train
:param metadata_path: path to the json file (mostly provided by the dataset authors)
:param type: specifies the dataset (e.g. 'UCF101')
:return: returns two lists, one with the content of all the given directories in the provided order and another
containing the same list randomly shuffled
"""
assert paths_list is not None
assert categories is not None
assert os.path.isfile(metadata_path)
with open(metadata_path) as file:
metadata_file = json.load(file)
all_files = []
# first get all possible files
for path_entry in paths_list:
print(path_entry)
all_files.extend(file_paths_from_directory(path_entry, '*.avi'))
# then discard all files from the list not belonging to one of the given categories
  for file_path in list(all_files):  # iterate over a copy; removing from the list being looped over would skip files
file_prefix = get_video_id_from_path(file_path, type)
value = next(v for (k, v) in metadata_file.items() if file_prefix + '.mp4' in k)
file_category = []
if value is not None and 'label' in value:
file_category = value['label']
if file_category not in categories or file_category is None:
print(file_path + ' removed (category: ' + file_category + ')')
all_files.remove(file_path)
random.seed(a=seed)
return all_files, random.sample(all_files, len(all_files))
def create_session_dir(output_dir): #TODO move to utils
assert(output_dir)
dir_name = str(dt.datetime.now().strftime("%m-%d-%y_%H-%M"))
output_dir = os.path.join(output_dir, dir_name)
if not os.path.isdir(output_dir):
os.mkdir(output_dir)
print('Created custom directory for session:', dir_name)
return output_dir
def create_subfolder(dir, name):
subdir = os.path.join(dir, name)
if not os.path.isdir(subdir):
os.mkdir(subdir)
print('Created subdir:', subdir)
return subdir
else:
return os.path.join(dir, name)
def store_output_frames_as_frames(output_frames, labels, output_dir):
""" Stores frame sequence produced by model as gif
Args:
output_frames: list with Tensors of shape [batch_size, frame_height, frame_width, num_channels],
each element corresponds to one frame in the produced gifs
labels: list with video_id's of shape [batch_size, label]
output_dir: path to output directory
"""
assert os.path.isdir(output_dir)
batch_size = output_frames[0].shape[0]
for i in range(batch_size): #iterate over validation instances
clip_array = [bgr_to_rgb(frame[i,:,:,:]) for frame in output_frames]
subdir = create_subfolder(output_dir, str(labels[i].decode('utf-8')))
clip = mpy.ImageSequenceClip(clip_array, fps=10).to_RGB()
clip.write_images_sequence(os.path.join(subdir, 'generated_clip_frame%03d.png'))
def store_output_frames_as_gif(output_frames, labels, output_dir):
""" Stores frame sequence produced by model as gif
Args:
output_frames: list with Tensors of shape [batch_size, frame_height, frame_width, num_channels],
each element corresponds to one frame in the produced gifs
labels: list with video_id's of shape [batch_size, label]
output_dir: path to output directory
"""
assert os.path.isdir(output_dir)
batch_size = output_frames[0].shape[0]
for i in range(batch_size): # iterate over validation instances
clip_array = [bgr_to_rgb(frame[i, :, :, :]) for frame in output_frames]
clip = mpy.ImageSequenceClip(clip_array, fps=10).to_RGB()
clip.write_gif(os.path.join(output_dir, 'generated_clip_' + str(labels[i].decode('utf-8')) + '.gif'),
program='ffmpeg')
def bgr_to_rgb(frame):
  # swap the blue and red channels in place; copies avoid the view aliasing
  # that would otherwise overwrite one channel before it is read
  blue_channel = frame[:, :, 0].copy()
  red_channel = frame[:, :, 2].copy()
  frame[:, :, 2] = blue_channel
  frame[:, :, 0] = red_channel
return frame
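# Tiny sanity check for the channel swap above (added illustration, not part of
# the original module): a frame that is pure blue in BGR layout should come out
# pure blue in RGB layout, i.e. the 255 moves from channel 0 to channel 2.
def _demo_bgr_to_rgb():
  frame = np.zeros((2, 2, 3), dtype=np.uint8)
  frame[:, :, 0] = 255  # blue channel in BGR ordering
  rgb = bgr_to_rgb(frame)
  assert rgb[0, 0, 2] == 255 and rgb[0, 0, 0] == 0
  return rgb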
def write_file_with_append(path, text_to_append):
'''this function checks if a file (given by path) is available. If not, it creates the file and appends the text
(given by text_to_append) and if it does exist it appends the text and the end of the file'''
if os.path.exists(path):
append_write = 'a' # append if already exists
else:
append_write = 'w' # make a new file if not
with open(path, append_write) as f:
    if append_write == 'w':
f.write(str(text_to_append))
else:
f.write('\n' + str(text_to_append))
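# Usage sketch for write_file_with_append (added illustration; the default path
# is a throwaway temp file, adjust as needed): the first call creates the file,
# subsequent calls append on new lines.
def _demo_write_file_with_append(path='/tmp/append_demo.txt'):
  write_file_with_append(path, 'first line')
  write_file_with_append(path, 'second line')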
def write_metainfo(output_dir, model_name, flags):
with open(os.path.join(output_dir, 'metainfo.txt'), 'a') as f:
f.write('\n' + '---- Training: ' + str(dt.datetime.now()) + ' ----' + '\n')
f.write('model' + ': ' + str(os.path.basename(model_name)) + '\n') #print model name
for key, value in flags.__flags.items():
f.write(str(key) + ': ' + str(value) + '\n')
def store_dataframe(dataframe, output_dir, file_name):
assert os.path.exists(output_dir), "invalid path to output directory"
full_path = os.path.join(output_dir, file_name)
dataframe.to_pickle(full_path)
print("Dumped df pickle to ", full_path)
def store_latent_vectors_as_df(output_dir, hidden_representations, labels, metadata, video_file_paths=None, filename=None):
"""" exports the latent representation of the last encoder layer (possibly activations of fc layer if fc-flag activated)
and the video metadata as a pandas dataframe in python3 pickle format
:param output_dir: the path where the pickle file should be stored
:param hidden_representations: numpy array containing the activations
:param labels: the corresponding video id's
:param shapes: the corresponding shape of the object in the video
:param video_file_paths: path to videos (episodes) corresponding to memory instance
:param filename: name of the pickle file - if not provided, a filename is created automatically
Example shape of stored objects if no fc layer used
(each ndarray)
hidden repr file: shape(1000,8,8,16), each of 0..1000 representing the activation of encoder neurons
labels file: shape(1000,), each of 0..1000 representing the video_id for the coresponding activations
"""
# create 2 column df including hidden representations and labels/ids
hidden_representations = [hidden_representations[i] for i in
range(hidden_representations.shape[0])] # converts 2d ndarray to list of 1d ndarrays
if video_file_paths is not None:
hidden_rep_df = pd.DataFrame({'label': labels, 'hidden_repr': hidden_representations, 'video_file_path': video_file_paths})
else:
hidden_rep_df = pd.DataFrame({'label': labels, 'hidden_repr': hidden_representations})
hidden_rep_df['label'] = hidden_rep_df['label'].map(lambda x: x.decode('utf-8'))
# create dataframe from metadata
f = lambda x: x.decode('utf-8')
metadata_df = pd.DataFrame.from_dict([json.loads(f(e)) for e in list(metadata)], orient='columns')
#merge dataframes to one
df = pd.merge(hidden_rep_df, metadata_df, left_on='label', right_on='id')
if 'label_x' in df.columns:
df = df.drop_duplicates('label_x')
if not filename:
filename = os.path.join(output_dir, 'metadata_and_hidden_rep_df_' + str(dt.datetime.now().strftime("%m-%d-%y_%H-%M-%S")) +'.pickle')
print("exported df has shape:", df.shape)
df.to_pickle(filename)
print("Dumped df pickle to", filename)
return df
def store_encoder_latent_vector(output_dir, hidden_representations, labels, produce_single_files=True):
"""" exports the latent representation of the last encoder layer (possibly activations of fc layer if fc-flag activated)
and the video labels that created the activations as npy files.
:param output_dir: the path where the files should be stored
:param hidden_representations: numpy array containing the activations
:param labels: the corresponding video id's
:param produce_single_files: export representations as single files
Example shape of stored objects if no fc layer used
(each ndarray)
hidden repr file: shape(1000,8,8,16), each of 0..1000 representing the activation of encoder neurons
labels file: shape(1000,), each of 0..1000 representing the video_id for the coresponding activations
"""
assert os.path.isdir(output_dir) and hidden_representations.size > 0 and labels.size > 0, \
'Storing latent representation failed: Output dir does not exist or latent vector or/and label vector empty'
if produce_single_files:
for single_rep_itr in range(hidden_representations.shape[0]):
file_name = os.path.join(output_dir, labels[single_rep_itr].decode('utf-8'))
np.save(file_name, hidden_representations[single_rep_itr])
else:
tag = str(dt.datetime.now().strftime("%m-%d-%y_%H-%M-%S"))
file_name_hidden = os.path.join(output_dir, tag + '_hidden_repr')
np.save(file_name_hidden, hidden_representations)
file_name_label = os.path.join(output_dir, tag + '_label')
np.save(file_name_label, labels)
def store_plot(output_dir, name1, name2="", name3="", suffix=".png"):
assert output_dir is not None
assert name1 is not None
file_name = os.path.join(os.path.dirname(output_dir),
name1 + name2 + name3 + suffix)
plt.savefig(file_name, dpi=100)
print('Dumped plot to:', file_name)
def export_plot_from_pickle(pickle_file_path, plot_options=((64, 64), 15, 15), show=False):
"""
Loads a pickle file, generates a seaborn heatmap from its data and saves it to the dir of the specified pickle_file.
:param pickle_file_path: the full path to the pickle file.
:param plot_options: list of settings for matplotlib and seaborn. First list element specifies figure size as a
tuple e.g. 64x64. Second list element specifies font_scale for seaborn as a single integer, e.g. 15. Third list
element specifies annotation font size as a single integer, e.g. 15)
:param show: defines wheter this function should also show the generated plot in the GUI.
:return: the plot
"""
assert os.path.isfile(pickle_file_path)
df = | pd.read_pickle(pickle_file_path) | pandas.read_pickle |
"""Pandas/Numpy common recipes."""
import os
import scipy
import numpy as np
import pandas as pd
def rename_duplicates(series, delim="-"):
"""Rename duplicate values to be unique. ['a', 'a'] will become ['a', 'a-1'], for example.
:param series: series with values to rename
:type series: pandas.Series
:param delim: delimeter before duplicate-number index, defaults to "-"
:type delim: str, optional
:return: series where original duplicates have been renamed to -1, -2, etc.
:rtype: pandas.Series
"""
duplicate_suffix = (
series.groupby(series).cumcount().astype(str).replace("0", "")
    ) # a number for all but first occurrence
extra_strs = delim + duplicate_suffix
# remove entries that are just the delim
extra_strs = extra_strs.replace(delim, "")
# add to values
out = series.astype(str) + extra_strs
# confirm unique (may fail if a-1 happened to match another element that preexisted!)
assert out.nunique() == out.shape[0]
return out
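# Example of rename_duplicates on a small Series (added for illustration):
# repeated labels get a numeric suffix while the first occurrence is untouched.
def _demo_rename_duplicates():
    renamed = rename_duplicates(pd.Series(["a", "a", "b"]))
    assert list(renamed) == ["a", "a-1", "b"]
    return renamed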
def merge_into_left(left, right, **kwargs):
"""Defensively merge [right] series or dataframe into [left] by index, preserving [left]'s index exactly. [right] data will be reordered to match [left] index.
:param left: left data whose index will be preserved
:type left: pandas.DataFrame or pandas.Series
:param right: right data which will be reordered based on left index.
:type right: pandas.DataFrame or pandas.Series
:param \**kwargs: passed to pandas.merge
:return: left-merged DataFrame with [left]'s index
:rtype: pandas.DataFrame
"""
# defensively cast to dataframe
df1 = | pd.DataFrame(left) | pandas.DataFrame |
import csv
import numpy as np
import matplotlib.pyplot as plt
import array
import pandas as pd
from scipy import stats
## INPUT
## Baseline years
ybl_st = 1992
ybl_ed = 2014
## Target range
yta_st = 1970
yta_ed = 2005
odata = | pd.read_csv('raw_stat.csv') | pandas.read_csv |
import io
import logging
from typing import Any
import pandas as pd
import helpers.hpandas as hpandas
import helpers.hprint as hprint
import helpers.hunit_test as hunitest
_LOG = logging.getLogger(__name__)
class Test_to_series1(hunitest.TestCase):
def helper(self, n: int, exp: str) -> None:
vals = list(range(n))
df = pd.DataFrame([vals], columns=[f"a{i}" for i in vals])
df = df.T
_LOG.debug("df=\n%s", df)
srs = hpandas.to_series(df)
_LOG.debug("srs=\n%s", srs)
act = str(srs)
self.assert_equal(act, exp, dedent=True, fuzzy_match=True)
def test1(self) -> None:
n = 0
exp = r"""
Series([], dtype: float64)
"""
self.helper(n, exp)
def test2(self) -> None:
n = 1
exp = r"""
a0 0
dtype: int64"""
self.helper(n, exp)
def test3(self) -> None:
n = 5
exp = r"""
a0 0
a1 1
a2 2
a3 3
a4 4
Name: 0, dtype: int64"""
self.helper(n, exp)
# #############################################################################
class Test_trim_df1(hunitest.TestCase):
def get_df(self, *args: Any, **kwargs: Any) -> pd.DataFrame:
"""
Return a df where the CSV txt is read verbatim without inferring dates.
The `start_time` column is thus a str.
"""
txt = """
,start_time,egid,close
4,2022-01-04 21:38:00.000000,13684,1146.48
8,2022-01-04 21:38:00.000000,17085,179.45
14,2022-01-04 21:37:00.000000,13684,1146.26
18,2022-01-04 21:37:00.000000,17085,179.42
24,2022-01-04 21:36:00.000000,13684,1146.0
27,2022-01-04 21:36:00.000000,17085,179.46
34,2022-01-04 21:35:00.000000,13684,1146.0
38,2022-01-04 21:35:00.000000,17085,179.42
40,2022-01-04 21:34:00.000000,17085,179.42
44,2022-01-04 21:34:00.000000,13684,1146.0
"""
txt = hprint.dedent(txt)
df = pd.read_csv(io.StringIO(txt), *args, index_col=0, **kwargs)
return df
def test_types1(self):
"""
Check the types of a df coming from `read_csv()`.
The timestamps in `start_time` are left as strings.
"""
df = self.get_df()
#
act = hprint.df_to_short_str("df", df, print_dtypes=True)
exp = r"""# df=
df.index in [4, 44]
df.columns=start_time,egid,close
df.shape=(10, 3)
df.type=
index: int64 <class 'numpy.int64'> 4
start_time: object <class 'str'> 2022-01-04 21:38:00.000000
egid: int64 <class 'numpy.int64'> 13684
close: float64 <class 'numpy.float64'> 1146.48
start_time egid close
4 2022-01-04 21:38:00.000000 13684 1146.48
8 2022-01-04 21:38:00.000000 17085 179.45
14 2022-01-04 21:37:00.000000 13684 1146.26
...
38 2022-01-04 21:35:00.000000 17085 179.42
40 2022-01-04 21:34:00.000000 17085 179.42
44 2022-01-04 21:34:00.000000 13684 1146.00"""
self.assert_equal(act, exp, fuzzy_match=True)
def get_df_with_parse_dates(self) -> pd.DataFrame:
"""
Read the CSV parsing `start_time` as timestamps.
The inferred type is a nasty `datetime64` which is not as well-
behaved as our beloved `pd.Timestamp`.
"""
df = self.get_df(parse_dates=["start_time"])
return df
def test_types2(self):
"""
Check the types of a df coming from `read_csv()` forcing parsing some
values as dates.
"""
df = self.get_df_with_parse_dates()
# Check.
act = hprint.df_to_short_str("df", df, print_dtypes=True)
exp = r"""# df=
df.index in [4, 44]
df.columns=start_time,egid,close
df.shape=(10, 3)
df.type=
index: int64 <class 'numpy.int64'> 4
start_time: datetime64[ns] <class 'numpy.datetime64'> 2022-01-04T21:38:00.000000000
egid: int64 <class 'numpy.int64'> 13684
close: float64 <class 'numpy.float64'> 1146.48
start_time egid close
4 2022-01-04 21:38:00 13684 1146.48
8 2022-01-04 21:38:00 17085 179.45
14 2022-01-04 21:37:00 13684 1146.26
...
38 2022-01-04 21:35:00 17085 179.42
40 2022-01-04 21:34:00 17085 179.42
44 2022-01-04 21:34:00 13684 1146.00"""
self.assert_equal(act, exp, fuzzy_match=True)
def get_df_with_tz_timestamp(self) -> pd.DataFrame:
"""
Force the column parsed as `datetime64` into a tz-aware object.
The resulting object is a `datetime64[ns, tz]`.
"""
df = self.get_df_with_parse_dates()
# Apply the tz.
col_name = "start_time"
df[col_name] = (
df[col_name].dt.tz_localize("UTC").dt.tz_convert("America/New_York")
)
df[col_name] = pd.to_datetime(df[col_name])
return df
def test_types3(self):
"""
Check the types of a df coming from `read_csv()` after conversion to
tz-aware objects.
"""
df = self.get_df_with_tz_timestamp()
# Check.
act = hprint.df_to_short_str("df", df, print_dtypes=True)
exp = r"""# df=
df.index in [4, 44]
df.columns=start_time,egid,close
df.shape=(10, 3)
df.type=
index: int64 <class 'numpy.int64'> 4
start_time: datetime64[ns, America/New_York] <class 'numpy.datetime64'> 2022-01-04T21:38:00.000000000
egid: int64 <class 'numpy.int64'> 13684
close: float64 <class 'numpy.float64'> 1146.48
start_time egid close
4 2022-01-04 16:38:00-05:00 13684 1146.48
8 2022-01-04 16:38:00-05:00 17085 179.45
14 2022-01-04 16:37:00-05:00 13684 1146.26
...
38 2022-01-04 16:35:00-05:00 17085 179.42
40 2022-01-04 16:34:00-05:00 17085 179.42
44 2022-01-04 16:34:00-05:00 13684 1146.00"""
self.assert_equal(act, exp, fuzzy_match=True)
def test_trim_df1(self):
"""
        In general one can't filter a df column represented as `str`
        using a `pd.Timestamp` (either tz-aware or tz-naive).
        Pandas does some conversion for us when we filter on the index; when
        the timestamps are in a column, we have to handle it ourselves:
        `trim_df` does that by converting the column to `pd.Timestamp`
        (a standalone sketch of this conversion follows this test class).
"""
df = self.get_df()
# Run.
ts_col_name = "start_time"
start_ts = pd.Timestamp("2022-01-04 21:35:00")
end_ts = pd.Timestamp("2022-01-04 21:38:00")
left_close = True
right_close = True
df_trim = hpandas.trim_df(
df, ts_col_name, start_ts, end_ts, left_close, right_close
)
# Check.
act = hprint.df_to_short_str("df_trim", df_trim, print_dtypes=True)
exp = r"""# df_trim=
df.index in [4, 38]
df.columns=start_time,egid,close
df.shape=(8, 3)
df.type=
index: int64 <class 'numpy.int64'> 4
start_time: object <class 'str'> 2022-01-04 21:38:00.000000
egid: int64 <class 'numpy.int64'> 13684
close: float64 <class 'numpy.float64'> 1146.48
start_time egid close
4 2022-01-04 21:38:00.000000 13684 1146.48
8 2022-01-04 21:38:00.000000 17085 179.45
14 2022-01-04 21:37:00.000000 13684 1146.26
...
27 2022-01-04 21:36:00.000000 17085 179.46
34 2022-01-04 21:35:00.000000 13684 1146.00
38 2022-01-04 21:35:00.000000 17085 179.42"""
self.assert_equal(act, exp, fuzzy_match=True)
def test_trim_df2(self):
"""
Trim a df with a column that is `datetime64` without tz using a
`pd.Timestamp` without tz.
This operation is valid.
"""
df = self.get_df_with_parse_dates()
# Run.
ts_col_name = "start_time"
        start_ts = pd.Timestamp("2022-01-04 21:35:00")
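

# A standalone sketch (not part of the test class above) of the idea described in
# `Test_trim_df1.test_trim_df1`: to filter on a timestamp column stored as `str`,
# convert it to `pd.Timestamp` first and then compare. `hpandas.trim_df` is assumed
# to do something along these lines internally; the helper below is illustrative only.
import io

import pandas as pd


def _trim_on_str_column_sketch() -> pd.DataFrame:
    txt = """idx,start_time,close
4,2022-01-04 21:38:00.000000,1146.48
14,2022-01-04 21:37:00.000000,1146.26
34,2022-01-04 21:35:00.000000,1146.00
44,2022-01-04 21:34:00.000000,1146.00
"""
    df = pd.read_csv(io.StringIO(txt), index_col=0)
    # `start_time` is a `str` column: comparing it directly to a `pd.Timestamp` fails.
    start_ts = pd.Timestamp("2022-01-04 21:35:00")
    end_ts = pd.Timestamp("2022-01-04 21:38:00")
    # Convert to `pd.Timestamp` and apply a closed-interval mask, as `trim_df` does.
    ts_col = pd.to_datetime(df["start_time"])
    mask = (ts_col >= start_ts) & (ts_col <= end_ts)
    return df[mask]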
# Builtins
import yaml
import datetime as dt
from typing import Any, Dict, List, Tuple
# External libraries
import krakenex
import pandas as pd
# Submodule imports
from harvest.api._base import API
from harvest.utils import *
class Kraken(API):
interval_list = [
"1MIN",
"5MIN",
"15MIN",
"30MIN",
"1HR",
"4HR",
"1DAY",
"7DAY",
"15DAY",
]
crypto_ticker_to_kraken_names = {
"BTC": "XXBT",
"ETH": "XETH",
"ADA": "ADA",
"USDT": "USDT",
"XRP": "XXRP",
"SOL": "SOL",
"DOGE": "XDG",
"DOT": "DOT",
"USDC": "USDC",
"UNI": "UNI",
"LTC": "XLTC",
"LINK": "LINK",
"BCH": "BCH",
"FIL": "FIL",
"MATIC": "MATIC",
"WBTC": "WBTC",
"ETC": "XETC",
"XLM": "XXLM",
"TRX": "TRX",
"DAI": "DAI",
"EOS": "EOS",
"ATOM": "ATOM",
"AAVE": "AAVE",
"XMR": "XXMR",
"AXS": "AXS",
"GRT": "GRT",
"XTZ": "XXTZ",
"ALGO": "ALGO",
"MKR": "MKR",
"KSM": "KSM",
"WAVE": "WAVE",
"COMP": "COMP",
"DASH": "DASH",
"CHZ": "CHZ",
"ZEC": "XZEC",
"MANA": "MANA",
"ENJ": "ENJ",
"SUSHI": "SUSHI",
"YFI": "YFI",
"QTUM": "QTUM",
"FLOW": "FLOW",
"SNX": "SNX",
"BAT": "BAT",
"SC": "SC",
"ICX": "ICX",
"PERP": "PERP",
"BNT": "BNT",
"OMG": "OMG",
"CRV": "CRV",
"ZRX": "ZRX",
"NANO": "NANO",
"ANKR": "ANKR",
"SAND": "SAND",
"REN": "REN",
"KAVA": "KAVA",
"MINA": "MINA",
"1INCH": "1INCH",
"GHST": "GHST",
"ANT": "ANT",
"REP": "XREP",
"REPV2": "XREPV2",
"BADGER": "BADGER",
"BAL": "BAL",
"BAND": "BAND",
"CTSI": "CTSI",
"CQT": "CQT",
"EWT": "EWT",
"MLN": "XMLN",
"ETH2": "ETH2",
"GNO": "GNO",
"INJ": "INJ",
"KAR": "KAR",
"KEEP": "KEEP",
"KNC": "KNC",
"LSK": "LSK",
"LTP": "LTP",
"LRC": "LRC",
"MIR": "MIR",
"OCEAN": "OCEAN",
"PAXG": "PAXG",
"RARI": "RARI",
"REN": "REN",
"XRP": "XXRP",
"SRM": "SRM",
"STORJ": "STORJ",
"TBTC": "TBTC",
"OGN": "OGN",
"OXT": "OXT",
}
def __init__(self, path: str = None):
super().__init__(path)
self.api = krakenex.API(self.config["api_key"], self.config["secret_key"])
def setup(self, watch: List[str], interval: str, trader=None, trader_main=None):
        self.watch_cryptos = []
        for s in watch:
            if is_crypto(s):
                self.watch_cryptos.append(s)
            else:
                debugger.error("Kraken does not support stocks.")
self.option_cache = {}
super().setup(watch, interval, interval, trader, trader_main)
def exit(self):
self.option_cache = {}
def main(self):
df_dict = {}
df_dict.update(self.fetch_latest_crypto_price())
self.trader_main(df_dict)
@API._exception_handler
def fetch_latest_crypto_price(self):
dfs = {}
for symbol in self.watch_cryptos:
dfs[symbol] = self.fetch_price_history(
symbol, self.interval, now() - dt.timedelta(days=7), now()
).iloc[[0]]
return dfs
# -------------- Streamer methods -------------- #
@API._exception_handler
def fetch_price_history(
self,
symbol: str,
interval: str,
start: dt.datetime = None,
end: dt.datetime = None,
):
debugger.debug(f"Fetching {symbol} {interval} price history")
if start is None:
start = now() - dt.timedelta(days=365 * 5)
elif not has_timezone(start):
start = set_system_timezone(start)
if end is None:
end = now()
elif not has_timezone(end):
end = set_system_timezone(end)
if start >= end:
            return pd.DataFrame()
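

# A minimal standalone sketch (an assumption, not the actual `harvest.utils`
# implementation) of the timezone helpers used in `fetch_price_history` above:
# `has_timezone` checks whether a datetime is tz-aware and `set_system_timezone`
# attaches a timezone to a naive datetime. Shown only to clarify how the
# `start`/`end` arguments are normalized; these helpers are not used by the class.
import datetime as dt


def _has_timezone_sketch(ts: dt.datetime) -> bool:
    # A datetime is tz-aware iff tzinfo is set and yields a UTC offset.
    return ts.tzinfo is not None and ts.tzinfo.utcoffset(ts) is not None


def _set_system_timezone_sketch(ts: dt.datetime) -> dt.datetime:
    # Attach the local system timezone to a naive datetime without shifting the wall-clock time.
    local_tz = dt.datetime.now().astimezone().tzinfo
    return ts.replace(tzinfo=local_tz)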
"""
Purpose: Stackoverflow answer
Date created: 2020-12-13
https://stackoverflow.com/questions/65267138/python-pandas-how-to-match-data-from-one-dataframe-to-another/65275924#65275924
Contributor(s):
<NAME>.
"""
import pandas as pd
"""
Username Stock 1 Stock 2
0 JB3004 TSLA MSFT
1 JM3009 SHOP SPOT
2 DB0208 TWTR MSFT
3 AB3011 TWTR PTON
4 CB3004 MSFT TSLA
TWTR SPOT PTON SHOP MSFT TSLA
Date Adj Close Adj Close Adj Close Adj Close Adj Close Adj Close
2020-12-11 51.44 341.22 117.1 1057.87 213.26 609.99
"""
ddict1 = {
"username": ["JB3004","JM3009","DB0208","AB3011","CB3004",],
"Stock 1": ["TSLA","SHOP","TWTR","TWTR","MSFT",],
"Stock 2": ["MSFT","SPOT","MSFT","PTON","TSLA",],
}
ddict2 = {
"TWTR": ["Adj Close", 51.44],
"SPOT": ["Adj Close", 341.22],
"PTON": ["Adj Close", 117.10],
"SHOP": ["Adj Close", 1057.87],
"MSFT": ["Adj Close", 213.26],
"TSLA": ["Adj Close", 609.99],
}
df1 = pd.DataFrame(ddict1)
df2 = pd.DataFrame(ddict2, index=["Date", "2020-12-11"])
df2t = df2.stack().reset_index().rename(
columns={
"level_0":"date",
"level_1":"stock",
0:"closing_price",
},
)
df2t = df2t.loc[df2t["date"] != "Date", :]
df1m = pd.melt(df1, id_vars=["username"], value_vars=["Stock 1", "Stock 2"])
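
# One way to finish the matching (a sketch; not necessarily the exact continuation of
# the original answer): join the melted (username, stock) pairs with the per-ticker
# closing prices. `df_merged` is a new name introduced here for illustration.
df_merged = df1m.merge(df2t, how="left", left_on="value", right_on="stock")
# One row per (username, Stock 1/Stock 2) with its closing price.
print(df_merged[["username", "variable", "value", "closing_price"]])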
import cufflinks as cf
from plotly.offline import init_notebook_mode
import plotly.graph_objects as go
import plotly.express as px
import os
import pandas as pd
import numpy as np
from dropbox_api import update_on_dropbox
from index import generate_index
from gauss import Gauss
cf.go_offline()
init_notebook_mode(connected=True)
###
countries_to_track = [
'Australia',
'Austria',
'China',
'Czechia',
'Colombia',
'France',
'Germany',
'India',
'Italy',
'Norway',
'Spain',
'Sweden',
'Switzerland',
'US',
'United Kingdom',
'Brazil'
]
states_to_track = ['Illinois', 'Massachusetts','Ohio']
counties_to_track = ['Suffolk']
###
homedir = os.path.dirname(os.path.realpath(__file__))
os.chdir(homedir)
git_repo = os.path.join(homedir, "..","COVID-19")
if os.path.exists(git_repo):
os.chdir(git_repo)
os.system("git pull")
os.chdir(homedir)
else:
os.chdir(os.path.join(homedir, ".."))
os.system("git clone https://github.com/CSSEGISandData/COVID-19")
os.chdir(homedir)
if not os.path.exists("plots"):
os.mkdir("plots")
_map = {"Cape Verde": "Cabo Verde",
"Czech Republic": 'Czechia',
"South Korea": "Korea, South",
"Taiwan": "Taiwan*",
"United States": "US"}
pop = pd.read_csv("population.csv")[["name","pop2019"]]
pop.replace(_map,inplace=True)
pop.index = pop.name
del pop["name"]
pop.pop2019*=1000
recovered_global = "../COVID-19/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_recovered_global.csv"
confirmed_global = "../COVID-19/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv"
death_global = "../COVID-19/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv"
confirmed_US_series = "../COVID-19/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_US.csv"
deaths_US_series = "../COVID-19/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_US.csv"
def parse_time_series(path):
df = pd.read_csv(path)
df = df.groupby("Country/Region").sum()
df.drop(labels=["Lat", "Long"],axis=1, inplace= True)
df = df.transpose()
df.index = pd.to_datetime(df.index, format="%m/%d/%y")
return df
recovered = parse_time_series(recovered_global)
confirmed = parse_time_series(confirmed_global)
death = parse_time_series(death_global)
def parse_time_series_states(path):
labels=["UID","code3","FIPS","Lat", "Long_", "Population"]
df = pd.read_csv(path)
df = df.groupby("Province_State").sum()
for l in labels:
if l in df.columns:
df.drop(labels=l,axis=1, inplace= True)
df = df.transpose()
df.index = pd.to_datetime(df.index, format="%m/%d/%y")
return df
confirmed_US = parse_time_series_states(confirmed_US_series)
deaths_US = parse_time_series_states(deaths_US_series)
## check if country names are right
unknown = []
known = []
for country in countries_to_track:
if country not in confirmed.columns:
unknown.append(country)
else:
known.append(country)
other = list(set(confirmed.columns) - set(known))
## check if US state names are right
unknown_US = []
known_US = []
for state in states_to_track:
if state not in confirmed_US.columns:
unknown_US.append(state)
else:
known_US.append(state)
other = list(set(confirmed_US.columns) - set(known_US))
### preparing filtered data
### Global
confirmed_filtered = confirmed[known]
death_filtered = death[known]
cfrm = confirmed_filtered.iloc[-1,:]
dths = death_filtered.iloc[-1,:]
con_dea = pd.DataFrame(data={"confirmed": cfrm, "dead": dths}).transpose()
confirmed_growth = confirmed_filtered.diff().iloc[1:,:]
all_growth = confirmed.diff().iloc[1:,:]
death_growth = death_filtered.diff().iloc[1:,:]
### US
confirmed_filtered_US = confirmed_US[known_US]
death_filtered_US = deaths_US[known_US]
cfrm_us = confirmed_filtered_US.iloc[-1,:]
dths_us = death_filtered_US.iloc[-1,:]
con_dea_us = pd.DataFrame(data={"confirmed_US": cfrm_us, "dead_US": dths_us}).transpose()
confirmed_growth_us = confirmed_filtered_US.diff().iloc[1:,:]
all_growth_us = confirmed_US.diff().iloc[1:,:]
death_growth_us = death_filtered_US.diff().iloc[1:,:]
### normalizing data
confirmed_normed = pd.DataFrame()
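# A sketch of one plausible way to fill `confirmed_normed` (an assumption -- the original
# continuation of this script is not shown): confirmed cases per 100,000 inhabitants,
# using the `pop` table loaded above. Assumes one population row per country name.
for _country in confirmed_filtered.columns:
    if _country in pop.index:
        confirmed_normed[_country] = (
            confirmed_filtered[_country] / pop.loc[_country, "pop2019"] * 100_000
        )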
import base64
import io
import textwrap
import dash
import dash_core_components as dcc
import dash_html_components as html
import gunicorn
import plotly.graph_objs as go
from dash.dependencies import Input, Output, State
import flask
import pandas as pd
import urllib.parse
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.decomposition import PCA
import numpy as np
import math
import scipy.stats
import dash_table
from dash_table.Format import Format, Scheme
from colour import Color
import dash_bootstrap_components as dbc
# from waitress import serve
external_stylesheets = [dbc.themes.BOOTSTRAP, 'https://codepen.io/chriddyp/pen/bWLwgP.css',
"https://codepen.io/sutharson/pen/dyYzEGZ.css",
"https://fonts.googleapis.com/css2?family=Raleway&display=swap",
"https://codepen.io/chriddyp/pen/brPBPO.css"]
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
server = app.server
# "external_url": "https://codepen.io/chriddyp/pen/brPBPO.css"
# https://raw.githubusercontent.com/aaml-analytics/pca-explorer/master/LoadingStatusStyleSheet.css
styles = {
'pre': {
'border': 'thin lightgrey solid',
'overflowX': 'scroll'
}
}
tabs_styles = {'height': '40px', 'font-family': 'Raleway', 'fontSize': 14}
tab_style = {
'borderBottom': '1px solid #d6d6d6',
'padding': '6px',
'Weight': 'bold'
}
tab_selected_style = {
'borderTop': '3px solid #333333',
'borderBottom': '1px solid #d6d6d6 ',
'backgroundColor': '#f6f6f6',
'color': '#333333',
# 'fontColor': '#004a4a',
'fontWeight': 'bold',
'padding': '6px'
}
# APP ABOUT DESCRIPTION
MOF_tool_about = textwrap.wrap(' These tools aim to provide a reproducible and consistent data visualisation platform '
'where experimental and computational researchers can use big data and statistical '
'analysis to find the best materials for specific applications. Principal Component '
'Analysis (PCA) is a dimension reduction technique that can be used to reduce a large '
'set of observable variables to a smaller set of latent variables that still contain '
'most of the information in the large set (feature extraction). This is done by '
'transforming a number of (possibly) correlated variables into some number of orthogonal '
'(uncorrelated) variables called principal components to find the directions of maximal '
'variance. PCA can be used to ease data visualisation by having fewer dimensions to plot '
'or be used as a pre-processing step before using another Machine Learning (ML)'
' algorithm for regression '
'and classification tasks. PCA can be used to improve an ML algorithm performance, '
'reduce overfitting and reduce noise in data.',
width=50)
Scree_plot_about = textwrap.wrap(' The Principal Component Analysis Visualisation Tools run PCA for the user and '
                                 'populate a Scree plot. This plot allows the user to determine if PCA is suitable '
                                 'for their dataset and whether they can accept an X% drop in explained variance '
                                 'in exchange for having fewer dimensions.', width=50)
Feature_correlation_filter = textwrap.wrap("Feature correlation heatmaps provide users with feature analysis and "
"feature principal component analysis. This tool will allow users to see the"
" correlation between variables and the"
" covariances/correlations between original variables and the "
"principal components (loadings)."
, width=50)
plots_analysis = textwrap.wrap('Users can keep all variables as features or drop certain variables to produce a '
'Biplot, cos2 plot and contribution plot. The score plot is used to look for clusters, '
'trends, and outliers in the first two principal components. The loading plot is used to'
' visually interpret the first two principal components. The biplot overlays the score '
'plot and the loading plot on the same graph. The squared cosine (cos2) plot shows '
'the importance of a component for a given observation i.e. measures '
'how much a variable is represented in a component. The contribution plot contains the '
'contributions (%) of the variables to the principal components', width=50, )
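
# A small self-contained numeric sketch (for orientation only; the callbacks further
# below do the real work on the uploaded data) of the quantities described in the text
# above: explained variance (Scree plot), loadings, cos2 and contributions. The cos2
# and contribution formulas follow the usual convention (squared loadings, and their
# per-component percentage share); this is an assumption, not a restatement of the
# exact callback code.
def _pca_quantities_sketch():
    rng = np.random.RandomState(0)
    demo = pd.DataFrame(rng.normal(size=(50, 4)), columns=["A", "B", "C", "D"])
    x_demo = StandardScaler().fit_transform(demo)
    pca_demo = PCA(n_components=4).fit(x_demo)
    explained = pca_demo.explained_variance_ratio_ * 100  # Scree plot values (%)
    loadings = pca_demo.components_.T * np.sqrt(pca_demo.explained_variance_)
    cos2 = loadings ** 2  # how much each variable is represented in each component
    contrib = cos2 / cos2.sum(axis=0) * 100  # % contribution of each variable per PC
    return explained, loadings, cos2, contrib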
data_table_download = textwrap.wrap("The user's inputs from the 'Plots' tab will provide the output of the data tables."
                                    " The user can download the scores, eigenvalues, explained variance, "
                                    "cumulative explained variance, loadings, "
                                    "cos2 and contributions from the populated data tables. "
                                    "Note: Wait for user inputs to be"
                                    " computed (the faded tab will return to its original colour) before downloading the"
                                    " data tables. ", width=50)
MOF_GH = textwrap.wrap(" to explore AAML's sample data and read more on"
" AAML's Principal Component Analysis Visualisation Tool Manual, FAQ's & Troubleshooting"
" on GitHub... ", width=50)
####################
# APP LAYOUT #
####################
fig = go.Figure()
fig1 = go.Figure()
app.layout = html.Div([
html.Div([
html.Img(
src='https://raw.githubusercontent.com/aaml-analytics/mof-explorer/master/UOC.png',
height='35', width='140', style={'display': 'inline-block', 'padding-left': '1%'}),
html.Img(src='https://raw.githubusercontent.com/aaml-analytics/mof-explorer/master/A2ML-logo.png',
height='50', width='125', style={'float': 'right', 'display': 'inline-block', 'padding-right': '2%'}),
html.H1("Principal Component Analysis Visualisation Tools",
style={'display': 'inline-block', 'padding-left': '11%', 'text-align': 'center', 'fontSize': 36,
'color': 'white', 'font-family': 'Raleway'}),
html.H1("...", style={'fontColor': '#3c3c3c', 'fontSize': 6})
], style={'backgroundColor': '#333333'}),
html.Div([html.A('Refresh', href='/')], style={}),
html.Div([
html.H2("Upload Data", style={'fontSize': 24, 'font-family': 'Raleway', 'color': '#333333'}, ),
        html.H3("Upload .txt, .csv or .xls files to start exploring data...", style={'fontSize': 16,
'font-family': 'Raleway'}),
dcc.Store(id='csv-data', storage_type='session', data=None),
html.Div([dcc.Upload(
id='data-table-upload',
children=html.Div([html.Button('Upload File')],
style={'height': "60px", 'borderWidth': '1px',
'borderRadius': '5px',
'textAlign': 'center',
}),
multiple=False
),
html.Div(id='output-data-upload'),
]), ], style={'display': 'inline-block', 'padding-left': '1%', }),
html.Div([dcc.Tabs([
dcc.Tab(label='About', style=tab_style, selected_style=tab_selected_style,
children=[html.Div([html.H2(" What are AAML's Principal Component Analysis Visualisation Tools?",
style={'fontSize': 18, 'font-family': 'Raleway', 'font-weight': 'bold'
}),
html.Div([' '.join(MOF_tool_about)]
, style={'font-family': 'Raleway'}),
html.H2(["Scree Plot"],
style={'fontSize': 18,
'font-family': 'Raleway', 'font-weight': 'bold'}),
html.Div([' '.join(Scree_plot_about)], style={'font-family': 'Raleway'}),
html.H2(["Feature Correlation"], style={'fontSize': 18,
'font-weight': 'bold',
'font-family': 'Raleway'}),
html.Div([' '.join(Feature_correlation_filter)], style={'font-family': 'Raleway', }),
html.H2(["Plots"],
style={'fontSize': 18, 'font-weight': 'bold',
'font-family': 'Raleway'}),
html.Div([' '.join(plots_analysis)], style={'font-family': 'Raleway'}),
html.H2(["Data tables"],
style={'fontSize': 18, 'font-weight': 'bold',
'font-family': 'Raleway'}),
html.Div([' '.join(data_table_download)], style={'font-family': 'Raleway'}),
# ADD LINK
html.Div([html.Plaintext(
[' Click ', html.A('here ',
href='https://github.com/aaml-analytics/pca-explorer')],
style={'display': 'inline-block',
'fontSize': 14, 'font-family': 'Raleway'}),
html.Div([' '.join(MOF_GH)], style={'display': 'inline-block',
'fontSize': 14,
'font-family': 'Raleway'}),
html.Img(
src='https://raw.githubusercontent.com/aaml-analytics/mof'
'-explorer/master/github.png',
height='40', width='40',
style={'display': 'inline-block', 'float': "right"
})
]
, style={'display': 'inline-block'})
], style={'backgroundColor': '#ffffff', 'padding-left': '1%'}
)]),
dcc.Tab(label='Scree Plot', style=tab_style, selected_style=tab_selected_style,
children=[
html.Div([dcc.Graph(id='PC-Eigen-plot')
],
style={'display': 'inline-block',
'width': '49%'}),
html.Div([dcc.Graph(id='PC-Var-plot')
], style={'display': 'inline-block', 'float': 'right',
'width': '49%'}),
html.Div(
[html.Label(["Remove outliers (if any) in analysis:", dcc.RadioItems(
id='outlier-value',
options=[{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
])
], style={'display': 'inline-block',
'width': '49%', 'padding-left': '1%'}),
html.Div([html.Label(["Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-scree',
options=[{'label': 'Correlation', 'value': 'Correlation'},
{'label': 'Covariance', 'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '49%', }),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.Label(["You should attempt to use at least..."
, html.Div(id='var-output-container-filter')])
], style={'padding-left': '1%'}
),
html.Div([
html.Label(["As a rule of thumb for the Scree Plot"
" Eigenvalues, the point where the slope of the curve "
"is clearly "
"leveling off (the elbow), indicates the number of "
"components that "
"should be retained as significant."])
], style={'padding-left': '1%'}),
]),
dcc.Tab(label='Feature correlation', style=tab_style,
selected_style=tab_selected_style,
children=[html.Div([html.Div([dcc.Graph(id='PC-feature-heatmap')
], style={'width': '47%',
'display': 'inline-block',
'float': 'right'}),
html.Div([dcc.Graph(id='feature-heatmap')
], style={'width': '51%',
'display': 'inline-block',
'float': 'left'}),
html.Div([html.Label(["Loading colour bar range:"
, html.Div(
id='color-range-container')])
], style={
'fontSize': 12,
'float': 'right',
'width': '100%',
'padding-left': '85%'}
),
html.Div(
[html.Label(
["Remove outliers (if any) in analysis:",
dcc.RadioItems(
id='PC-feature-outlier-value',
options=[{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
])
], style={'display': 'inline-block',
'width': '29%', 'padding-left': '1%'}),
html.Div([html.Label(
["Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-heatmap',
options=[{'label': 'Correlation', 'value': 'Correlation'},
{'label': 'Covariance', 'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '39%', }),
html.Div([html.Label(["Select color scale:",
dcc.RadioItems(
id='colorscale',
options=[{'label': i, 'value': i}
for i in
['Viridis', 'Plasma']],
value='Plasma'
)]),
], style={'display': 'inline-block',
'width': '29%', 'padding-left': '1%'}),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
                                                           html.P("Multicollinearity, which occurs when a number of "
                                                                  "variables are highly correlated, is usually dealt "
                                                                  "with in one of two ways:"),
                                                           html.P("1) Use PCA to obtain a set of orthogonal ("
                                                                  "not correlated) variables to analyse."),
                                                           # A sketch of approach 2) is given after the layout definition below.
                                                           html.P("2) Use the coefficient of determination (R²) to "
                                                                  "determine which variables are highly "
                                                                  "correlated and keep only one of each pair in the "
                                                                  "analysis. A common cut-off for highly correlated "
                                                                  "variables is ~0.7."),
html.P(
"In any case, it depends on the machine learning algorithm you may apply later. For correlation robust algorithms,"
" such as Random Forest, correlation of features will not be a concern. For non-correlation robust algorithms such as Linear Discriminant Analysis, "
"all high correlation variables should be removed.")
], style={'padding-left': '1%'}
),
html.Div([
html.Label(["Note: Data has been standardised (scale)"])
], style={'padding-left': '1%'})
])
]),
dcc.Tab(label='Plots', style=tab_style,
selected_style=tab_selected_style,
children=[html.Div([
html.Div([html.P("Selecting Features")], style={'padding-left': '1%',
'font-weight': 'bold'}),
html.Div([
html.P("Input here affects all plots, datatables and downloadable data output"),
html.Label([
"Would you like to analyse all variables or choose custom variables to "
"analyse:",
dcc.RadioItems(
id='all-custom-choice',
options=[{'label': 'All',
'value': 'All'},
{'label': 'Custom',
'value': 'Custom'}],
value='All'
)])
], style={'padding-left': '1%'}),
html.Div([
                        html.P("For custom variables, input the variables you would not like as features in your PCA:"),
html.Label(
[
"Note: Only input numerical variables (non-numerical variables have already "
"been removed from your dataframe)",
dcc.Dropdown(id='feature-input',
multi=True,
)])
], style={'padding': 10, 'padding-left': '1%'}),
]), dcc.Tabs(id='sub-tabs1', style=tabs_styles,
children=[
dcc.Tab(label='Biplot (Scores + loadings)', style=tab_style,
selected_style=tab_selected_style,
children=[
html.Div([dcc.Graph(id='biplot', figure=fig)
], style={'height': '100%', 'width': '75%',
'padding-left': '20%'},
),
html.Div(
[html.Label(
["Remove outliers (if any) in analysis:",
dcc.RadioItems(
id='outlier-value-biplot',
options=[
{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
])
], style={'display': 'inline-block',
'width': '29%', 'padding-left': '1%'}),
html.Div([html.Label([
"Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-biplot',
options=[{'label': 'Correlation',
'value': 'Correlation'},
{'label': 'Covariance',
'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '39%', }),
html.Div([
                                                        html.Label([
                                                            "Update the graph to show either loadings (Loading Plot) or "
                                                            "scores and loadings (Biplot):",
dcc.RadioItems(
id='customvar-graph-update',
options=[{'label': 'Biplot',
'value': 'Biplot'},
{'label': 'Loadings',
'value': 'Loadings'}],
value='Biplot')
])
], style={'display': 'inline-block',
'width': '29%', 'padding-left': '1%'}),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix. PCA is an unsupervised machine learning technique - it only "
"looks at the input features and does not take "
"into account the output or the target"
" (response) variable.")],
style={'padding-left': '1%'}),
html.Div([
html.P("For variables you have dropped..."),
html.Label([
"Would you like to introduce a first target variable"
" into your data visualisation?"
" (Graph type must be Biplot): "
"",
dcc.RadioItems(
id='radio-target-item',
options=[{'label': 'Yes',
'value': 'Yes'},
{'label': 'No',
'value': 'No'}],
value='No'
)])
], style={'width': '49%', 'padding-left': '1%',
'display': 'inline-block'}),
html.Div([
html.Label([
"Select first target variable for color scale of scores: ",
dcc.Dropdown(
id='color-scale-scores',
)])
], style={'width': '49%', 'padding-left': '1%',
'display': 'inline-block'}),
html.Div([
                                                      html.Label([
                                                          "Would you like to introduce a second target variable"
                                                          " into your data visualisation?"
                                                          " (Graph type must be Biplot):",
dcc.RadioItems(
id='radio-target-item-second',
options=[{'label': 'Yes',
'value': 'Yes'},
{'label': 'No',
'value': 'No'}],
value='No'
)])
], style={'width': '49%', 'padding-left': '1%',
'display': 'inline-block'}),
html.Div([
html.Label([
"Select second target variable for size scale of scores:",
dcc.Dropdown(
id='size-scale-scores',
)])
], style={'width': '49%', 'padding-left': '1%',
'display': 'inline-block'}),
html.Div([html.Label(["Size range:"
, html.Div(
id='size-second-target-container')])
], style={'display': 'inline-block',
'float': 'right',
'padding-right': '5%'}
),
html.Div([
html.Br(),
html.P(
"A loading plot shows how "
"strongly each characteristic (variable)"
" influences a principal component. The angles between the vectors"
" tell us how characteristics correlate with one another: "),
html.P("1) When two vectors are close, forming a small angle, the two "
"variables they represent are positively correlated. "),
html.P(
"2) If they meet each other at 90°, they are not likely to be correlated. "),
                                                                html.P(
                                                                    "3) When they diverge and form a large angle (close to 180°), they are negatively correlated."),
html.P(
"The Score Plot involves the projection of the data onto the PCs in two dimensions."
"The plot contains the original data but in the rotated (PC) coordinate system"),
html.P(
"A biplot merges a score plot and loading plot together.")
], style={'padding-left': '1%'}
),
]),
dcc.Tab(label='Cos2', style=tab_style,
selected_style=tab_selected_style,
children=[
html.Div([dcc.Graph(id='cos2-plot', figure=fig)
], style={'width': '65%',
'padding-left': '25%'},
),
html.Div(
[html.Label(["Remove outliers (if any) in analysis:",
dcc.RadioItems(
id='outlier-value-cos2',
options=[
{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
])
], style={'display': 'inline-block',
'padding-left': '1%',
'width': '49%'}),
html.Div([html.Label([
"Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-cos2',
options=[{'label': 'Correlation',
'value': 'Correlation'},
{'label': 'Covariance',
'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '49%', }),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.P("The squared cosine shows the importance of a "
"component for a given observation i.e. "
"measures "
" how much a variable is represented in a "
"component")
], style={'padding-left': '1%'}),
]),
dcc.Tab(label='Contribution', style=tab_style,
selected_style=tab_selected_style,
children=[
html.Div([dcc.Graph(id='contrib-plot', figure=fig)
], style={'width': '65%',
'padding-left': '25%'},
),
html.Div(
[html.Label(["Remove outliers (if any) in analysis:",
dcc.RadioItems(
id='outlier-value-contrib',
options=[
{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
], style={'padding-left': '1%'})
], style={'display': 'inline-block',
'width': '49%'}),
html.Div([html.Label([
"Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-contrib',
options=[{'label': 'Correlation',
'value': 'Correlation'},
{'label': 'Covariance',
'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '49%', }),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.P("The contribution plot contains the "
"contributions (in percentage) of the "
"variables to the principal components")
], style={'padding-left': '1%'}),
])
])
]),
dcc.Tab(label='Data tables', style=tab_style,
selected_style=tab_selected_style,
children=[html.Div([
html.Div([
html.Label(
["Note: Input in 'Plots' tab will provide output of data tables and the"
" downloadable PCA data"])
], style={'font-weight': 'bold', 'padding-left': '1%'}),
html.Div([html.A(
'Download PCA Data (scores for each principal component)',
id='download-link',
href="",
target="_blank"
)], style={'padding-left': '1%'}),
html.Div([html.Label(["Remove outliers (if any) in analysis:",
dcc.RadioItems(id="eigenA-outlier",
options=[{'label': 'Yes',
'value': 'Yes'},
{'label': 'No',
'value': 'No'}],
value='No'
)])], style={'padding-left': '1%',
'display': 'inline-block', 'width': '49%'}),
html.Div([html.Label(["Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-data-table',
options=[{'label': 'Correlation', 'value': 'Correlation'},
{'label': 'Covariance', 'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '49%', }),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.Div([
html.Label(["Correlation between Features"])
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-correlation',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-correlation-container'),
]),
html.Div([html.A(
'Download Feature Correlation data',
id='download-link-correlation',
href="",
target="_blank"
)]),
], style={'padding': 20}),
html.Div([
html.Div([
html.Label(["Eigen Analysis of the correlation matrix"]),
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-eigenA',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-eigenA-container'),
]),
html.Div([html.A(
'Download Eigen Analysis data',
id='download-link-eigenA',
href="",
download='Eigen_Analysis_data.csv',
target="_blank"
)]),
], style={'padding': 20}),
html.Div([
html.Div([
html.Label(["Loadings (Feature and PC correlation) from PCA"]),
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-loadings',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-loadings-container'),
]),
html.Div([html.A(
'Download Loadings data',
id='download-link-loadings',
download='Loadings_data.csv',
href="",
target="_blank"
)]),
], style={'padding': 20}),
html.Div([
html.Div([
html.Label(["Cos2 from PCA"])
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-cos2',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-cos2-container'),
]),
html.Div([html.A(
'Download Cos2 data',
id='download-link-cos2',
download='Cos2_data.csv',
href="",
target="_blank"
)]),
], style={'padding': 20}),
html.Div([
html.Div([
html.Label(["Contributions from PCA"])
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-contrib',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-contrib-container'),
]),
html.Div([html.A(
'Download Contributions data',
id='download-link-contrib',
download='Contributions_data.csv',
href="",
target="_blank"
)]),
], style={'padding': 20}),
])])
])
], style={'font-family': 'Raleway'})])
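
# A sketch of approach 2) from the multicollinearity note in the "Feature correlation"
# tab: flag pairs of features whose absolute pairwise correlation exceeds ~0.7 so that
# only one of each pair is kept for analysis. Illustrative only; this helper is not
# wired into any callback.
def _highly_correlated_pairs(dff: pd.DataFrame, threshold: float = 0.7):
    corr = dff.corr().abs()
    cols = corr.columns
    pairs = []
    for i in range(len(cols)):
        for j in range(i + 1, len(cols)):
            if corr.iloc[i, j] >= threshold:
                pairs.append((cols[i], cols[j], float(corr.iloc[i, j])))
    return pairs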
# READ FILE
def parse_contents(contents, filename):
content_type, content_string = contents.split(',')
decoded = base64.b64decode(content_string)
try:
if 'csv' in filename:
# Assume that the user uploaded a CSV file
df = pd.read_csv(io.StringIO(decoded.decode('utf-8')))
            df = df.fillna(0)
        elif 'xls' in filename:
            # Assume that the user uploaded an excel file
            df = pd.read_excel(io.BytesIO(decoded))
            df = df.fillna(0)
        elif 'txt' in filename or 'tsv' in filename:
            df = pd.read_csv(io.StringIO(decoded.decode('utf-8')), delimiter=r'\s+')
            df = df.fillna(0)
        else:
            # Unsupported extension: raise so the except branch reports the error.
            raise ValueError("Unsupported file type: {}".format(filename))
except Exception as e:
print(e)
return html.Div([
'There was an error processing this file.'
])
return df
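
# A quick illustrative call (not used by the app): Dash's Upload component delivers
# `contents` as "data:<mime>;base64,<payload>", which is why `parse_contents` splits on
# the comma before base64-decoding. The CSV content below is made up purely for this demo.
def _parse_contents_demo() -> pd.DataFrame:
    raw_csv = "name,x,y\ns1,1.0,2.0\ns2,3.0,4.0\n"
    payload = base64.b64encode(raw_csv.encode("utf-8")).decode("utf-8")
    contents = "data:text/csv;base64," + payload
    return parse_contents(contents, "demo.csv")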
@app.callback(Output('csv-data', 'data'),
[Input('data-table-upload', 'contents')],
[State('data-table-upload', 'filename')])
def parse_uploaded_file(contents, filename):
if not filename:
return dash.no_update
df = parse_contents(contents, filename)
    df = df.fillna(0)
return df.to_json(date_format='iso', orient='split')
@app.callback(Output('PC-Var-plot', 'figure'),
[Input('outlier-value', 'value'),
Input('matrix-type-scree', 'value'),
Input('csv-data', 'data')],
)
def update_graph_stat(outlier, matrix_type, data):
traces = []
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if outlier == 'No' and matrix_type == 'Correlation':
features1 = dff.columns
features = list(features1)
x = dff.loc[:, features].values
# Separating out the target (if any)
# Standardizing the features to {mean, variance} = {0, 1}
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=len(features))
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data=principalComponents
, columns=['PC' + str(i + 1) for i in range(len(features))])
finalDf = pd.concat([df[[df.columns[0]]], principalDf], axis=1)
loading = pca.components_.T * np.sqrt(pca.explained_variance_)
loading_df = pd.DataFrame(data=loading[0:, 0:], index=features,
columns=['PC' + str(i + 1) for i in range(loading.shape[1])])
Var = pca.explained_variance_ratio_
PC_df = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features))], columns=['Principal Component'])
Var_df = pd.DataFrame(data=Var, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum = Var_df.cumsum()
Var_dff = pd.concat([PC_df, (Var_cumsum * 100)], axis=1)
data = Var_dff
elif outlier == 'Yes' and matrix_type == 'Correlation':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier].values
# Standardizing the features
x_outlier = StandardScaler().fit_transform(x_outlier)
pca_outlier = PCA(n_components=len(features_outlier))
principalComponents_outlier = pca_outlier.fit_transform(x_outlier)
principalDf_outlier = pd.DataFrame(data=principalComponents_outlier
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
        # combining principal components and target
finalDf_outlier = pd.concat([outlier_names, principalDf_outlier], axis=1)
# calculating loading
loading_outlier = pca_outlier.components_.T * np.sqrt(pca_outlier.explained_variance_)
loading_df_outlier = pd.DataFrame(data=loading_outlier[0:, 0:], index=features_outlier,
columns=['PC' + str(i + 1) for i in range(loading_outlier.shape[1])])
Var_outlier = pca_outlier.explained_variance_ratio_
PC_df_outlier = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier))],
columns=['Principal Component'])
Var_df_outlier = pd.DataFrame(data=Var_outlier, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier = Var_df_outlier.cumsum()
Var_dff_outlier = pd.concat([PC_df_outlier, (Var_cumsum_outlier * 100)], axis=1)
data = Var_dff_outlier
elif outlier == 'No' and matrix_type == 'Covariance':
features1_covar = dff.columns
features_covar = list(features1_covar)
x = dff.loc[:, features_covar].values
pca_covar = PCA(n_components=len(features_covar))
principalComponents_covar = pca_covar.fit_transform(x)
principalDf_covar = pd.DataFrame(data=principalComponents_covar
, columns=['PC' + str(i + 1) for i in range(len(features_covar))])
finalDf_covar = pd.concat([df[[df.columns[0]]], principalDf_covar], axis=1)
loading_covar = pca_covar.components_.T * np.sqrt(pca_covar.explained_variance_)
loading_df_covar = pd.DataFrame(data=loading_covar[0:, 0:], index=features_covar,
columns=['PC' + str(i + 1) for i in range(loading_covar.shape[1])])
Var_covar = pca_covar.explained_variance_ratio_
PC_df_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_covar))],
columns=['Principal Component'])
Var_df_covar = pd.DataFrame(data=Var_covar, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_covar = Var_df_covar.cumsum()
Var_dff_covar = pd.concat([PC_df_covar, (Var_cumsum_covar * 100)], axis=1)
data = Var_dff_covar
elif outlier == 'Yes' and matrix_type == 'Covariance':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier_covar = outlier_dff.columns
features_outlier_covar = list(features1_outlier_covar)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier_covar].values
pca_outlier_covar = PCA(n_components=len(features_outlier_covar))
principalComponents_outlier_covar = pca_outlier_covar.fit_transform(x_outlier)
principalDf_outlier_covar = pd.DataFrame(data=principalComponents_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier_covar))])
        # combining principal components and target
finalDf_outlier_covar = pd.concat([outlier_names, principalDf_outlier_covar], axis=1)
# calculating loading
loading_outlier_covar = pca_outlier_covar.components_.T * np.sqrt(pca_outlier_covar.explained_variance_)
loading_df_outlier_covar = pd.DataFrame(data=loading_outlier_covar[0:, 0:], index=features_outlier_covar,
columns=['PC' + str(i + 1) for i in
range(loading_outlier_covar.shape[1])])
Var_outlier_covar = pca_outlier_covar.explained_variance_ratio_
PC_df_outlier_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier_covar))],
columns=['Principal Component'])
Var_df_outlier_covar = pd.DataFrame(data=Var_outlier_covar,
columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier_covar = Var_df_outlier_covar.cumsum()
Var_dff_outlier_covar = pd.concat([PC_df_outlier_covar, (Var_cumsum_outlier_covar * 100)], axis=1)
data = Var_dff_outlier_covar
traces.append(go.Scatter(x=data['Principal Component'], y=data['Cumulative Proportion of Explained Variance'],
mode='lines', line=dict(color='Red')))
return {'data': traces,
'layout': go.Layout(title='<b>Cumulative Scree Plot Proportion of Explained Variance</b>',
titlefont=dict(family='Helvetica', size=16),
xaxis={'title': 'Principal Component',
'mirror': True,
'ticks': 'outside',
'showline': True,
'showspikes': True
}, yaxis={'title': 'Cumulative Explained Variance',
'mirror': True,
'ticks': 'outside',
'showline': True,
'showspikes': True,
'range': [0, 100]},
hovermode='closest', font=dict(family="Helvetica"), template="simple_white")
}
@app.callback(
Output('var-output-container-filter', 'children'),
[Input('outlier-value', 'value'),
Input('matrix-type-scree', 'value'),
Input('csv-data', 'data')],
)
def update_output(outlier, matrix_type, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if outlier == 'No' and matrix_type == 'Correlation':
features1 = dff.columns
features = list(features1)
x = dff.loc[:, features].values
# Separating out the target (if any)
# Standardizing the features to {mean, variance} = {0, 1}
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=len(features))
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data=principalComponents
, columns=['PC' + str(i + 1) for i in range(len(features))])
        # combining principal components and target
finalDf = pd.concat([df[[df.columns[0]]], principalDf], axis=1)
dfff = finalDf
loading = pca.components_.T * np.sqrt(pca.explained_variance_)
loading_df = pd.DataFrame(data=loading[0:, 0:], index=features,
columns=['PC' + str(i + 1) for i in range(loading.shape[1])])
loading_dff = loading_df.T
Var = pca.explained_variance_ratio_
PC_df = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features))], columns=['Principal Component'])
PC_num = [float(i + 1) for i in range(len(features))]
Var_df = pd.DataFrame(data=Var, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum = Var_df.cumsum()
Var_dff = pd.concat([PC_df, (Var_cumsum * 100)], axis=1)
PC_interp = np.interp(70, Var_dff['Cumulative Proportion of Explained Variance'], PC_num)
PC_interp_int = math.ceil(PC_interp)
return "'{}' principal components (≥70% of explained variance) to avoid losing too much of your " \
"data. Note that there is no required threshold in order for PCA to be valid." \
" ".format(PC_interp_int)
elif outlier == 'Yes' and matrix_type == "Correlation":
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier].values
# Separating out the target (if any)
y_outlier = outlier_dff.loc[:, ].values
# Standardizing the features
x_outlier = StandardScaler().fit_transform(x_outlier)
pca_outlier = PCA(n_components=len(features_outlier))
principalComponents_outlier = pca_outlier.fit_transform(x_outlier)
principalDf_outlier = pd.DataFrame(data=principalComponents_outlier
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
        # combining principal components and target
finalDf_outlier = pd.concat([outlier_names, principalDf_outlier], axis=1)
dfff_outlier = finalDf_outlier
# calculating loading
loading_outlier = pca_outlier.components_.T * np.sqrt(pca_outlier.explained_variance_)
loading_df_outlier = pd.DataFrame(data=loading_outlier[0:, 0:], index=features_outlier,
columns=['PC' + str(i + 1) for i in range(loading_outlier.shape[1])])
loading_dff_outlier = loading_df_outlier.T
Var_outlier = pca_outlier.explained_variance_ratio_
PC_df_outlier = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier))],
columns=['Principal Component'])
PC_num_outlier = [float(i + 1) for i in range(len(features_outlier))]
Var_df_outlier = pd.DataFrame(data=Var_outlier, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier = Var_df_outlier.cumsum()
Var_dff_outlier = pd.concat([PC_df_outlier, (Var_cumsum_outlier * 100)], axis=1)
PC_interp_outlier = np.interp(70, Var_dff_outlier['Cumulative Proportion of Explained Variance'],
PC_num_outlier)
PC_interp_int_outlier = math.ceil(PC_interp_outlier)
return "'{}' principal components (≥70% of explained variance) to avoid losing too much of your " \
"data. Note that there is no required threshold in order for PCA to be valid." \
" ".format(PC_interp_int_outlier)
elif outlier == 'No' and matrix_type == 'Covariance':
features1_covar = dff.columns
features_covar = list(features1_covar)
x = dff.loc[:, features_covar].values
pca_covar = PCA(n_components=len(features_covar))
principalComponents_covar = pca_covar.fit_transform(x)
principalDf_covar = pd.DataFrame(data=principalComponents_covar
, columns=['PC' + str(i + 1) for i in range(len(features_covar))])
finalDf_covar = pd.concat([df[[df.columns[0]]], principalDf_covar], axis=1)
loading_covar = pca_covar.components_.T * np.sqrt(pca_covar.explained_variance_)
loading_df_covar = pd.DataFrame(data=loading_covar[0:, 0:], index=features_covar,
columns=['PC' + str(i + 1) for i in range(loading_covar.shape[1])])
Var_covar = pca_covar.explained_variance_ratio_
PC_df_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_covar))],
columns=['Principal Component'])
PC_num_covar = [float(i + 1) for i in range(len(features_covar))]
Var_df_covar = pd.DataFrame(data=Var_covar, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_covar = Var_df_covar.cumsum()
Var_dff_covar = pd.concat([PC_df_covar, (Var_cumsum_covar * 100)], axis=1)
PC_interp_covar = np.interp(70, Var_dff_covar['Cumulative Proportion of Explained Variance'], PC_num_covar)
PC_interp_int_covar = math.ceil(PC_interp_covar)
return "'{}' principal components (≥70% of explained variance) to avoid losing too much of your " \
"data. Note that there is no required threshold in order for PCA to be valid." \
" ".format(PC_interp_int_covar)
elif outlier == 'Yes' and matrix_type == 'Covariance':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier_covar = outlier_dff.columns
features_outlier_covar = list(features1_outlier_covar)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier_covar].values
pca_outlier_covar = PCA(n_components=len(features_outlier_covar))
principalComponents_outlier_covar = pca_outlier_covar.fit_transform(x_outlier)
principalDf_outlier_covar = pd.DataFrame(data=principalComponents_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier_covar))])
        # combining principal components and target
finalDf_outlier_covar = pd.concat([outlier_names, principalDf_outlier_covar], axis=1)
# calculating loading
loading_outlier_covar = pca_outlier_covar.components_.T * np.sqrt(pca_outlier_covar.explained_variance_)
loading_df_outlier_covar = pd.DataFrame(data=loading_outlier_covar[0:, 0:], index=features_outlier_covar,
columns=['PC' + str(i + 1) for i in
range(loading_outlier_covar.shape[1])])
Var_outlier_covar = pca_outlier_covar.explained_variance_ratio_
PC_df_outlier_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier_covar))],
columns=['Principal Component'])
PC_num_outlier_covar = [float(i + 1) for i in range(len(features_outlier_covar))]
Var_df_outlier_covar = pd.DataFrame(data=Var_outlier_covar,
columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier_covar = Var_df_outlier_covar.cumsum()
Var_dff_outlier_covar = pd.concat([PC_df_outlier_covar, (Var_cumsum_outlier_covar * 100)], axis=1)
PC_interp_outlier = np.interp(70, Var_dff_outlier_covar['Cumulative Proportion of Explained Variance'],
PC_num_outlier_covar)
PC_interp_int_outlier = math.ceil(PC_interp_outlier)
return "'{}' principal components (≥70% of explained variance) to avoid losing too much of your " \
"data. Note that there is no required threshold in order for PCA to be valid." \
" ".format(PC_interp_int_outlier)
@app.callback(Output('PC-Eigen-plot', 'figure'),
[Input('outlier-value', 'value'),
Input('matrix-type-scree', 'value'),
Input('csv-data', 'data')]
)
def update_graph_stat(outlier, matrix_type, data):
traces = []
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if outlier == 'No' and matrix_type == "Correlation":
features1 = dff.columns
features = list(features1)
x = dff.loc[:, features].values
# Separating out the target (if any)
y = dff.loc[:, ].values
# Standardizing the features to {mean, variance} = {0, 1}
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=len(features))
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data=principalComponents
, columns=['PC' + str(i + 1) for i in range(len(features))])
        # combining principal components and target
finalDf = pd.concat([df[[df.columns[0]]], principalDf], axis=1)
dfff = finalDf
loading = pca.components_.T * np.sqrt(pca.explained_variance_)
loading_df = pd.DataFrame(data=loading[0:, 0:], index=features,
columns=['PC' + str(i + 1) for i in range(loading.shape[1])])
loading_dff = loading_df.T
Var = pca.explained_variance_ratio_
PC_df = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features))], columns=['Principal Component'])
PC_num = [float(i + 1) for i in range(len(features))]
Var_df = pd.DataFrame(data=Var, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum = Var_df.cumsum()
Var_dff = pd.concat([PC_df, (Var_cumsum * 100)], axis=1)
PC_interp = np.interp(70, Var_dff['Cumulative Proportion of Explained Variance'], PC_num)
PC_interp_int = math.ceil(PC_interp)
eigenvalues = pca.explained_variance_
Eigen_df = pd.DataFrame(data=eigenvalues, columns=['Eigenvalues'])
Eigen_dff = pd.concat([PC_df, Eigen_df], axis=1)
data = Eigen_dff
elif outlier == 'Yes' and matrix_type == "Correlation":
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier].values
# Separating out the target (if any)
y_outlier = outlier_dff.loc[:, ].values
# Standardizing the features
x_outlier = StandardScaler().fit_transform(x_outlier)
pca_outlier = PCA(n_components=len(features_outlier))
principalComponents_outlier = pca_outlier.fit_transform(x_outlier)
principalDf_outlier = pd.DataFrame(data=principalComponents_outlier
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
        # combining principal components and target
finalDf_outlier = pd.concat([outlier_names, principalDf_outlier], axis=1)
dfff_outlier = finalDf_outlier
# calculating loading
loading_outlier = pca_outlier.components_.T * np.sqrt(pca_outlier.explained_variance_)
loading_df_outlier = pd.DataFrame(data=loading_outlier[0:, 0:], index=features_outlier,
columns=['PC' + str(i + 1) for i in range(loading_outlier.shape[1])])
loading_dff_outlier = loading_df_outlier.T
Var_outlier = pca_outlier.explained_variance_ratio_
PC_df_outlier = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier))],
columns=['Principal Component'])
PC_num_outlier = [float(i + 1) for i in range(len(features_outlier))]
Var_df_outlier = pd.DataFrame(data=Var_outlier, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier = Var_df_outlier.cumsum()
Var_dff_outlier = pd.concat([PC_df_outlier, (Var_cumsum_outlier * 100)], axis=1)
PC_interp_outlier = np.interp(70, Var_dff_outlier['Cumulative Proportion of Explained Variance'],
PC_num_outlier)
PC_interp_int_outlier = math.ceil(PC_interp_outlier)
eigenvalues_outlier = pca_outlier.explained_variance_
Eigen_df_outlier = pd.DataFrame(data=eigenvalues_outlier, columns=['Eigenvalues'])
Eigen_dff_outlier = pd.concat([PC_df_outlier, Eigen_df_outlier], axis=1)
data = Eigen_dff_outlier
elif outlier == 'No' and matrix_type == "Covariance":
features1_covar = dff.columns
features_covar = list(features1_covar)
x = dff.loc[:, features_covar].values
pca_covar = PCA(n_components=len(features_covar))
principalComponents_covar = pca_covar.fit_transform(x)
principalDf_covar = pd.DataFrame(data=principalComponents_covar
, columns=['PC' + str(i + 1) for i in range(len(features_covar))])
finalDf_covar = pd.concat([df[[df.columns[0]]], principalDf_covar], axis=1)
loading_covar = pca_covar.components_.T * np.sqrt(pca_covar.explained_variance_)
loading_df_covar = pd.DataFrame(data=loading_covar[0:, 0:], index=features_covar,
columns=['PC' + str(i + 1) for i in range(loading_covar.shape[1])])
Var_covar = pca_covar.explained_variance_ratio_
PC_df_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_covar))],
columns=['Principal Component'])
PC_num_covar = [float(i + 1) for i in range(len(features_covar))]
Var_df_covar = pd.DataFrame(data=Var_covar, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_covar = Var_df_covar.cumsum()
Var_dff_covar = pd.concat([PC_df_covar, (Var_cumsum_covar * 100)], axis=1)
PC_interp_covar = np.interp(70, Var_dff_covar['Cumulative Proportion of Explained Variance'], PC_num_covar)
PC_interp_int_covar = math.ceil(PC_interp_covar)
eigenvalues_covar = pca_covar.explained_variance_
Eigen_df_covar = pd.DataFrame(data=eigenvalues_covar, columns=['Eigenvalues'])
Eigen_dff_covar = pd.concat([PC_df_covar, Eigen_df_covar], axis=1)
data = Eigen_dff_covar
elif outlier == 'Yes' and matrix_type == 'Covariance':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier_covar = outlier_dff.columns
features_outlier_covar = list(features1_outlier_covar)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier_covar].values
pca_outlier_covar = PCA(n_components=len(features_outlier_covar))
principalComponents_outlier_covar = pca_outlier_covar.fit_transform(x_outlier)
principalDf_outlier_covar = pd.DataFrame(data=principalComponents_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier_covar))])
        # combining principal components and target
finalDf_outlier_covar = pd.concat([outlier_names, principalDf_outlier_covar], axis=1)
# calculating loading
loading_outlier_covar = pca_outlier_covar.components_.T * np.sqrt(pca_outlier_covar.explained_variance_)
loading_df_outlier_covar = pd.DataFrame(data=loading_outlier_covar[0:, 0:], index=features_outlier_covar,
columns=['PC' + str(i + 1) for i in
range(loading_outlier_covar.shape[1])])
Var_outlier_covar = pca_outlier_covar.explained_variance_ratio_
PC_df_outlier_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier_covar))],
columns=['Principal Component'])
PC_num_outlier_covar = [float(i + 1) for i in range(len(features_outlier_covar))]
Var_df_outlier_covar = pd.DataFrame(data=Var_outlier_covar,
columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier_covar = Var_df_outlier_covar.cumsum()
Var_dff_outlier_covar = pd.concat([PC_df_outlier_covar, (Var_cumsum_outlier_covar * 100)], axis=1)
PC_interp_outlier = np.interp(70, Var_dff_outlier_covar['Cumulative Proportion of Explained Variance'],
PC_num_outlier_covar)
PC_interp_int_outlier = math.ceil(PC_interp_outlier)
eigenvalues_outlier_covar = pca_outlier_covar.explained_variance_
Eigen_df_outlier_covar = pd.DataFrame(data=eigenvalues_outlier_covar, columns=['Eigenvalues'])
Eigen_dff_outlier_covar = pd.concat([PC_df_outlier_covar, Eigen_df_outlier_covar], axis=1)
data = Eigen_dff_outlier_covar
traces.append(go.Scatter(x=data['Principal Component'], y=data['Eigenvalues'], mode='lines'))
return {'data': traces,
'layout': go.Layout(title='<b>Scree Plot Eigenvalues</b>', xaxis={'title': 'Principal Component',
'mirror': True,
'ticks': 'outside',
'showline': True,
'showspikes': True},
titlefont=dict(family='Helvetica', size=16),
yaxis={'title': 'Eigenvalues', 'mirror': True,
'ticks': 'outside',
'showline': True,
'showspikes': True}, hovermode='closest',
font=dict(family="Helvetica"), template="simple_white", )
}
def round_up(n, decimals=0):
multiplier = 10 ** decimals
return math.ceil(n * multiplier) / multiplier
def round_down(n, decimals=0):
multiplier = 10 ** decimals
return math.floor(n * multiplier) / multiplier
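# Illustrative behaviour of the helpers above: round_up(0.123, 2) -> 0.13 and
# round_down(0.127, 2) -> 0.12; they pin the heatmap color range reported below
# to the observed loading extremes.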
@app.callback([Output('PC-feature-heatmap', 'figure'),
Output('color-range-container', 'children')],
[
Input('PC-feature-outlier-value', 'value'),
Input('colorscale', 'value'),
Input("matrix-type-heatmap", "value"),
Input('csv-data', 'data')]
)
def update_graph_stat(outlier, colorscale, matrix_type, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
traces = []
# INCLUDING OUTLIERS
features1 = dff.columns
features = list(features1)
x = dff.loc[:, features].values
# Separating out the target (if any)
y = dff.loc[:, ].values
# Standardizing the features to {mean, variance} = {0, 1}
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=len(features))
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data=principalComponents
, columns=['PC' + str(i + 1) for i in range(len(features))])
    # combining principal components and target
    finalDf = pd.concat([df[[df.columns[0]]], principalDf], axis=1)
    dfff = finalDf
    # explained variance of the two principal components
    # print(pca.explained_variance_ratio_)
    # Explained variance tells us how much information (variance) can be attributed to each principal component
    # loading of each feature on the principal components
loading = pca.components_.T * np.sqrt(pca.explained_variance_)
loading_df = pd.DataFrame(data=loading[0:, 0:], index=features,
columns=['PC' + str(i + 1) for i in range(loading.shape[1])])
loading_dff = loading_df.T
# OUTLIERS REMOVED
z_scores_hm = scipy.stats.zscore(dff)
abs_z_scores_hm = np.abs(z_scores_hm)
filtered_entries_hm = (abs_z_scores_hm < 3).all(axis=1)
outlier_dff_hm = dff[filtered_entries_hm]
features1_outlier_hm = outlier_dff_hm.columns
features_outlier2 = list(features1_outlier_hm)
outlier_names1_hm = df[filtered_entries_hm]
outlier_names_hm = outlier_names1_hm.iloc[:, 0]
x_outlier_hm = outlier_dff_hm.loc[:, features_outlier2].values
# Separating out the target (if any)
# Standardizing the features
x_outlier_hm = StandardScaler().fit_transform(x_outlier_hm)
pca_outlier_hm = PCA(n_components=len(features_outlier2))
principalComponents_outlier_hm = pca_outlier_hm.fit_transform(x_outlier_hm)
principalDf_outlier_hm = pd.DataFrame(data=principalComponents_outlier_hm
, columns=['PC' + str(i + 1) for i in range(len(features_outlier2))])
    # combining principal components and target
finalDf_outlier_hm = pd.concat([outlier_names_hm, principalDf_outlier_hm], axis=1)
dfff_outlier_hm = finalDf_outlier_hm
# calculating loading
loading_outlier_hm = pca_outlier_hm.components_.T * np.sqrt(pca_outlier_hm.explained_variance_)
loading_df_outlier_hm = pd.DataFrame(data=loading_outlier_hm[0:, 0:], index=features_outlier2,
columns=['PC' + str(i + 1) for i in range(loading_outlier_hm.shape[1])])
loading_dff_outlier_hm = loading_df_outlier_hm.T
# COVAR MATRIX
features1_covar = dff.columns
features_covar = list(features1_covar)
x = dff.loc[:, features_covar].values
pca_covar = PCA(n_components=len(features_covar))
principalComponents_covar = pca_covar.fit_transform(x)
principalDf_covar = pd.DataFrame(data=principalComponents_covar
, columns=['PC' + str(i + 1) for i in range(len(features_covar))])
finalDf_covar = pd.concat([df[[df.columns[0]]], principalDf_covar], axis=1)
loading_covar = pca_covar.components_.T * np.sqrt(pca_covar.explained_variance_)
loading_df_covar = pd.DataFrame(data=loading_covar[0:, 0:], index=features_covar,
columns=['PC' + str(i + 1) for i in range(loading_covar.shape[1])])
loading_dff_covar = loading_df_covar.T
# COVAR MATRIX OUTLIERS REMOVED
if outlier == 'No' and matrix_type == "Correlation":
data = loading_dff
elif outlier == 'Yes' and matrix_type == "Correlation":
data = loading_dff_outlier_hm
elif outlier == 'No' and matrix_type == "Covariance":
data = loading_dff_covar
elif outlier == "Yes" and matrix_type == "Covariance":
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier_covar = outlier_dff.columns
features_outlier_covar = list(features1_outlier_covar)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier_covar].values
pca_outlier_covar = PCA(n_components=len(features_outlier_covar))
principalComponents_outlier_covar = pca_outlier_covar.fit_transform(x_outlier)
principalDf_outlier_covar = pd.DataFrame(data=principalComponents_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier_covar))])
        # combining principal components and target
finalDf_outlier_covar = pd.concat([outlier_names, principalDf_outlier_covar], axis=1)
# calculating loading
loading_outlier_covar = pca_outlier_covar.components_.T * np.sqrt(pca_outlier_covar.explained_variance_)
loading_df_outlier_covar = pd.DataFrame(data=loading_outlier_covar[0:, 0:], index=features_outlier_covar,
columns=['PC' + str(i + 1) for i in
range(loading_outlier_covar.shape[1])])
loading_dff_outlier_covar = loading_df_outlier_covar.T
data = loading_dff_outlier_covar
    size_range = [round_up(data.values.min(), 2), round_down(data.values.max(), 2)]
    traces.append(go.Heatmap(
        z=data, x=list(data.columns), y=list(data.index),
        colorscale="Viridis" if colorscale == 'Viridis' else "Plasma",
        # each cell is the loading (correlation) between a feature and a principal component
colorbar={"title": "Loading",
# 'tickvals': [round_up(data.values.min(), 2),
# round_up((data.values.min() + (data.values.max() + data.values.min())/2)/2, 2),
# round_down((data.values.max() + data.values.min())/2,2),
# round_down((data.values.max() + (data.values.max() + data.values.min())/2)/2, 2),
# round_down(data.values.max(),2), ]
}
))
return {'data': traces,
'layout': go.Layout(title=dict(text='<b>PC and Feature Correlation Analysis</b>'),
xaxis=dict(title_text='Features', title_standoff=50),
titlefont=dict(family='Helvetica', size=16),
hovermode='closest', margin={'b': 110, 't': 50, 'l': 75},
font=dict(family="Helvetica", size=11),
annotations=[
dict(x=-0.16, y=0.5, showarrow=False, text="Principal Components",
xref='paper', yref='paper', textangle=-90,
font=dict(size=12))]
),
}, '{}'.format(size_range)
@app.callback(Output('feature-heatmap', 'figure'),
[
Input('PC-feature-outlier-value', 'value'),
Input('colorscale', 'value'),
Input('csv-data', 'data')])
def update_graph_stat(outlier, colorscale, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
traces = []
if outlier == 'No':
features1 = dff.columns
features = list(features1)
# correlation coefficient and coefficient of determination
correlation_dff = dff.corr(method='pearson', )
r2_dff = correlation_dff * correlation_dff
data = r2_dff
feat = features
elif outlier == 'Yes':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
# correlation coefficient and coefficient of determination
correlation_dff_outlier = outlier_dff.corr(method='pearson', )
r2_dff_outlier = correlation_dff_outlier * correlation_dff_outlier
data = r2_dff_outlier
feat = features_outlier
traces.append(go.Heatmap(
z=data, x=feat, y=feat, colorscale="Viridis" if colorscale == 'Viridis' else "Plasma",
        # each cell is the coefficient of determination (R²) between a pair of features
colorbar={"title": "R²", 'tickvals': [0, 0.2, 0.4, 0.6, 0.8, 1]}))
return {'data': traces,
'layout': go.Layout(title=dict(text='<b>Feature Correlation Analysis</b>', y=0.97, x=0.6),
xaxis={},
titlefont=dict(family='Helvetica', size=16),
yaxis={},
hovermode='closest', margin={'b': 110, 't': 50, 'l': 180, 'r': 50},
font=dict(family="Helvetica", size=11)),
}
@app.callback(Output('feature-input', 'options'),
[Input('all-custom-choice', 'value'),
Input('csv-data', 'data')])
def activate_input(all_custom, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if all_custom == 'All':
options = []
elif all_custom == 'Custom':
options = [{'label': i, 'value': i} for i in dff.columns]
return options
@app.callback(Output('color-scale-scores', 'options'),
[Input('feature-input', 'value'),
Input('radio-target-item', 'value'),
Input('outlier-value-biplot', 'value'),
Input('customvar-graph-update', 'value'),
Input('matrix-type-biplot', 'value'),
Input('csv-data', 'data')], )
def populate_color_dropdown(input, target, outlier, graph_type, matrix_type, data):
if not data:
return dash.no_update
if input is None:
raise dash.exceptions.PreventUpdate
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
dff_target = dff[input]
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
if target == 'Yes' and outlier == 'Yes' and matrix_type == "Correlation" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target_outlier.columns]
elif target == 'Yes' and outlier == 'Yes' and matrix_type == "Covariance" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target_outlier.columns]
elif target == 'Yes' and outlier == 'No' and matrix_type == "Correlation" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target.columns]
elif target == 'Yes' and outlier == 'No' and matrix_type == "Covariance" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target.columns]
elif target == 'No' or graph_type == 'Loadings':
options = []
return options
@app.callback(Output('size-scale-scores', 'options'),
[Input('feature-input', 'value'),
Input('radio-target-item-second', 'value'),
Input('outlier-value-biplot', 'value'),
Input('customvar-graph-update', 'value'),
Input('matrix-type-biplot', 'value'),
Input('csv-data', 'data')])
def populate_color_dropdown(input, target, outlier, graph_type, matrix_type, data):
if not data:
return dash.no_update
if input is None:
raise dash.exceptions.PreventUpdate
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
dff_target = dff[input]
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
if target == 'Yes' and outlier == 'Yes' and matrix_type == "Correlation" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target_outlier.columns]
elif target == 'Yes' and outlier == 'Yes' and matrix_type == "Covariance" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target_outlier.columns]
elif target == 'Yes' and outlier == 'No' and matrix_type == "Correlation" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target.columns]
elif target == 'Yes' and outlier == 'No' and matrix_type == "Covariance" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target.columns]
elif target == 'No' or graph_type == 'Loadings':
options = []
return options
# resume covar matrix...
@app.callback(Output('biplot', 'figure'),
[
Input('outlier-value-biplot', 'value'),
Input('feature-input', 'value'),
Input('customvar-graph-update', 'value'),
Input('color-scale-scores', 'value'),
Input('radio-target-item', 'value'),
Input('size-scale-scores', 'value'),
Input('radio-target-item-second', 'value'),
Input('all-custom-choice', 'value'),
Input('matrix-type-biplot', 'value'),
Input('csv-data', 'data')
]
)
def update_graph_custom(outlier, input, graph_update, color, target, size, target2, all_custom, matrix_type, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
features1 = dff.columns
features = list(features1)
if all_custom == 'All':
# x_scale = MinMaxScaler(feature_range=(0, 1), copy=True).fit_transform(x_scale)
# OUTLIER DATA
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# ORIGINAL DATA WITH OUTLIERS
x_scale = dff.loc[:, features].values
y_scale = dff.loc[:, ].values
x_scale = StandardScaler().fit_transform(x_scale)
# x_scale = rescale(x_scale, new_min=0, new_max=1)
pca_scale = PCA(n_components=len(features))
principalComponents_scale = pca_scale.fit_transform(x_scale)
principalDf_scale = pd.DataFrame(data=principalComponents_scale
, columns=['PC' + str(i + 1) for i in range(len(features))])
            # combining principal components and target
finalDf_scale = pd.concat([df[[df.columns[0]]], principalDf_scale], axis=1)
dfff_scale = finalDf_scale.fillna(0)
Var_scale = pca_scale.explained_variance_ratio_
# calculating loading vector plot
loading_scale = pca_scale.components_.T * np.sqrt(pca_scale.explained_variance_)
loading_scale_df = pd.DataFrame(data=loading_scale[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_df = pd.DataFrame(data=features, columns=['line_group'])
loading_scale_dff = pd.concat([loading_scale_df, line_group_scale_df], axis=1)
a = (len(features), 2)
zero_scale = np.zeros(a)
zero_scale_df = pd.DataFrame(data=zero_scale, columns=["PC1", "PC2"])
zero_scale_dff = pd.concat([zero_scale_df, line_group_scale_df], axis=1)
loading_scale_line_graph = pd.concat([loading_scale_dff, zero_scale_dff], axis=0)
# ORIGINAL DATA WITH REMOVING OUTLIERS
x_outlier_scale = outlier_dff.loc[:, features_outlier].values
y_outlier_scale = outlier_dff.loc[:, ].values
x_outlier_scale = StandardScaler().fit_transform(x_outlier_scale)
# x_outlier_scale = MinMaxScaler().fit_transform(x_outlier_scale)
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# x_outlier_scale = rescale(x_outlier_scale, new_min=0, new_max=1)
# uses covariance matrix
pca_outlier_scale = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale = pca_outlier_scale.fit_transform(x_outlier_scale)
principalDf_outlier_scale = pd.DataFrame(data=principalComponents_outlier_scale
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
            # combining principal components and target
finalDf_outlier_scale = pd.concat([outlier_names, principalDf_outlier_scale], axis=1)
dfff_outlier_scale = finalDf_outlier_scale.fillna(0)
# calculating loading
Var_outlier_scale = pca_outlier_scale.explained_variance_ratio_
# calculating loading vector plot
loading_outlier_scale = pca_outlier_scale.components_.T * np.sqrt(pca_outlier_scale.explained_variance_)
loading_outlier_scale_df = pd.DataFrame(data=loading_outlier_scale[:, 0:2],
columns=["PC1", "PC2"])
line_group_df = pd.DataFrame(data=features_outlier, columns=['line_group'])
loading_outlier_scale_dff = pd.concat([loading_outlier_scale_df, line_group_df], axis=1)
a = (len(features_outlier), 2)
zero_outlier_scale = np.zeros(a)
zero_outlier_scale_df = pd.DataFrame(data=zero_outlier_scale, columns=["PC1", "PC2"])
zero_outlier_scale_dff = pd.concat([zero_outlier_scale_df, line_group_df], axis=1)
loading_outlier_scale_line_graph = pd.concat([loading_outlier_scale_dff, zero_outlier_scale_dff], axis=0)
# COVARIANCE MATRIX
x_scale_covar = dff.loc[:, features].values
y_scale_covar = dff.loc[:, ].values
pca_scale_covar = PCA(n_components=len(features))
principalComponents_scale_covar = pca_scale_covar.fit_transform(x_scale_covar)
principalDf_scale_covar = pd.DataFrame(data=principalComponents_scale_covar
, columns=['PC' + str(i + 1) for i in range(len(features))])
            # combining principal components and target
finalDf_scale_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_covar], axis=1)
dfff_scale_covar = finalDf_scale_covar.fillna(0)
Var_scale_covar = pca_scale_covar.explained_variance_ratio_
loading_scale_covar = pca_scale_covar.components_.T * np.sqrt(pca_scale_covar.explained_variance_)
loading_scale_df_covar = pd.DataFrame(data=loading_scale_covar[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_df_covar = pd.DataFrame(data=features, columns=['line_group'])
loading_scale_dff_covar = pd.concat([loading_scale_df_covar, line_group_scale_df_covar], axis=1)
a = (len(features), 2)
zero_scale_covar = np.zeros(a)
zero_scale_df_covar = pd.DataFrame(data=zero_scale_covar, columns=["PC1", "PC2"])
zero_scale_dff_covar = pd.concat([zero_scale_df_covar, line_group_scale_df_covar], axis=1)
loading_scale_line_graph_covar = pd.concat([loading_scale_dff_covar, zero_scale_dff_covar], axis=0)
# COVARIANCE MATRIX OUTLIERS
x_outlier_scale_covar = outlier_dff.loc[:, features_outlier].values
y_outlier_scale_covar = outlier_dff.loc[:, ].values
pca_outlier_scale_covar = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale_covar = pca_outlier_scale_covar.fit_transform(x_outlier_scale_covar)
principalDf_outlier_scale_covar = pd.DataFrame(data=principalComponents_outlier_scale_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier))])
finalDf_outlier_scale_covar = pd.concat([outlier_names, principalDf_outlier_scale_covar], axis=1)
dfff_outlier_scale_covar = finalDf_outlier_scale_covar.fillna(0)
Var_outlier_scale_covar = pca_outlier_scale_covar.explained_variance_ratio_
# calculating loading vector plot
loading_outlier_scale_covar = pca_outlier_scale_covar.components_.T * np.sqrt(
pca_outlier_scale_covar.explained_variance_)
loading_outlier_scale_df_covar = pd.DataFrame(data=loading_outlier_scale_covar[:, 0:2],
columns=["PC1", "PC2"])
line_group_df_covar = pd.DataFrame(data=features_outlier, columns=['line_group'])
loading_outlier_scale_dff_covar = pd.concat([loading_outlier_scale_df_covar, line_group_df_covar], axis=1)
a = (len(features_outlier), 2)
zero_outlier_scale_covar = np.zeros(a)
zero_outlier_scale_df_covar = pd.DataFrame(data=zero_outlier_scale_covar, columns=["PC1", "PC2"])
zero_outlier_scale_dff_covar = pd.concat([zero_outlier_scale_df_covar, line_group_df_covar], axis=1)
loading_outlier_scale_line_graph_covar = pd.concat(
[loading_outlier_scale_dff_covar, zero_outlier_scale_dff_covar], axis=0)
if outlier == 'No' and matrix_type == "Correlation":
dat = dfff_scale
elif outlier == 'Yes' and matrix_type == "Correlation":
dat = dfff_outlier_scale
elif outlier == "No" and matrix_type == "Covariance":
dat = dfff_scale_covar
elif outlier == "Yes" and matrix_type == "Covariance":
dat = dfff_outlier_scale_covar
trace2_all = go.Scatter(x=dat['PC1'], y=dat['PC2'], mode='markers',
text=dat[dat.columns[0]],
hovertemplate=
'<b>%{text}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
marker=dict(opacity=0.7, showscale=False, size=12,
line=dict(width=0.5, color='DarkSlateGrey'),
),
)
####################################################################################################
# INCLUDE THIS
if outlier == 'No' and matrix_type == "Correlation":
data = loading_scale_line_graph
variance = Var_scale
elif outlier == 'Yes' and matrix_type == "Correlation":
data = loading_outlier_scale_line_graph
variance = Var_outlier_scale
elif outlier == "No" and matrix_type == "Covariance":
data = loading_scale_line_graph_covar
variance = Var_scale_covar
elif outlier == "Yes" and matrix_type == "Covariance":
data = loading_outlier_scale_line_graph_covar
variance = Var_outlier_scale_covar
counter = 0
lists = [[] for i in range(len(data['line_group'].unique()))]
for i in data['line_group'].unique():
dataf_all = data[data['line_group'] == i]
trace1_all = go.Scatter(x=dataf_all['PC1'], y=dataf_all['PC2'], line=dict(color="#4f4f4f"),
name=i,
# text=i,
meta=i,
hovertemplate=
'<b>%{meta}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
mode='lines+text',
textposition='bottom right', textfont=dict(size=12)
)
lists[counter] = trace1_all
counter = counter + 1
####################################################################################################
if graph_update == 'Biplot':
lists.insert(0, trace2_all)
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2))),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2))),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
elif graph_update == 'Loadings':
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2))),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2))),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
elif all_custom == 'Custom':
# Dropping Data variables
dff_input = dff.drop(columns=dff[input])
features1_input = dff_input.columns
features_input = list(features1_input)
dff_target = dff[input]
# OUTLIER DATA INPUT
z_scores_input = scipy.stats.zscore(dff_input)
abs_z_scores_input = np.abs(z_scores_input)
filtered_entries_input = (abs_z_scores_input < 3).all(axis=1)
dff_input_outlier = dff_input[filtered_entries_input]
features1_input_outlier = dff_input_outlier.columns
features_input_outlier = list(features1_input_outlier)
outlier_names_input1 = df[filtered_entries_input]
outlier_names_input = outlier_names_input1.iloc[:, 0]
# OUTLIER DATA TARGET
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
# INPUT DATA WITH OUTLIERS
x_scale_input = dff_input.loc[:, features_input].values
y_scale_input = dff_input.loc[:, ].values
x_scale_input = StandardScaler().fit_transform(x_scale_input)
# x_scale_input = MinMaxScaler(feature_range=(0, 1), copy=True).fit_transform(x_scale_input)
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# x_scale_input = rescale(x_scale_input, new_min=0, new_max=1)
pca_scale_input = PCA(n_components=len(features_input))
principalComponents_scale_input = pca_scale_input.fit_transform(x_scale_input)
principalDf_scale_input = pd.DataFrame(data=principalComponents_scale_input
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input = pd.concat([df[[df.columns[0]]], principalDf_scale_input, dff_target], axis=1)
dfff_scale_input = finalDf_scale_input.fillna(0)
Var_scale_input = pca_scale_input.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input = pca_scale_input.components_.T * np.sqrt(pca_scale_input.explained_variance_)
loading_scale_input_df = pd.DataFrame(data=loading_scale_input[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_input_df = pd.DataFrame(data=features_input, columns=['line_group'])
loading_scale_input_dff = pd.concat([loading_scale_input_df, line_group_scale_input_df],
axis=1)
a = (len(features_input), 2)
zero_scale_input = np.zeros(a)
zero_scale_input_df = pd.DataFrame(data=zero_scale_input, columns=["PC1", "PC2"])
zero_scale_input_dff = pd.concat([zero_scale_input_df, line_group_scale_input_df], axis=1)
loading_scale_input_line_graph = pd.concat([loading_scale_input_dff, zero_scale_input_dff],
axis=0)
# INPUT DATA WITH REMOVING OUTLIERS
x_scale_input_outlier = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier = dff_input_outlier.loc[:, ].values
x_scale_input_outlier = StandardScaler().fit_transform(x_scale_input_outlier)
# x_scale_input_outlier = MinMaxScaler(feature_range=(0, 1), copy=True).fit_transform(x_scale_input_outlier)
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# x_scale_input_outlier = rescale(x_scale_input_outlier, new_min=0, new_max=1)
pca_scale_input_outlier = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier = pca_scale_input_outlier.fit_transform(x_scale_input_outlier)
principalDf_scale_input_outlier = pd.DataFrame(data=principalComponents_scale_input_outlier
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier, dff_target_outlier],
axis=1)
dfff_scale_input_outlier = finalDf_scale_input_outlier.fillna(0)
Var_scale_input_outlier = pca_scale_input_outlier.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_outlier = pca_scale_input_outlier.components_.T * np.sqrt(
pca_scale_input_outlier.explained_variance_)
loading_scale_input_outlier_df = pd.DataFrame(data=loading_scale_input_outlier[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_input_outlier_df = pd.DataFrame(data=features_input_outlier, columns=['line_group'])
loading_scale_input_outlier_dff = pd.concat([loading_scale_input_outlier_df, line_group_scale_input_outlier_df],
axis=1)
a = (len(features_input_outlier), 2)
zero_scale_input_outlier = np.zeros(a)
zero_scale_input_outlier_df = pd.DataFrame(data=zero_scale_input_outlier, columns=["PC1", "PC2"])
zero_scale_input_outlier_dff = pd.concat([zero_scale_input_outlier_df, line_group_scale_input_outlier_df],
axis=1)
loading_scale_input_outlier_line_graph = pd.concat(
[loading_scale_input_outlier_dff, zero_scale_input_outlier_dff],
axis=0)
# COVARIANCE MATRIX
x_scale_input_covar = dff_input.loc[:, features_input].values
y_scale_input_covar = dff_input.loc[:, ].values
pca_scale_input_covar = PCA(n_components=len(features_input))
principalComponents_scale_input_covar = pca_scale_input_covar.fit_transform(x_scale_input_covar)
principalDf_scale_input_covar = pd.DataFrame(data=principalComponents_scale_input_covar
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_input_covar, dff_target], axis=1)
dfff_scale_input_covar = finalDf_scale_input_covar.fillna(0)
Var_scale_input_covar = pca_scale_input_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_covar = pca_scale_input_covar.components_.T * np.sqrt(
pca_scale_input_covar.explained_variance_)
loading_scale_input_df_covar = pd.DataFrame(data=loading_scale_input_covar[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_input_df_covar = pd.DataFrame(data=features_input, columns=['line_group'])
loading_scale_input_dff_covar = pd.concat([loading_scale_input_df_covar, line_group_scale_input_df_covar],
axis=1)
a = (len(features_input), 2)
zero_scale_input_covar = np.zeros(a)
zero_scale_input_df_covar = | pd.DataFrame(data=zero_scale_input_covar, columns=["PC1", "PC2"]) | pandas.DataFrame |
import pandas as pd
from typing import Dict
from arbiter import train
def get_predictions(sample: Dict, models_dict: Dict) -> Dict:
"""
Takes in a dictionary of sample metadata and checks each entry with the provided models
:param sample: A dictionary of sample metadata
:param models_dict: A dictionary with the models and a 'scaler': scaler entry
:return: A dictionary of dictionaries of model predictions
e.g.
{
'file_1': {'model name': prediction, [...]},
'file_2': {'model name': prediction, [...]}
}
"""
scaler = models_dict.pop('scaler')
df = | pd.DataFrame.from_dict(sample, orient='index') | pandas.DataFrame.from_dict |
# -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Date: 2020/3/23 19:12
Desc: 东方财富网-数据中心-沪深港通持股
http://data.eastmoney.com/hsgtcg/
http://finance.eastmoney.com/news/1622,20161118685370149.html
"""
import requests
import json
import demjson
import pandas as pd
from bs4 import BeautifulSoup
def stock_em_hsgt_north_net_flow_in(indicator="沪股通"):
url = "http://push2his.eastmoney.com/api/qt/kamt.kline/get"
params = {
"fields1": "f1,f3,f5",
"fields2": "f51,f52",
"klt": "101",
"lmt": "500",
"ut": "b2884a393a59ad64002292a3e90d46a5",
"cb": "jQuery18305732402561585701_1584961751919",
"_": "1584962164273",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = json.loads(data_text[data_text.find("{"):-2])
if indicator == "沪股通":
temp_df = pd.DataFrame(data_json["data"]["hk2sh"]).iloc[:, 0].str.split(",", expand=True)
temp_df.columns = ["date", "value"]
return temp_df
if indicator == "深股通":
temp_df = pd.DataFrame(data_json["data"]["hk2sz"]).iloc[:, 0].str.split(",", expand=True)
temp_df.columns = ["date", "value"]
return temp_df
if indicator == "北上":
temp_df = pd.DataFrame(data_json["data"]["s2n"]).iloc[:, 0].str.split(",", expand=True)
temp_df.columns = ["date", "value"]
return temp_df
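# Illustrative usage (assumed): daily northbound net inflow through the Shanghai connect
# stock_em_hsgt_north_net_flow_in(indicator="沪股通").tail()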
def stock_em_hsgt_north_cash(indicator="沪股通"):
url = "http://push2his.eastmoney.com/api/qt/kamt.kline/get"
params = {
"fields1": "f1,f3,f5",
"fields2": "f51,f53",
"klt": "101",
"lmt": "500",
"ut": "b2884a393a59ad64002292a3e90d46a5",
"cb": "jQuery18305732402561585701_1584961751919",
"_": "1584962164273",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = json.loads(data_text[data_text.find("{"):-2])
if indicator == "沪股通":
temp_df = pd.DataFrame(data_json["data"]["hk2sh"]).iloc[:, 0].str.split(",", expand=True)
temp_df.columns = ["date", "value"]
return temp_df
if indicator == "深股通":
temp_df = pd.DataFrame(data_json["data"]["hk2sz"]).iloc[:, 0].str.split(",", expand=True)
temp_df.columns = ["date", "value"]
return temp_df
if indicator == "北上":
temp_df = pd.DataFrame(data_json["data"]["s2n"]).iloc[:, 0].str.split(",", expand=True)
temp_df.columns = ["date", "value"]
return temp_df
def stock_em_hsgt_north_acc_flow_in(indicator="沪股通"):
url = "http://push2his.eastmoney.com/api/qt/kamt.kline/get"
params = {
"fields1": "f1,f3,f5",
"fields2": "f51,f54",
"klt": "101",
"lmt": "500",
"ut": "b2884a393a59ad64002292a3e90d46a5",
"cb": "jQuery18305732402561585701_1584961751919",
"_": "1584962164273",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = json.loads(data_text[data_text.find("{"):-2])
if indicator == "沪股通":
temp_df = pd.DataFrame(data_json["data"]["hk2sh"]).iloc[:, 0].str.split(",", expand=True)
temp_df.columns = ["date", "value"]
return temp_df
if indicator == "深股通":
temp_df = pd.DataFrame(data_json["data"]["hk2sz"]).iloc[:, 0].str.split(",", expand=True)
temp_df.columns = ["date", "value"]
return temp_df
if indicator == "北上":
temp_df = pd.DataFrame(data_json["data"]["s2n"]).iloc[:, 0].str.split(",", expand=True)
temp_df.columns = ["date", "value"]
return temp_df
def stock_em_hsgt_south_net_flow_in(indicator="沪股通"):
url = "http://push2his.eastmoney.com/api/qt/kamt.kline/get"
params = {
"fields1": "f2,f4,f6",
"fields2": "f51,f52",
"klt": "101",
"lmt": "500",
"ut": "b2884a393a59ad64002292a3e90d46a5",
"cb": "jQuery18307854355493858363_1584963487410",
"_": "1584964176697",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = json.loads(data_text[data_text.find("{"):-2])
if indicator == "沪股通":
temp_df = pd.DataFrame(data_json["data"]["sh2hk"]).iloc[:, 0].str.split(",", expand=True)
temp_df.columns = ["date", "value"]
return temp_df
if indicator == "深股通":
temp_df = pd.DataFrame(data_json["data"]["sz2hk"]).iloc[:, 0].str.split(",", expand=True)
temp_df.columns = ["date", "value"]
return temp_df
if indicator == "南下":
temp_df = pd.DataFrame(data_json["data"]["n2s"]).iloc[:, 0].str.split(",", expand=True)
temp_df.columns = ["date", "value"]
return temp_df
def stock_em_hsgt_south_cash(indicator="沪股通"):
url = "http://push2his.eastmoney.com/api/qt/kamt.kline/get"
params = {
"fields1": "f2,f4,f6",
"fields2": "f51,f53",
"klt": "101",
"lmt": "500",
"ut": "b2884a393a59ad64002292a3e90d46a5",
"cb": "jQuery18307854355493858363_1584963487410",
"_": "1584964176697",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = json.loads(data_text[data_text.find("{"):-2])
if indicator == "沪股通":
temp_df = pd.DataFrame(data_json["data"]["sh2hk"]).iloc[:, 0].str.split(",", expand=True)
temp_df.columns = ["date", "value"]
return temp_df
if indicator == "深股通":
temp_df = pd.DataFrame(data_json["data"]["sz2hk"]).iloc[:, 0].str.split(",", expand=True)
temp_df.columns = ["date", "value"]
return temp_df
if indicator == "南下":
temp_df = pd.DataFrame(data_json["data"]["n2s"]).iloc[:, 0].str.split(",", expand=True)
temp_df.columns = ["date", "value"]
return temp_df
def stock_em_hsgt_south_acc_flow_in(indicator="沪股通"):
url = "http://push2his.eastmoney.com/api/qt/kamt.kline/get"
params = {
"fields1": "f2,f4,f6",
"fields2": "f51,f54",
"klt": "101",
"lmt": "500",
"ut": "b2884a393a59ad64002292a3e90d46a5",
"cb": "jQuery18307854355493858363_1584963487410",
"_": "1584964176697",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = json.loads(data_text[data_text.find("{"):-2])
if indicator == "沪股通":
temp_df = | pd.DataFrame(data_json["data"]["sh2hk"]) | pandas.DataFrame |
### MOVE TO UTIL
import urllib
import os
import re
import sklearn.metrics as metrics
import numpy as np
import stanfordnlp
import pandas as pd
from bllipparser import RerankingParser
from nltk import Tree
from nltk.draw.util import CanvasFrame
from nltk.draw import TreeWidget
import svgling
import pickle
from negbio.pipeline import text2bioc
import bioc
import itertools
from textblob import TextBlob
from tqdm import tqdm_notebook
import tqdm
from pathlib2 import Path
from negbio.chexpert.stages.aggregate import NegBioAggregator
from negbio.chexpert.stages.classify import ModifiedDetector, CATEGORIES
from negbio.chexpert.stages.extract import NegBioExtractor
from negbio.chexpert.stages.load import NegBioLoader
from negbio.pipeline import text2bioc, negdetect
from negbio.pipeline.parse import NegBioParser
from negbio.pipeline.ptb2ud import NegBioPtb2DepConverter, Lemmatizer
from negbio.pipeline.ssplit import NegBioSSplitter
from negbio.main_chexpert import pipeline
PARSING_MODEL_DIR = "~/.local/share/bllipparser/GENIA+PubMed"
CHEXPERT_PATH = "NegBio/negbio/chexpert/"
MENTION_PATH = f"{CHEXPERT_PATH}phrases/mention"
UNMENTION_PATH = f"{CHEXPERT_PATH}phrases/unmention"
NEG_PATH = f'{CHEXPERT_PATH}patterns/negation.txt'
PRE_NEG_PATH = f'{CHEXPERT_PATH}patterns/pre_negation_uncertainty.txt'
POST_NEG_PATH = f'{CHEXPERT_PATH}patterns/post_negation_uncertainty.txt'
PHRASES_PATH = f"{CHEXPERT_PATH}phrases/"
TEST_PATH = "stanford_report_test.csv"
test_df = pd.read_csv(TEST_PATH)
CATEGORIES = ["Cardiomegaly",
"Lung Lesion", "Airspace Opacity", "Edema", "Consolidation",
"Pneumonia", "Atelectasis", "Pneumothorax", "Pleural Effusion",
"Pleural Other", "Fracture"]
test_df = test_df[['Report Impression'] + CATEGORIES]
test_df = test_df.replace(1, True).fillna(False).replace(0, False).replace(-1, False)
def get_dict(path):
label_to_mention = {}
mention_files = os.listdir(path)
for f in mention_files:
with open(os.path.join(path, f)) as mention_file:
condition = os.path.basename(f)[:-4]
condition = condition.replace("_", " ").title()
if condition not in label_to_mention:
label_to_mention[condition] = []
for line in mention_file:
label_to_mention[condition].append(line.split("\n")[0])
return label_to_mention
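# Rough shape of the parsed phrase dictionary (labels and phrases below are illustrative only):
# {'Pneumothorax': ['pneumothorax', ...], 'Edema': ['edema', ...], 'Fracture': ['fracture', ...], ...}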
mentions = get_dict(PHRASES_PATH + "mention")
unmentions = get_dict(PHRASES_PATH + "unmention")
mentions_pk = "mentions.pkl"
unmentions_pk = "unmentions.pkl"
pickle.dump(mentions, open(mentions_pk, "wb"))
pickle.dump(unmentions, open(unmentions_pk, "wb"))
mentions = pickle.load(open(mentions_pk, "rb"))
unmentions = pickle.load(open(unmentions_pk, "rb"))
## MOVE TO UTIL
def get_mention_keywords(observation):
if observation in mentions:
return mentions[observation]
else:
return []
chexpert_results_mention = {
'No Finding': 0.769,
'Lung Lesion': 0.896,
'Fracture': 0.975,
'Pleural Other': 0.850,
'Pleural Effusion': 0.985,
'Pneumonia': 0.660,
'Pneumothorax': 1.000,
'Lung Opacity': 0.966,
'Edema': 0.996,
'Support Devices': 0.933,
'Atelectasis': 0.998,
'Enlarged Cardiomediastinum': 0.935,
'Cardiomegaly': 0.973,
'Consolidation': 0.999
}
chexpert_results_unmention = {
'No Finding': float("nan"),
'Lung Lesion': 0.900,
'Fracture': 0.807,
'Pleural Other': 1.00,
'Pleural Effusion': 0.971,
'Pneumonia': 0.750,
'Pneumothorax': 0.977,
'Lung Opacity': 0.914,
'Edema': 0.962,
'Support Devices': 0.720,
'Atelectasis': 0.833,
'Enlarged Cardiomediastinum': 0.959,
'Cardiomegaly': 0.909,
'Consolidation': 0.981
}
## MOVE TO UTIL
def get_bioc_collection(df):
collection = bioc.BioCCollection()
splitter = NegBioSSplitter()
for i, report in enumerate(df["Report Impression"]):
document = text2bioc.text2document(str(i), report)
document = splitter.split_doc(document)
collection.add_document(document)
return collection
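# get_bioc_collection wraps each report impression in a BioC document (document id is the
# row position) and sentence-splits it so the NegBio loader/parser stages can consume it.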
def clean(sentence):
"""Clean the text."""
punctuation_spacer = str.maketrans({key: f"{key} " for key in ".,"})
lower_sentence = sentence.lower()
# Change `and/or` to `or`.
corrected_sentence = re.sub('and/or',
'or',
lower_sentence)
# Change any `XXX/YYY` to `XXX or YYY`.
corrected_sentence = re.sub('(?<=[a-zA-Z])/(?=[a-zA-Z])',
' or ',
corrected_sentence)
# Clean double periods
clean_sentence = corrected_sentence.replace("..", ".")
# Insert space after commas and periods.
clean_sentence = clean_sentence.translate(punctuation_spacer)
# Convert any multi white spaces to single white spaces.
clean_sentence = ' '.join(clean_sentence.split())
return clean_sentence
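# Example (illustrative): clean("Stable cardiomegaly and/or edema..")
# returns "stable cardiomegaly or edema."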
def calculate_f1(df, pred_frame):
# calculate F1
results = pd.DataFrame()
for cat in CATEGORIES:
gt = df[cat]
pred = pred_frame[cat]
f1 = metrics.f1_score(gt, pred)
results = results.append({ "Label": cat, "F1": round(f1, 3)}, ignore_index=True)
results = results.append({ "Label": "Average", "F1": round(results["F1"].mean(), 3)}, ignore_index=True)
return results[["Label", "F1"]]
def get_preds(classfication_function, df, cleanup=False):
# generate labels
collection = get_bioc_collection(df)
docs = collection.documents
pred_frame = | pd.DataFrame() | pandas.DataFrame |
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import os
import io
import time
import glob
import argparse
import pandas as pd
import tensorflow as tf
import xml.etree.ElementTree as ET
from PIL import Image
from random import shuffle
from object_detection.utils import label_map_util
from object_detection.utils import dataset_util
from collections import namedtuple
from utils.utils import set_log, check_time
def make_summary(logger,rows):
logger.info('{0:^50}'.format('TF Record Summary'))
logger.info('{0:^10}'.format('ID') + '{0:^20}'.format('NAME') + '{0:^10}'.format('Train') + '{0:^10}'.format('Validate'))
for i in rows:
logger.info('{0:^10}'.format(i[0]) + '{0:^20}'.format(i[1]) + '{0:^10}'.format(i[2]) + '{0:^10}'.format(i[3]))
def get_label_category(args):
label_map = label_map_util.load_labelmap(args['label_file'])
return label_map_util.convert_label_map_to_categories(label_map, max_num_classes=int(args['max_num_classes']),use_display_name=True)
def make_category_dict(categories):
category_dict = {}
for i in range(len(categories)):
category_dict[categories[i]['name']] = categories[i]['id']
return category_dict
def split(df, group):
data = namedtuple('data', ['filename', 'object'])
gb = df.groupby(group)
return [data(filename, gb.get_group(x)) for filename, x in zip(gb.groups.keys(), gb.groups)]
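# Illustrative result: split(train_csv, 'filename') yields one namedtuple per image, e.g.
# data(filename='img_001.jpg', object=<DataFrame with that image's box rows>); the file
# name here is a placeholder.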
def user_input():
config = argparse.ArgumentParser()
config.add_argument('-m', '--max_num_classes', help='Maximum class number', default='90', type=int,required=False)
config.add_argument('-i','--input_folder',help='Input Images Forlder',default='./images/',type=str, required=False)
config.add_argument('-l', '--label_file', help='Label file Location', default='./label_map.pbtxt', type=str,required=False)
config.add_argument('-c', '--custom_csv', help='Custom csv', default=False, type=str,required=False)
config.add_argument('-tc', '--train_csv_output', help='Train csv output file Location', default='./dataset/train.csv', type=str,required=False)
config.add_argument('-vc', '--validate_csv_output', help='Validate csv output file Location', default='./dataset/validate.csv', type=str,required=False)
config.add_argument('-sr', '--split_rate', help='Dataset split rate ( 8 = train 80 | validate 20 )', default='8', type=int, required=False)
config.add_argument('-lv', '--log_level',help='Logger Level [DEBUG, INFO(Default), WARNING, ERROR, CRITICAL]', default='INFO', type=str,required=False)
args = config.parse_args()
arguments = vars(args)
return arguments
def xml_to_csv(logger,args):
label_cnt = len(categories)
labels = []
for i in range(label_cnt):
labels.append(categories[i]['name'])
xml_list = []
for i in range(label_cnt):
xml_list.append([])
for xml_file in glob.glob(args['input_folder'] + '/*.xml'):
tree = ET.parse(xml_file)
root = tree.getroot()
for member in root.findall('object'):
value = (root.find('filename').text,
int(root.find('size')[0].text),
int(root.find('size')[1].text),
member[0].text,
int(member[4][0].text),
int(member[4][1].text),
int(member[4][2].text),
int(member[4][3].text)
)
xml_list[labels.index(member[0].text)].append(value)
for i in range(label_cnt):
shuffle(xml_list[i])
train = []
validate = []
summaries = []
for i in range(label_cnt):
rate = int( len(xml_list[i]) * (float(args['split_rate'])/10.0))
tmptrain = xml_list[i][:rate]
tmpvalidate = xml_list[i][rate:]
summary = (category_dict[xml_list[i][0][3]], xml_list[i][0][3],len(tmptrain),len(tmpvalidate))
summaries.append(summary)
train.extend(tmptrain)
validate.extend(tmpvalidate)
make_summary(logger,summaries)
shuffle(train)
shuffle(validate)
column_name = ['filename', 'width', 'height', 'class', 'xmin', 'ymin', 'xmax', 'ymax']
train_df = pd.DataFrame(train, columns=column_name)
validate_df = pd.DataFrame(validate, columns=column_name)
train_df.to_csv(args['train_csv_output'], index=None)
validate_df.to_csv(args['validate_csv_output'], index=None)
train_csv = pd.read_csv(args['train_csv_output'])
validate_csv = pd.read_csv(args['validate_csv_output'])
return train_csv, validate_csv
def create_tf_example(group, path):
with tf.gfile.GFile(os.path.join(path, '{}'.format(group.filename)), 'rb') as fid:
encoded_jpg = fid.read()
encoded_jpg_io = io.BytesIO(encoded_jpg)
image = Image.open(encoded_jpg_io)
width, height = image.size
filename = group.filename.encode('utf8')
image_format = b'jpg'
xmins = []
xmaxs = []
ymins = []
ymaxs = []
classes_text = []
classes = []
for index, row in group.object.iterrows():
xmins.append(int(row['xmin']) / width)
xmaxs.append(int(row['xmax']) / width)
ymins.append(int(row['ymin']) / height)
ymaxs.append(int(row['ymax']) / height)
classes_text.append(row['class'].encode('utf8'))
classes.append(category_dict[row['class']])
tf_example = tf.train.Example(features=tf.train.Features(feature={
'image/height': dataset_util.int64_feature(height),
'image/width': dataset_util.int64_feature(width),
'image/filename': dataset_util.bytes_feature(filename),
'image/source_id': dataset_util.bytes_feature(filename),
'image/encoded': dataset_util.bytes_feature(encoded_jpg),
'image/format': dataset_util.bytes_feature(image_format),
'image/object/bbox/xmin': dataset_util.float_list_feature(xmins),
'image/object/bbox/xmax': dataset_util.float_list_feature(xmaxs),
'image/object/bbox/ymin': dataset_util.float_list_feature(ymins),
'image/object/bbox/ymax': dataset_util.float_list_feature(ymaxs),
'image/object/class/text': dataset_util.bytes_list_feature(classes_text),
'image/object/class/label': dataset_util.int64_list_feature(classes),
}))
return tf_example
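# Illustrative sketch (not part of the original script) of how the grouped annotations
# could be serialised into a TFRecord file; the output path is a placeholder.
# writer = tf.python_io.TFRecordWriter('./dataset/train.record')
# for group in split(train_csv, 'filename'):
#     writer.write(create_tf_example(group, args['input_folder']).SerializeToString())
# writer.close()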
def main():
args = user_input()
start_time = time.time()
# logger setting
logger = set_log(args['log_level'])
logger.info('TF Record Generator Start')
global categories
categories = get_label_category(args)
global category_dict
category_dict = make_category_dict(categories)
#make xml file to dataframe
if not args['custom_csv']:
train, validate = xml_to_csv(logger, args)
else:
train = | pd.read_csv(args['train_csv_output']) | pandas.read_csv |
import os
import re
import pandas as pd
table = | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright (c) 2016--, Biota Technology.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import division
from unittest import TestCase, main
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sourcetracker._sourcetracker import (intersect_and_sort_samples,
collapse_source_data,
subsample_dataframe,
validate_gibbs_input,
validate_gibbs_parameters,
collate_gibbs_results,
get_samples,
generate_environment_assignments,
cumulative_proportions,
single_sink_feature_table,
ConditionalProbability,
gibbs_sampler, gibbs)
from sourcetracker._plot import plot_heatmap
class TestValidateGibbsInput(TestCase):
def setUp(self):
self.index = ['s%s' % i for i in range(5)]
self.columns = ['f%s' % i for i in range(4)]
def test_no_errors_(self):
# A table where nothing is wrong, no changes expected.
data = np.random.randint(0, 10, size=20).reshape(5, 4)
sources = pd.DataFrame(data.astype(np.int32), index=self.index,
columns=self.columns)
exp_sources = pd.DataFrame(data.astype(np.int32), index=self.index,
columns=self.columns)
obs = validate_gibbs_input(sources)
pd.util.testing.assert_frame_equal(obs, sources)
# Sources and sinks.
sinks = pd.DataFrame(data, index=self.index, columns=self.columns)
exp_sinks = pd.DataFrame(data.astype(np.int32), index=self.index,
columns=self.columns)
obs_sources, obs_sinks = validate_gibbs_input(sources, sinks)
pd.util.testing.assert_frame_equal(obs_sources, exp_sources)
pd.util.testing.assert_frame_equal(obs_sinks, exp_sinks)
def test_float_data(self):
# Data is float, expect rounding.
data = np.random.uniform(0, 1, size=20).reshape(5, 4)
sources = pd.DataFrame(data, index=self.index, columns=self.columns)
exp_sources = pd.DataFrame(np.zeros(20).reshape(5, 4).astype(np.int32),
index=self.index, columns=self.columns)
obs_sources = validate_gibbs_input(sources)
pd.util.testing.assert_frame_equal(obs_sources, exp_sources)
data = np.random.uniform(0, 1, size=20).reshape(5, 4) + 1.
sources = pd.DataFrame(data, index=self.index, columns=self.columns)
exp_sources = pd.DataFrame(np.ones(20).reshape(5, 4).astype(np.int32),
index=self.index, columns=self.columns)
obs_sources = validate_gibbs_input(sources)
pd.util.testing.assert_frame_equal(obs_sources, exp_sources)
# Sources and sinks.
data = np.random.uniform(0, 1, size=20).reshape(5, 4) + 5
sinks = pd.DataFrame(data,
index=self.index,
columns=self.columns)
exp_sinks = \
pd.DataFrame(5 * np.ones(20).reshape(5, 4).astype(np.int32),
index=self.index,
columns=self.columns)
obs_sources, obs_sinks = validate_gibbs_input(sources, sinks)
pd.util.testing.assert_frame_equal(obs_sources, exp_sources)
pd.util.testing.assert_frame_equal(obs_sinks, exp_sinks)
def test_negative_data(self):
# Values less than 0, expect errors.
data = np.random.uniform(0, 1, size=20).reshape(5, 4) - 1.
sources = pd.DataFrame(data,
index=self.index,
columns=self.columns)
self.assertRaises(ValueError, validate_gibbs_input, sources)
data = -1 * np.random.randint(0, 20, size=20).reshape(5, 4)
sources = pd.DataFrame(data,
index=self.index,
columns=self.columns)
self.assertRaises(ValueError, validate_gibbs_input, sources)
# Sources and sinks.
data = np.random.randint(0, 10, size=20).reshape(5, 4) + 1
sources = pd.DataFrame(data.astype(np.int32),
index=self.index,
columns=self.columns)
sinks = pd.DataFrame(-10 * data,
index=self.index,
columns=self.columns)
self.assertRaises(ValueError, validate_gibbs_input, sources, sinks)
def test_nan_data(self):
# nans, expect errors.
data = np.random.uniform(0, 1, size=20).reshape(5, 4)
data[3, 2] = np.nan
sources = pd.DataFrame(data,
index=self.index,
columns=self.columns)
self.assertRaises(ValueError, validate_gibbs_input, sources)
# Sources and sinks.
data = np.random.randint(0, 10, size=20).reshape(5, 4) + 1.
sources = pd.DataFrame(data,
index=self.index,
columns=self.columns)
data[1, 3] = np.nan
sinks = pd.DataFrame(data,
index=self.index,
columns=self.columns)
self.assertRaises(ValueError, validate_gibbs_input, sources, sinks)
def test_non_numeric_data(self):
# data contains at least some non-numeric columns, expect errors.
data = np.random.randint(0, 10, size=20).reshape(5, 4)
sources = pd.DataFrame(data.astype(np.int32),
index=self.index,
columns=self.columns)
sources.iloc[2, 2] = '3.a'
self.assertRaises(ValueError, validate_gibbs_input, sources)
# Sources and sinks.
data = np.random.randint(0, 10, size=20).reshape(5, 4)
sources = pd.DataFrame(data.astype(np.int32),
index=self.index,
columns=self.columns)
sinks = pd.DataFrame(data.astype(np.int32),
index=self.index,
columns=self.columns)
sinks.iloc[2, 2] = '3'
self.assertRaises(ValueError, validate_gibbs_input, sources, sinks)
def test_columns_identical(self):
# Columns are identical, no error expected.
data = np.random.randint(0, 10, size=20).reshape(5, 4)
sources = pd.DataFrame(data.astype(np.int32),
index=self.index,
columns=self.columns)
data = np.random.randint(0, 10, size=200).reshape(50, 4)
sinks = pd.DataFrame(data.astype(np.int32),
index=['s%s' % i for i in range(50)],
columns=self.columns)
obs_sources, obs_sinks = validate_gibbs_input(sources, sinks)
pd.util.testing.assert_frame_equal(obs_sources, sources)
pd.util.testing.assert_frame_equal(obs_sinks, sinks)
def test_columns_non_identical(self):
# Columns are not identical, error expected.
data = np.random.randint(0, 10, size=20).reshape(5, 4)
sources = pd.DataFrame(data.astype(np.int32),
index=self.index,
columns=self.columns)
data = np.random.randint(0, 10, size=200).reshape(50, 4)
sinks = pd.DataFrame(data.astype(np.int32),
index=['s%s' % i for i in range(50)],
columns=['feature%s' % i for i in range(4)])
self.assertRaises(ValueError, validate_gibbs_input, sources, sinks)
class TestValidateGibbsParams(TestCase):
def test_acceptable_inputs(self):
# All values acceptable, expect no errors.
alpha1 = .001
alpha2 = .1
beta = 10
restarts = 10
draws_per_restart = 1
burnin = 100
delay = 1
self.assertTrue(validate_gibbs_parameters(alpha1, alpha2, beta,
restarts, draws_per_restart, burnin, delay))
alpha1 = alpha2 = beta = 0
self.assertTrue(validate_gibbs_parameters(alpha1, alpha2, beta,
restarts, draws_per_restart, burnin, delay))
def test_not_acceptable_inputs(self):
# One of the float params is negative.
alpha1 = -.001
alpha2 = .1
beta = 10
restarts = 10
draws_per_restart = 1
burnin = 100
delay = 1
self.assertFalse(validate_gibbs_parameters(alpha1, alpha2, beta,
restarts, draws_per_restart, burnin, delay))
# One of the int params is 0.
alpha1 = .001
restarts = 0
self.assertFalse(validate_gibbs_parameters(alpha1, alpha2, beta,
restarts, draws_per_restart, burnin, delay))
# One of the int params is a float.
restarts = 1.34
self.assertFalse(validate_gibbs_parameters(alpha1, alpha2, beta,
restarts, draws_per_restart, burnin, delay))
# A param is a string.
restarts = '3.2232'
self.assertFalse(validate_gibbs_parameters(alpha1, alpha2, beta,
restarts, draws_per_restart, burnin, delay))
# A param is a nan.
restarts = 3
alpha1 = np.nan
self.assertFalse(validate_gibbs_parameters(alpha1, alpha2, beta,
restarts, draws_per_restart, burnin, delay))
class TestIntersectAndSortSamples(TestCase):
def test_partially_overlapping_tables(self):
# Test an example where there are unshared samples present in both
# feature and sample tables. Notice that order is different between
# the samples that are shared between both tables. The order of samples
# in the returned tables is set by the ordering done in np.intersect1d.
sdata_c1 = [3.1, 'red', 5]
sdata_c2 = [3.6, 'yellow', 7]
sdata_c3 = [3.9, 'yellow', -2]
sdata_c4 = [2.5, 'red', 5]
sdata_c5 = [6.7, 'blue', 10]
samples = ['s1', 's4', 's2', 's3', 'sX']
headers = ['pH', 'color', 'day']
stable = pd.DataFrame([sdata_c1, sdata_c4, sdata_c2, sdata_c3,
sdata_c5], index=samples, columns=headers)
fdata = np.arange(90).reshape(9, 10)
samples = ['s%i' % i for i in range(3, 12)]
columns = ['o%i' % i for i in range(1, 11)]
ftable = pd.DataFrame(fdata, index=samples, columns=columns)
exp_ftable = pd.DataFrame(fdata[[1, 0], :], index=['s4', 's3'],
columns=columns)
exp_stable = pd.DataFrame([sdata_c4, sdata_c3], index=['s4', 's3'],
columns=headers)
obs_stable, obs_ftable = intersect_and_sort_samples(stable, ftable)
pd.util.testing.assert_frame_equal(obs_stable, exp_stable)
pd.util.testing.assert_frame_equal(obs_ftable, exp_ftable)
# No shared samples, expect a ValueError.
ftable.index = ['ss%i' % i for i in range(9)]
self.assertRaises(ValueError, intersect_and_sort_samples, stable,
ftable)
# All samples shared, expect no changes.
fdata = np.arange(50).reshape(5, 10)
samples = ['s1', 's4', 's2', 's3', 'sX']
columns = ['o%i' % i for i in range(10)]
ftable = pd.DataFrame(fdata, index=samples, columns=columns)
exp_ftable = ftable.loc[stable.index, :]
exp_stable = stable
obs_stable, obs_ftable = intersect_and_sort_samples(stable, ftable)
pd.util.testing.assert_frame_equal(obs_stable, exp_stable)
pd.util.testing.assert_frame_equal(obs_ftable, exp_ftable)
class TestGetSamples(TestCase):
def tests(self):
# Make a dataframe which contains mixed data to test.
col0 = ['a', 'a', 'a', 'a', 'b']
col1 = [3, 2, 3, 1, 3]
col2 = ['red', 'red', 'blue', 255, 255]
headers = ['sample_location', 'num_reps', 'color']
samples = ['s1', 's2', 's3', 's4', 's5']
sample_metadata = \
pd.DataFrame.from_dict({k: v for k, v in zip(headers,
[col0, col1, col2])})
sample_metadata.index = samples
obs = get_samples(sample_metadata, 'sample_location', 'b')
exp = pd.Index(['s5'], dtype='object')
pd.util.testing.assert_index_equal(obs, exp)
obs = get_samples(sample_metadata, 'sample_location', 'a')
exp = pd.Index(['s1', 's2', 's3', 's4'], dtype='object')
pd.util.testing.assert_index_equal(obs, exp)
obs = get_samples(sample_metadata, 'color', 255)
exp = pd.Index(['s4', 's5'], dtype='object')
pd.util.testing.assert_index_equal(obs, exp)
obs = get_samples(sample_metadata, 'num_reps', 3)
exp = pd.Index(['s1', 's3', 's5'], dtype='object')
pd.util.testing.assert_index_equal(obs, exp)
class TestCollapseSourceData(TestCase):
def test_example1(self):
# Simple example with 'sum' as collapse mode.
samples = ['sample1', 'sample2', 'sample3', 'sample4']
category = 'pH'
values = [3.0, 0.4, 3.0, 3.0]
stable = pd.DataFrame(values, index=samples, columns=[category])
fdata = np.array([[10, 50, 10, 70],
[0, 25, 10, 5],
[0, 25, 10, 5],
[100, 0, 10, 5]])
ftable = pd.DataFrame(fdata, index=stable.index,
columns=map(str, np.arange(4)))
source_samples = ['sample1', 'sample2', 'sample3']
method = 'sum'
obs = collapse_source_data(stable, ftable, source_samples, category,
method)
exp_data = np.vstack((fdata[1, :], fdata[0, :] + fdata[2, :]))
exp_index = [0.4, 3.0]
exp = pd.DataFrame(exp_data.astype(np.int32), index=exp_index,
columns=map(str, np.arange(4)))
exp.index.name = 'collapse_col'
pd.util.testing.assert_frame_equal(obs, exp)
# Example with collapse mode 'mean'. This will cause non-integer values
# to be present, which the validate_gibbs_input should catch.
source_samples = ['sample1', 'sample2', 'sample3', 'sample4']
method = 'mean'
obs = collapse_source_data(stable, ftable, source_samples, category,
method)
exp_data = np.vstack((fdata[1, :],
fdata[[0, 2, 3], :].mean(0))).astype(np.int32)
exp_index = [0.4, 3.0]
exp = pd.DataFrame(exp_data.astype(np.int32), index=exp_index,
columns=map(str, np.arange(4)))
exp.index.name = 'collapse_col'
pd.util.testing.assert_frame_equal(obs, exp)
def test_example2(self):
# Test on another arbitrary example.
data = np.arange(200).reshape(20, 10)
oids = ['o%s' % i for i in range(20)]
sids = ['s%s' % i for i in range(10)]
ftable = pd.DataFrame(data.T, index=sids, columns=oids)
_stable = \
{'s4': {'cat1': '2', 'cat2': 'x', 'cat3': 'A', 'cat4': 'D'},
's0': {'cat1': '1', 'cat2': 'y', 'cat3': 'z', 'cat4': 'D'},
's1': {'cat1': '1', 'cat2': 'x', 'cat3': 'A', 'cat4': 'C'},
's3': {'cat1': '2', 'cat2': 'y', 'cat3': 'z', 'cat4': 'A'},
's2': {'cat1': '2', 'cat2': 'x', 'cat3': 'A', 'cat4': 'D'},
's6': {'cat1': '1', 'cat2': 'y', 'cat3': 'z', 'cat4': 'R'},
's5': {'cat1': '2', 'cat2': 'x', 'cat3': 'z', 'cat4': '0'},
's7': {'cat1': '2', 'cat2': 'x', 'cat3': 'z', 'cat4': '0'},
's9': {'cat1': '2', 'cat2': 'x', 'cat3': 'z', 'cat4': '0'},
's8': {'cat1': '2', 'cat2': 'x', 'cat3': 'z', 'cat4': '0'}}
stable = pd.DataFrame(_stable).T
category = 'cat4'
source_samples = ['s4', 's9', 's0', 's2']
method = 'sum'
obs = collapse_source_data(stable, ftable, source_samples, category,
method)
exp_index = np.array(['0', 'D'])
exp_data = np.array([[9, 19, 29, 39, 49, 59, 69, 79, 89, 99, 109, 119,
129, 139, 149, 159, 169, 179, 189, 199],
[6, 36, 66, 96, 126, 156, 186, 216, 246, 276, 306,
336, 366, 396, 426, 456, 486, 516, 546, 576]],
dtype=np.int32)
exp = pd.DataFrame(exp_data, index=exp_index, columns=oids)
exp.index.name = 'collapse_col'
pd.util.testing.assert_frame_equal(obs, exp)
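# Illustrative sketch (assumption, not the library's implementation): collapsing
# source samples on a metadata category is equivalent to grouping the feature
# table rows by that category and applying the requested aggregation.
import pandas as pd
_ft = pd.DataFrame([[10, 50], [0, 25], [0, 25]],
                   index=['sample1', 'sample2', 'sample3'], columns=['0', '1'])
_ph = pd.Series([3.0, 0.4, 3.0], index=_ft.index, name='collapse_col')
_collapsed = _ft.groupby(_ph).sum()
assert list(_collapsed.index) == [0.4, 3.0]
assert list(_collapsed.loc[3.0]) == [10, 75]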
class TestSubsampleDataframe(TestCase):
def test_no_errors_expected(self):
# Testing this function deterministically is hard because cython is
# generating the PRNG calls. We'll settle for ensuring that the sums
# are correct.
fdata = np.array([[10, 50, 10, 70],
[0, 25, 10, 5],
[0, 25, 10, 5],
[100, 0, 10, 5]])
ftable = pd.DataFrame(fdata, index=['s1', 's2', 's3', 's4'],
columns=map(str, np.arange(4)))
n = 30
obs = subsample_dataframe(ftable, n)
self.assertTrue((obs.sum(axis=1) == n).all())
def test_subsample_with_replacement(self):
# Testing this function deterministically is hard because cython is
# generating the PRNG calls. We'll settle for ensuring that the sums
# are correct.
fdata = np.array([[10, 50, 10, 70],
[0, 25, 10, 5],
[0, 25, 10, 5],
[100, 0, 10, 5]])
ftable = pd.DataFrame(fdata, index=['s1', 's2', 's3', 's4'],
columns=map(str, np.arange(4)))
n = 30
obs = subsample_dataframe(ftable, n, replace=True)
self.assertTrue((obs.sum(axis=1) == n).all())
def test_shape_doesnt_change(self):
# Test that when features are removed by subsampling, the shape of the
        # table does not change. Although rarefaction is stochastic, the
        # probability that the table below does not lose at least one feature
        # during rarefaction (and thus fail to exercise the condition we are
        # interested in) is nearly 0.
fdata = np.array([[0, 0, 0, 1e4],
[0, 0, 1, 1e4],
[0, 1, 0, 1e4],
[1, 0, 0, 1e4]]).astype(int)
ftable = pd.DataFrame(fdata, index=['s1', 's2', 's3', 's4'],
columns=map(str, np.arange(4)))
n = 10
obs = subsample_dataframe(ftable, n)
self.assertTrue((obs.sum(axis=1) == n).all())
self.assertEqual(obs.shape, ftable.shape)
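# Illustrative sketch (assumption): rarefying one row of counts to depth n draws
# n observations without replacement, so the row total becomes n while
# low-abundance features may drop to zero - the property the tests above check.
import numpy as np
_counts = np.array([1, 0, 0, 10000])
_pool = np.repeat(np.arange(_counts.size), _counts)
_draw = np.random.choice(_pool, size=10, replace=False)
_rarefied = np.bincount(_draw, minlength=_counts.size)
assert _rarefied.sum() == 10 and _rarefied.shape == _counts.shape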
class TestDataAggregationFunctions(TestCase):
'''Test that returned data is collated and written correctly.'''
def test_cumulative_proportions(self):
# 4 draws, 4 sources + unknown, 3 sinks
sink1_envcounts = np.array([[10, 100, 15, 0, 25],
[150, 0, 0, 0, 0],
[30, 30, 30, 30, 30],
[0, 11, 7, 35, 97]])
sink2_envcounts = np.array([[100, 10, 15, 0, 25],
[100, 0, 50, 0, 0],
[0, 60, 30, 30, 30],
[7, 11, 0, 35, 97]])
sink3_envcounts = np.array([[100, 10, 10, 5, 25],
[70, 20, 30, 30, 0],
[10, 30, 50, 30, 30],
[0, 27, 100, 20, 3]])
all_envcounts = [sink1_envcounts, sink2_envcounts, sink3_envcounts]
sink_ids = np.array(['sink1', 'sink2', 'sink3'])
source_ids = np.array(['source1', 'source2', 'source3', 'source4'])
cols = list(source_ids) + ['Unknown']
prp_r1 = np.array([190, 141, 52, 65, 152]) / 600.
prp_r2 = np.array([207, 81, 95, 65, 152]) / 600.
prp_r3 = np.array([180, 87, 190, 85, 58]) / 600.
prp_data = np.vstack([prp_r1, prp_r2, prp_r3])
prp_std_data = np.zeros((3, 5), dtype=np.float64)
prp_std_data[0, 0] = (np.array([10, 150, 30, 0]) / 600.).std()
prp_std_data[0, 1] = (np.array([100, 0, 30, 11]) / 600.).std()
prp_std_data[0, 2] = (np.array([15, 0, 30, 7]) / 600.).std()
prp_std_data[0, 3] = (np.array([0, 0, 30, 35]) / 600.).std()
prp_std_data[0, 4] = (np.array([25, 0, 30, 97]) / 600.).std()
prp_std_data[1, 0] = (np.array([100, 100, 0, 7]) / 600.).std()
prp_std_data[1, 1] = (np.array([10, 0, 60, 11]) / 600.).std()
prp_std_data[1, 2] = (np.array([15, 50, 30, 0]) / 600.).std()
prp_std_data[1, 3] = (np.array([0, 0, 30, 35]) / 600.).std()
prp_std_data[1, 4] = (np.array([25, 0, 30, 97]) / 600.).std()
prp_std_data[2, 0] = (np.array([100, 70, 10, 0]) / 600.).std()
prp_std_data[2, 1] = (np.array([10, 20, 30, 27]) / 600.).std()
prp_std_data[2, 2] = (np.array([10, 30, 50, 100]) / 600.).std()
prp_std_data[2, 3] = (np.array([5, 30, 30, 20]) / 600.).std()
prp_std_data[2, 4] = (np.array([25, 0, 30, 3]) / 600.).std()
exp_prp = pd.DataFrame(prp_data, index=sink_ids, columns=cols)
exp_prp_std = pd.DataFrame(prp_std_data, index=sink_ids, columns=cols)
obs_prp, obs_prp_std = cumulative_proportions(all_envcounts, sink_ids,
source_ids)
pd.util.testing.assert_frame_equal(obs_prp, exp_prp)
pd.util.testing.assert_frame_equal(obs_prp_std, exp_prp_std)
def test_single_sink_feature_table(self):
# 4 draws, depth of sink = 10, 5 sources + Unknown.
final_env_assignments = np.array([[5, 0, 0, 0, 2, 0, 1, 0, 3, 1],
[1, 1, 3, 3, 2, 2, 1, 1, 1, 1],
[4, 1, 4, 4, 4, 4, 1, 1, 3, 2],
[2, 1, 0, 5, 5, 5, 5, 1, 0, 2]])
# notice that each row is the same - they are determined by
# `generate_taxon_sequence` before the `gibbs_sampler` runs.
final_taxon_assignments = \
np.array([[0, 3, 3, 227, 550, 550, 550, 999, 999, 1100],
[0, 3, 3, 227, 550, 550, 550, 999, 999, 1100],
[0, 3, 3, 227, 550, 550, 550, 999, 999, 1100],
[0, 3, 3, 227, 550, 550, 550, 999, 999, 1100],
[0, 3, 3, 227, 550, 550, 550, 999, 999, 1100]])
# we are allowing more taxa than we have found in this sample, i.e. the
# largest value in `final_taxon_assignments` will be smaller than the
# largest index in the columns of the final table.
nfeatures = 1250
nsources = 5
data = np.zeros((nsources + 1, nfeatures), dtype=np.int32)
        # for the purpose of this test code, I'll increment data taxon by taxon.
data[np.array([5, 1, 4, 2]), 0] += 1
data[0, 3] += 3
data[1, 3] += 3
data[3, 3] += 1
data[4, 3] += 1
data[np.array([0, 3, 4, 5]), 227] += 1
data[0, 550] += 1
data[1, 550] += 3
data[2, 550] += 3
data[4, 550] += 2
data[5, 550] += 3
data[0, 999] += 2
data[1, 999] += 4
data[3, 999] += 2
data[1, 1100] += 2
data[2, 1100] += 2
exp_sources = ['source%s' % i for i in range(nsources)] + ['Unknown']
feature_ids = ['f%s' % i for i in range(1250)]
exp = pd.DataFrame(data, index=exp_sources, columns=feature_ids)
source_ids = np.array(['source%s' % i for i in range(nsources)])
obs = single_sink_feature_table(final_env_assignments,
final_taxon_assignments, source_ids,
feature_ids)
pd.util.testing.assert_frame_equal(obs, exp)
def test_collate_gibbs_results(self):
# We'll vary the depth of the sinks - simulating a situation where the
# user has not rarefied.
# We'll set:
# draws = 4
# sink_depths = [10, 15, 7]
# sources = 5 (+1 unknown)
final_env_counts_sink1 = np.array([[5, 2, 1, 1, 0, 1],
[0, 6, 2, 2, 0, 0],
[0, 3, 1, 1, 5, 0],
[2, 2, 2, 0, 0, 4]])
final_env_assignments_sink1 = \
np.array([[5, 0, 0, 0, 2, 0, 1, 0, 3, 1],
[1, 1, 3, 3, 2, 2, 1, 1, 1, 1],
[4, 1, 4, 4, 4, 4, 1, 1, 3, 2],
[2, 1, 0, 5, 5, 5, 5, 1, 0, 2]])
final_taxon_assignments_sink1 = \
np.array([[0, 3, 3, 227, 550, 550, 550, 999, 999, 1100],
[0, 3, 3, 227, 550, 550, 550, 999, 999, 1100],
[0, 3, 3, 227, 550, 550, 550, 999, 999, 1100],
[0, 3, 3, 227, 550, 550, 550, 999, 999, 1100],
[0, 3, 3, 227, 550, 550, 550, 999, 999, 1100]])
final_env_counts_sink2 = np.array([[5, 1, 3, 2, 0, 4],
[1, 1, 4, 5, 1, 3],
[4, 1, 3, 2, 3, 2],
[2, 3, 3, 2, 1, 4]])
final_env_assignments_sink2 = \
np.array([[2, 5, 0, 5, 1, 5, 0, 0, 3, 0, 3, 5, 2, 2, 0],
[3, 2, 2, 3, 2, 3, 3, 5, 5, 1, 3, 4, 2, 0, 5],
[0, 2, 3, 2, 0, 0, 2, 4, 5, 4, 0, 5, 3, 1, 4],
[4, 3, 2, 1, 2, 5, 3, 5, 2, 0, 1, 0, 5, 1, 5]])
final_taxon_assignments_sink2 = \
            np.array([[7, 7, 7, 7, 8, 8, 8, 8, 250, 250, 250, 250, 1249, 1249, 1249],
                      [7, 7, 7, 7, 8, 8, 8, 8, 250, 250, 250, 250, 1249, 1249, 1249],
                      [7, 7, 7, 7, 8, 8, 8, 8, 250, 250, 250, 250, 1249, 1249, 1249],
                      [7, 7, 7, 7, 8, 8, 8, 8, 250, 250, 250, 250, 1249, 1249, 1249]])
final_env_counts_sink3 = np.array([[4, 2, 0, 0, 1, 0],
[0, 3, 1, 0, 2, 1],
[0, 0, 1, 1, 3, 2],
[2, 1, 0, 3, 0, 1]])
final_env_assignments_sink3 = \
np.array([[4, 0, 0, 0, 1, 0, 1],
[1, 2, 1, 4, 5, 4, 1],
[4, 3, 5, 4, 4, 5, 2],
[3, 0, 1, 3, 3, 0, 5]])
final_taxon_assignments_sink3 = \
np.array([[3, 865, 865, 1100, 1100, 1100, 1249],
[3, 865, 865, 1100, 1100, 1100, 1249],
[3, 865, 865, 1100, 1100, 1100, 1249],
[3, 865, 865, 1100, 1100, 1100, 1249]])
# Create expected proportion data.
prp_data = np.zeros((3, 6), dtype=np.float64)
prp_std_data = np.zeros((3, 6), dtype=np.float64)
prp_data[0] = (final_env_counts_sink1.sum(0) /
final_env_counts_sink1.sum())
prp_data[1] = (final_env_counts_sink2.sum(0) /
final_env_counts_sink2.sum())
prp_data[2] = (final_env_counts_sink3.sum(0) /
final_env_counts_sink3.sum())
prp_std_data[0] = \
(final_env_counts_sink1 / final_env_counts_sink1.sum()).std(0)
prp_std_data[1] = \
(final_env_counts_sink2 / final_env_counts_sink2.sum()).std(0)
prp_std_data[2] = \
(final_env_counts_sink3 / final_env_counts_sink3.sum()).std(0)
sink_ids = ['sink1', 'sink2', 'sink3']
exp_sources = ['source%s' % i for i in range(5)] + ['Unknown']
feature_ids = ['f%s' % i for i in range(1250)]
exp_prp = pd.DataFrame(prp_data, index=sink_ids, columns=exp_sources)
exp_prp_std = pd.DataFrame(prp_std_data, index=sink_ids,
columns=exp_sources)
# Create expected feature table data.
ft1 = np.zeros((6, 1250), dtype=np.int32)
for r, c in zip(final_env_assignments_sink1.ravel(),
final_taxon_assignments_sink1.ravel()):
ft1[r, c] += 1
exp_ft1 = pd.DataFrame(ft1, index=exp_sources, columns=feature_ids)
ft2 = np.zeros((6, 1250), dtype=np.int32)
for r, c in zip(final_env_assignments_sink2.ravel(),
final_taxon_assignments_sink2.ravel()):
ft2[r, c] += 1
exp_ft2 = pd.DataFrame(ft2, index=exp_sources, columns=feature_ids)
ft3 = np.zeros((6, 1250), dtype=np.int32)
for r, c in zip(final_env_assignments_sink3.ravel(),
final_taxon_assignments_sink3.ravel()):
ft3[r, c] += 1
exp_ft3 = pd.DataFrame(ft3, index=exp_sources, columns=feature_ids)
exp_fts = [exp_ft1, exp_ft2, exp_ft3]
# Prepare the inputs for passing to collate_gibbs_results
all_envcounts = [final_env_counts_sink1, final_env_counts_sink2,
final_env_counts_sink3]
all_env_assignments = [final_env_assignments_sink1,
final_env_assignments_sink2,
final_env_assignments_sink3]
all_taxon_assignments = [final_taxon_assignments_sink1,
final_taxon_assignments_sink2,
final_taxon_assignments_sink3]
# Test when create_feature_tables=True
obs_prp, obs_prp_std, obs_fts = \
collate_gibbs_results(all_envcounts, all_env_assignments,
all_taxon_assignments, np.array(sink_ids),
np.array(exp_sources[:-1]),
np.array(feature_ids),
create_feature_tables=True, loo=False)
pd.util.testing.assert_frame_equal(obs_prp, exp_prp)
pd.util.testing.assert_frame_equal(obs_prp_std, exp_prp_std)
for i in range(3):
pd.util.testing.assert_frame_equal(obs_fts[i], exp_fts[i])
# Test when create_feature_tables=False
obs_prp, obs_prp_std, obs_fts = \
collate_gibbs_results(all_envcounts, all_env_assignments,
all_taxon_assignments, np.array(sink_ids),
np.array(exp_sources[:-1]),
np.array(feature_ids),
create_feature_tables=False, loo=False)
self.assertTrue(obs_fts is None)
def test_collate_gibbs_results_loo(self):
# We'll vary the depth of the sources - simulating a situation where
# the user has not rarefied.
# We'll set:
# draws = 2
# source_depths = [7, 4, 5]
# sources = 3 (+1 Unknown)
ec1 = np.array([[6, 0, 1],
[2, 2, 3]])
ea1 = np.array([[0, 2, 0, 0, 0, 0, 0],
[0, 1, 0, 2, 1, 2, 2]])
ta1 = np.array([[2, 2, 2, 4, 4, 4, 6],
[2, 2, 2, 4, 4, 4, 6]])
ec2 = np.array([[1, 2, 1],
[2, 2, 0]])
ea2 = np.array([[0, 1, 2, 1],
[0, 1, 1, 0]])
ta2 = np.array([[3, 3, 3, 3],
[3, 3, 3, 3]])
ec3 = np.array([[1, 2, 2],
[4, 0, 1]])
ea3 = np.array([[1, 1, 0, 2, 2],
[0, 0, 0, 0, 2]])
ta3 = np.array([[3, 3, 4, 5, 5],
[3, 3, 4, 5, 5]])
# Create expected proportion data.
prp_data = np.array([[0, 8/14., 2/14., 4/14.],
[3/8., 0, 4/8., 1/8.],
[5/10., 2/10., 0, 3/10.]], dtype=np.float64)
prp_std_data = np.zeros((3, 4), dtype=np.float64)
prp_std_data[0, 1:] = (ec1 / ec1.sum()).std(0)
prp_std_data[1, np.array([0, 2, 3])] = (ec2 / ec2.sum()).std(0)
prp_std_data[2, np.array([0, 1, 3])] = (ec3 / ec3.sum()).std(0)
exp_sources = ['source%s' % i for i in range(3)] + ['Unknown']
feature_ids = ['f%s' % i for i in range(7)]
exp_prp = pd.DataFrame(prp_data, index=exp_sources[:-1],
columns=exp_sources)
exp_prp_std = pd.DataFrame(prp_std_data, index=exp_sources[:-1],
columns=exp_sources)
# Create expected feature table data.
ft1 = np.array([[0, 0, 0, 0, 0, 0, 0],
[0, 0, 4, 0, 3, 0, 1],
[0, 0, 1, 0, 1, 0, 0],
[0, 0, 1, 0, 2, 0, 1]], dtype=np.int64)
ft2 = np.array([[0, 0, 0, 3, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 4, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0]], dtype=np.int64)
ft3 = np.array([[0, 0, 0, 2, 2, 1, 0],
[0, 0, 0, 2, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 3, 0]], dtype=np.int64)
exp_fts = [pd.DataFrame(ft1, index=exp_sources, columns=feature_ids),
| pd.DataFrame(ft2, index=exp_sources, columns=feature_ids) | pandas.DataFrame |
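# Illustrative addendum (assumption): in the leave-one-out expectations above,
# each source row is its per-draw column sums divided by the total number of
# assignments over all draws, with the held-out source itself fixed at zero.
import numpy as np
_ec1 = np.array([[6, 0, 1], [2, 2, 3]])  # two draws over the three other envs
assert np.allclose(_ec1.sum(axis=0) / _ec1.sum(), [8 / 14., 2 / 14., 4 / 14.])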
#Dataframe manipulation library
import pandas as pd
#Math functions, we'll only need the sqrt function so let's import only that
from math import sqrt
import numpy as np
import matplotlib.pyplot as plt
#Storing the movie information into a pandas dataframe
movies_df = pd.read_csv('movies.csv')
#Storing the user information into a pandas dataframe
ratings_df = pd.read_csv('ratings.csv')
#Using regular expressions to find a year stored between parentheses
#We specify the parentheses so we don't conflict with movies that have years in their titles
movies_df['year'] = movies_df.title.str.extract('(\(\d\d\d\d\))',expand=False)
#Removing the parentheses
movies_df['year'] = movies_df.year.str.extract('(\d\d\d\d)',expand=False)
#Removing the years from the 'title' column
movies_df['title'] = movies_df.title.str.replace('(\(\d\d\d\d\))', '')
#Applying the strip function to get rid of any ending whitespace characters that may have appeared
movies_df['title'] = movies_df['title'].apply(lambda x: x.strip())
#Dropping the genres column
movies_df = movies_df.drop('genres', 1)
#Drop removes a specified row or column from a dataframe
ratings_df = ratings_df.drop('timestamp', 1)
userInput = [
{'title':'Breakfast Club, The', 'rating':5},
{'title':'Toy Story', 'rating':3.5},
{'title':'Jumanji', 'rating':2},
{'title':"Pulp Fiction", 'rating':5},
{'title':'Akira', 'rating':4.5}
]
inputMovies = | pd.DataFrame(userInput) | pandas.DataFrame |
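# Illustrative addendum (not part of the original snippet): the two regex steps
# above first capture a '(1995)'-style parenthesised year from a title, then
# keep only the digits, while the title itself is cleaned of the year.
import re
_title = 'Toy Story (1995)'
_wrapped = re.search(r'(\(\d\d\d\d\))', _title).group(1)
_year = re.search(r'(\d\d\d\d)', _wrapped).group(1)
assert (_year, _title.replace(_wrapped, '').strip()) == ('1995', 'Toy Story')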
# Copyright (c) 2020, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright (c) 2015, Graphistry, Inc.
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Graphistry, Inc nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL Graphistry, Inc BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import cudf
from cudf.tests.utils import assert_eq
import cugraph
import datetime as dt
import pandas as pd
import pytest
simple_df = cudf.DataFrame.from_pandas(pd.DataFrame({
"id": ["a", "b", "c"],
"a1": [1, 2, 3],
"a2": ["red", "blue", "green"],
"🙈": ["æski ēˈmōjē", "😋", "s"],
}))
hyper_df = cudf.DataFrame.from_pandas(pd.DataFrame({
"aa": [0, 1, 2],
"bb": ["a", "b", "c"],
"cc": ["b", "0", "1"]
}))
def test_complex_df():
complex_df = pd.DataFrame({
"src": [0, 1, 2, 3],
"dst": [1, 2, 3, 0],
"colors": [1, 1, 2, 2],
"bool": [True, False, True, True],
"char": ["a", "b", "c", "d"],
"str": ["a", "b", "c", "d"],
"ustr": [u"a", u"b", u"c", u"d"],
"emoji": ["😋", "😋😋", "😋", "😋"],
"int": [0, 1, 2, 3],
"num": [0.5, 1.5, 2.5, 3.5],
"date_str": [
"2018-01-01 00:00:00",
"2018-01-02 00:00:00",
"2018-01-03 00:00:00",
"2018-01-05 00:00:00",
],
"date": [
dt.datetime(2018, 1, 1),
dt.datetime(2018, 1, 1),
dt.datetime(2018, 1, 1),
dt.datetime(2018, 1, 1),
],
"time": [
pd.Timestamp("2018-01-05"),
| pd.Timestamp("2018-01-05") | pandas.Timestamp |
import pandas as pd
from apiclient.discovery import build
from apiclient.errors import HttpError
import settings
API_KEY = settings.YT_API
YOUTUBE_API_SERVICE_NAME = 'youtube'
YOUTUBE_API_VERSION = 'v3'
CHANNEL_ID = 'UCHp2q2i85qt_9nn2H7AvGOw'
channels = [] #list that stores channel information
searches = [] #list that stores video IDs
videos = [] #list that stores information for each video
nextPagetoken = None
nextpagetoken = None
youtube = build(
YOUTUBE_API_SERVICE_NAME,
YOUTUBE_API_VERSION,
developerKey=API_KEY
)
channel_response = youtube.channels().list(
part = 'snippet,statistics',
id = CHANNEL_ID
).execute()
for channel_result in channel_response.get("items", []):
if channel_result["kind"] == "youtube#channel":
channels.append([channel_result["snippet"]["title"],channel_result["statistics"]["subscriberCount"],channel_result["statistics"]["videoCount"],channel_result["snippet"]["publishedAt"]])
while True:
if nextPagetoken != None:
nextpagetoken = nextPagetoken
search_response = youtube.search().list(
part = "snippet",
channelId = CHANNEL_ID,
maxResults = 50,
order = "date", #日付順にソート
pageToken = nextpagetoken #再帰的に指定
).execute()
for search_result in search_response.get("items", []):
if search_result["id"]["kind"] == "youtube#video":
searches.append(search_result["id"]["videoId"])
try:
nextPagetoken = search_response["nextPageToken"]
except:
break
for result in searches:
video_response = youtube.videos().list(
part = 'snippet,statistics',
id = result
).execute()
for video_result in video_response.get("items", []):
if video_result["kind"] == "youtube#video":
videos.append([video_result["snippet"]["title"],video_result["statistics"]["viewCount"],video_result["statistics"]["likeCount"],video_result["statistics"]["dislikeCount"],video_result["statistics"]["commentCount"],video_result["snippet"]["publishedAt"]])
videos_report = | pd.DataFrame(videos, columns=['title', 'viewCount', 'likeCount', 'dislikeCount', 'commentCount', 'publishedAt']) | pandas.DataFrame |
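# Illustrative addendum (hypothetical helper, not part of the YouTube client):
# the paging loop above follows this generic pattern - keep requesting with the
# previous response's nextPageToken until no further token is returned.
def _paginate(fetch_page):
    token = None
    while True:
        page = fetch_page(token)
        yield from page.get("items", [])
        token = page.get("nextPageToken")
        if token is None:
            break
assert list(_paginate(lambda token: {"items": [1, 2]})) == [1, 2]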
import pandas as pd
import os
def ensure_dir(file_path):
directory = os.path.dirname(file_path)
if directory and not os.path.exists(directory):
print("Creating new directory", directory)
os.makedirs(directory)
import json
def get_cfg_str(x):
# json.dumps(r.to_dict(), sort_keys=True, separators = (',', '~'))[1:-1]
# It seems DoIt does not allow equal (=) char in task name
return ",".join(['{}~{}'.format(k,v) for (k,v) in sorted(x.to_dict().items()) if k not in ['JUDI', 'name']])
def combine_csvs_base(params, infiles, outfile):
df = | pd.DataFrame() | pandas.DataFrame |
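# Illustrative addendum (assumption): get_cfg_str above flattens a parameter row
# into a sorted, comma-separated 'key~value' string, dropping the 'JUDI' and
# 'name' columns, so the result can be embedded in a DoIt task name without '='.
import pandas as pd
_row = pd.Series({'b': 2, 'a': 1, 'JUDI': 'x', 'name': 'y'})
_cfg = ",".join('{}~{}'.format(k, v) for k, v in sorted(_row.to_dict().items())
                if k not in ['JUDI', 'name'])
assert _cfg == 'a~1,b~2'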
from datetime import datetime
import pandas as pd
pd.options.mode.chained_assignment = None
import os
from sklearn.feature_extraction.text import CountVectorizer
from datetime import timedelta
import tweepy
from nltk.corpus import stopwords
import nltk
nltk.download('stopwords', quiet=True)
stop = stopwords.words('english')
import credentials
import config
def apply_account_link(row):
return "https://twitter.com/{}".format(row['user_screen_name'])
def add_tick(row):
if row['user_verified'] == True:
return row['user_name'] + ' ☑️'
else:
return row['user_name']
# Quick and easy functions for dataframe manipulation.
def applying_url(row):
return "https://twitter.com/{}/status/{}".format(row['user_screen_name'], row['id_str'])
def save_to_csv(df, name):
filename = f'{name}.csv'
save_path = config.file_path + '/' + filename
return df.to_csv(save_path)
def find_original_tweet(merged_all, tweets_raw):
auth = tweepy.AppAuthHandler(credentials.consumer_key, credentials.consumer_secret)
api = tweepy.API(auth, wait_on_rate_limit=True,
wait_on_rate_limit_notify=True)
for index, row in merged_all.iterrows():
if pd.isnull(row.tweet_url):
tweet_id = int(tweets_raw[tweets_raw.text == row.text].retweeted_status_id.values[0])
try:
status = api.get_status(tweet_id, tweet_mode="extended")
user_name = status.user.screen_name
merged_all.at[index, 'tweet_url'] = "https://twitter.com/{}/status/{}".format(user_name, tweet_id)
except:
merged_all.at[index, 'tweet_url'] = "N/A"
def mine_tweets(df):
tweets_raw = df[['id_str',
'user_id_str',
'created_at',
'user_screen_name',
'user_name',
'user_created_at',
'user_statuses_count',
'user_verified',
'text',
'entities_urls',
'retweeted_status_id']]
tweets_raw['created_at'] = | pd.to_datetime(tweets_raw['created_at'], format='%Y-%m-%d %H:%M:%S') | pandas.to_datetime |
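# Illustrative addendum (assumption): parsing created_at with an explicit format
# string, as above, yields timezone-naive Timestamps that compare chronologically.
import pandas as pd
_created = pd.to_datetime(pd.Series(['2021-03-01 12:30:00', '2021-03-01 09:00:00']),
                          format='%Y-%m-%d %H:%M:%S')
assert _created.dt.tz is None and _created.iloc[0] > _created.iloc[1]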
#
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import cudf
from numba import cuda, jit
from bdb_tools.readers import build_reader
q03_days_in_sec_before_purchase = 864000
q03_views_before_purchase = 5
q03_purchased_item_IN = 10001
q03_purchased_item_category_IN = 2, 3
q03_limit = 100
def read_tables(config, c=None):
table_reader = build_reader(
data_format=config["file_format"],
basepath=config["data_dir"],
split_row_groups=config["split_row_groups"],
backend=config["backend"],
)
item_cols = ["i_category_id", "i_item_sk"]
wcs_cols = [
"wcs_user_sk",
"wcs_click_time_sk",
"wcs_click_date_sk",
"wcs_item_sk",
"wcs_sales_sk",
]
item_df = table_reader.read("item", relevant_cols=item_cols)
wcs_df = table_reader.read("web_clickstreams", relevant_cols=wcs_cols)
if c:
c.create_table("web_clickstreams", wcs_df, persist=False)
c.create_table("item", item_df, persist=False)
return item_df
@jit(nopython=True)
def find_items_viewed_before_purchase_kernel_both(
relevant_idx_col, user_col, timestamp_col, item_col, out_col, N, i
):
# """
# Find the past N items viewed after a relevant purchase was made,
# as defined by the configuration of this query.
# """
if i < (relevant_idx_col.size): # boundary guard
# every relevant row gets N rows in the output, so we need to map the indexes
# back into their position in the original array
orig_idx = relevant_idx_col[i]
current_user = user_col[orig_idx]
# look at the previous N clicks (assume sorted descending)
rows_to_check = N
remaining_rows = user_col.size - orig_idx
if remaining_rows <= rows_to_check:
rows_to_check = remaining_rows - 1
for k in range(1, rows_to_check + 1):
if current_user != user_col[orig_idx + k]:
out_col[i * N + k - 1] = 0
# only checking relevant purchases via the relevant_idx_col
elif (timestamp_col[orig_idx + k] <= timestamp_col[orig_idx]) & (
timestamp_col[orig_idx + k]
>= (timestamp_col[orig_idx] - q03_days_in_sec_before_purchase)
):
out_col[i * N + k - 1] = item_col[orig_idx + k]
else:
out_col[i * N + k - 1] = 0
@cuda.jit
def find_items_viewed_before_purchase_kernel_gpu(
relevant_idx_col, user_col, timestamp_col, item_col, out_col, N
):
"""
Find the past N items viewed after a relevant purchase was made,
as defined by the configuration of this query.
"""
i = cuda.grid(1)
find_items_viewed_before_purchase_kernel_both(relevant_idx_col, user_col, timestamp_col, item_col, out_col, N, i)
@jit(nopython=True)
def find_items_viewed_before_purchase_kernel_cpu(
relevant_idx_col, user_col, timestamp_col, item_col, out_col, N
):
"""
Find the past N items viewed after a relevant purchase was made,
as defined by the configuration of this query.
"""
i = 0
find_items_viewed_before_purchase_kernel_both(relevant_idx_col, user_col, timestamp_col, item_col, out_col, N, i)
def apply_find_items_viewed(df, item_mappings):
# need to sort descending to ensure that the
# next N rows are the previous N clicks
import pandas as pd
import numpy as np
df = df.sort_values(
by=["wcs_user_sk", "tstamp", "wcs_sales_sk", "wcs_item_sk"],
ascending=[False, False, False, False],
)
df.reset_index(drop=True, inplace=True)
df["relevant_flag"] = (df.wcs_sales_sk != 0) & (
df.wcs_item_sk == q03_purchased_item_IN
)
df["relevant_idx_pos"] = df.index.to_series()
df.reset_index(drop=True, inplace=True)
# only allocate output for the relevant rows
sample = df.loc[df.relevant_flag == True]
sample.reset_index(drop=True, inplace=True)
N = q03_views_before_purchase
size = len(sample)
# we know this can be int32, since it's going to contain item_sks
out_arr = np.zeros(size * N, dtype=df["wcs_item_sk"].dtype, like=df["wcs_item_sk"].values)
if isinstance(df, cudf.DataFrame):
find_items_viewed_before_purchase_kernel_gpu.forall(size)(
sample["relevant_idx_pos"],
df["wcs_user_sk"],
df["tstamp"],
df["wcs_item_sk"],
out_arr,
N,
)
result = cudf.DataFrame({"prior_item_viewed": out_arr})
else:
find_items_viewed_before_purchase_kernel_cpu(
sample["relevant_idx_pos"].to_numpy(),
df["wcs_user_sk"].to_numpy(),
df["tstamp"].to_numpy(),
df["wcs_item_sk"].to_numpy(),
out_arr,
N,
)
result = | pd.DataFrame({"prior_item_viewed": out_arr}) | pandas.DataFrame |
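# Illustrative addendum (assumption, not the GPU code path): for one relevant
# purchase row, the kernel above records the item_sk of up to N of the next rows
# (earlier clicks, since the frame is sorted descending) that belong to the same
# user, writing 0 otherwise; the time-window check is omitted in this sketch.
import pandas as pd
_clicks = pd.DataFrame({'wcs_user_sk': [1, 1, 1, 2],
                        'tstamp': [100, 90, 80, 70],
                        'wcs_item_sk': [10001, 7, 8, 9]})
_N = 3
_out = [_clicks.wcs_item_sk.iloc[k] if _clicks.wcs_user_sk.iloc[k] == _clicks.wcs_user_sk.iloc[0]
        else 0 for k in range(1, _N + 1)]
assert _out == [7, 8, 0]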
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import pickle
import shutil
import sys
import tempfile
import numpy as np
from numpy import arange, nan
import pandas.testing as pdt
from pandas import DataFrame, MultiIndex, Series, to_datetime
# dependencies testing specific
import pytest
import recordlinkage
from recordlinkage.base import BaseCompareFeature
STRING_SIM_ALGORITHMS = [
'jaro', 'q_gram', 'cosine', 'jaro_winkler', 'dameraulevenshtein',
'levenshtein', 'lcs', 'smith_waterman'
]
NUMERIC_SIM_ALGORITHMS = ['step', 'linear', 'squared', 'exp', 'gauss']
FIRST_NAMES = [
u'Ronald', u'Amy', u'Andrew', u'William', u'Frank', u'Jessica', u'Kevin',
u'Tyler', u'Yvonne', nan
]
LAST_NAMES = [
u'Graham', u'Smith', u'Holt', u'Pope', u'Hernandez', u'Gutierrez',
u'Rivera', nan, u'Crane', u'Padilla'
]
STREET = [
u'<NAME>', nan, u'<NAME>', u'<NAME>', u'<NAME>',
u'<NAME>', u'Williams Trail', u'Durham Mountains', u'Anna Circle',
u'<NAME>'
]
JOB = [
u'Designer, multimedia', u'Designer, blown glass/stained glass',
u'Chiropractor', u'Engineer, mining', u'Quantity surveyor',
u'Phytotherapist', u'Teacher, English as a foreign language',
u'Electrical engineer', u'Research officer, government', u'Economist'
]
AGES = [23, 40, 70, 45, 23, 57, 38, nan, 45, 46]
# Run all tests in this file with:
# nosetests tests/test_compare.py
class TestData(object):
@classmethod
def setup_class(cls):
N_A = 100
N_B = 100
cls.A = DataFrame({
'age': np.random.choice(AGES, N_A),
'given_name': np.random.choice(FIRST_NAMES, N_A),
'lastname': np.random.choice(LAST_NAMES, N_A),
'street': np.random.choice(STREET, N_A)
})
cls.B = DataFrame({
'age': np.random.choice(AGES, N_B),
'given_name': np.random.choice(FIRST_NAMES, N_B),
'lastname': np.random.choice(LAST_NAMES, N_B),
'street': np.random.choice(STREET, N_B)
})
cls.A.index.name = 'index_df1'
cls.B.index.name = 'index_df2'
cls.index_AB = MultiIndex.from_arrays(
[arange(len(cls.A)), arange(len(cls.B))],
names=[cls.A.index.name, cls.B.index.name])
# Create a temporary directory
cls.test_dir = tempfile.mkdtemp()
@classmethod
def teardown_class(cls):
# Remove the test directory
shutil.rmtree(cls.test_dir)
class TestCompareApi(TestData):
"""General unittest for the compare API."""
def test_repr(self):
comp = recordlinkage.Compare()
comp.exact('given_name', 'given_name')
comp.string('given_name', 'given_name', method='jaro')
comp.numeric('age', 'age', method='step', offset=3, origin=2)
comp.numeric('age', 'age', method='step', offset=0, origin=2)
c_str = str(comp)
c_repr = repr(comp)
assert c_str == c_repr
start_str = '<{}'.format(comp.__class__.__name__)
assert c_str.startswith(start_str)
def test_instance_linking(self):
comp = recordlinkage.Compare()
comp.exact('given_name', 'given_name')
comp.string('given_name', 'given_name', method='jaro')
comp.numeric('age', 'age', method='step', offset=3, origin=2)
comp.numeric('age', 'age', method='step', offset=0, origin=2)
result = comp.compute(self.index_AB, self.A, self.B)
        # returns a DataFrame
        assert isinstance(result, DataFrame)
        # resulting frame has a MultiIndex
        assert isinstance(result.index, MultiIndex)
        # index names are okay
assert result.index.names == [self.A.index.name, self.B.index.name]
assert len(result) == len(self.index_AB)
def test_instance_dedup(self):
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name', method='jaro')
comp.numeric('age', 'age', method='step', offset=3, origin=2)
comp.numeric('age', 'age', method='step', offset=0, origin=2)
result = comp.compute(self.index_AB, self.A)
        # returns a DataFrame
        assert isinstance(result, DataFrame)
        # resulting frame has a MultiIndex
        assert isinstance(result.index, MultiIndex)
        # index names are okay
assert result.index.names == [self.A.index.name, self.B.index.name]
assert len(result) == len(self.index_AB)
def test_label_linking(self):
comp = recordlinkage.Compare()
comp.compare_vectorized(
lambda s1, s2: np.ones(len(s1), dtype=np.int),
'given_name',
'given_name',
label='my_feature_label')
result = comp.compute(self.index_AB, self.A, self.B)
assert "my_feature_label" in result.columns.tolist()
def test_label_dedup(self):
comp = recordlinkage.Compare()
comp.compare_vectorized(
lambda s1, s2: np.ones(len(s1), dtype=np.int),
'given_name',
'given_name',
label='my_feature_label')
result = comp.compute(self.index_AB, self.A)
assert "my_feature_label" in result.columns.tolist()
def test_multilabel_none_linking(self):
def ones_np_multi(s1, s2):
return np.ones(len(s1)), np.ones((len(s1), 3))
def ones_pd_multi(s1, s2):
return (Series(np.ones(len(s1))), DataFrame(np.ones((len(s1), 3))))
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name', method='jaro')
comp.compare_vectorized(
ones_np_multi,
'given_name',
'given_name')
comp.compare_vectorized(
ones_pd_multi,
'given_name',
'given_name')
result = comp.compute(self.index_AB, self.A, self.B)
assert [0, 1, 2, 3, 4, 5, 6, 7, 8] == \
result.columns.tolist()
def test_multilabel_linking(self):
def ones_np_multi(s1, s2):
return np.ones(len(s1)), np.ones((len(s1), 3))
def ones_pd_multi(s1, s2):
return (Series(np.ones(len(s1))), DataFrame(np.ones((len(s1), 3))))
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name', method='jaro')
comp.compare_vectorized(
ones_np_multi,
'given_name',
'given_name',
label=['a', ['b', 'c', 'd']])
comp.compare_vectorized(
ones_pd_multi,
'given_name',
'given_name',
label=['e', ['f', 'g', 'h']])
result = comp.compute(self.index_AB, self.A, self.B)
assert [0, 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'] == \
result.columns.tolist()
def test_multilabel_dedup(self):
def ones_np_multi(s1, s2):
return np.ones(len(s1)), np.ones((len(s1), 3))
def ones_pd_multi(s1, s2):
return (Series(np.ones(len(s1))), DataFrame(np.ones((len(s1), 3))))
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name', method='jaro')
comp.compare_vectorized(
ones_np_multi,
'given_name',
'given_name',
label=['a', ['b', 'c', 'd']])
comp.compare_vectorized(
ones_pd_multi,
'given_name',
'given_name',
label=['e', ['f', 'g', 'h']])
result = comp.compute(self.index_AB, self.A)
assert [0, 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'] == \
result.columns.tolist()
def test_multilabel_none_dedup(self):
def ones_np_multi(s1, s2):
return np.ones(len(s1)), np.ones((len(s1), 3))
def ones_pd_multi(s1, s2):
return (Series(np.ones(len(s1))), DataFrame(np.ones((len(s1), 3))))
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name', method='jaro')
comp.compare_vectorized(
ones_np_multi,
'given_name',
'given_name')
comp.compare_vectorized(
ones_pd_multi,
'given_name',
'given_name')
result = comp.compute(self.index_AB, self.A)
assert [0, 1, 2, 3, 4, 5, 6, 7, 8] == \
result.columns.tolist()
def test_multilabel_error_dedup(self):
def ones(s1, s2):
return np.ones((len(s1), 2))
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name', method='jaro')
comp.compare_vectorized(
ones, 'given_name', 'given_name', label=['a', 'b', 'c'])
with pytest.raises(ValueError):
comp.compute(self.index_AB, self.A)
def test_incorrect_collabels_linking(self):
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name', method='jaro')
comp.compare_vectorized(lambda s1, s2: np.ones(len(s1), dtype=np.int),
"given_name", "not_existing_label")
with pytest.raises(KeyError):
comp.compute(self.index_AB, self.A, self.B)
def test_incorrect_collabels_dedup(self):
comp = recordlinkage.Compare()
comp.compare_vectorized(lambda s1, s2: np.ones(len(s1), dtype=np.int),
"given_name", "not_existing_label")
with pytest.raises(KeyError):
comp.compute(self.index_AB, self.A)
def test_compare_custom_vectorized_linking(self):
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
B = DataFrame({'col': ['abc', 'abd', 'abc', 'abc', '123']})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
# test without label
comp = recordlinkage.Compare()
comp.compare_vectorized(lambda s1, s2: np.ones(len(s1), dtype=np.int),
'col', 'col')
result = comp.compute(ix, A, B)
expected = DataFrame([1, 1, 1, 1, 1], index=ix)
pdt.assert_frame_equal(result, expected)
# test with label
comp = recordlinkage.Compare()
comp.compare_vectorized(
lambda s1, s2: np.ones(len(s1), dtype=np.int),
'col',
'col',
label='my_feature_label')
result = comp.compute(ix, A, B)
expected = DataFrame(
[1, 1, 1, 1, 1], index=ix, columns=['my_feature_label'])
pdt.assert_frame_equal(result, expected)
# def test_compare_custom_nonvectorized_linking(self):
# A = DataFrame({'col': [1, 2, 3, 4, 5]})
# B = DataFrame({'col': [1, 2, 3, 4, 5]})
# ix = MultiIndex.from_arrays([A.index.values, B.index.values])
# def custom_func(a, b):
# return np.int64(1)
# # test without label
# comp = recordlinkage.Compare()
# comp.compare_single(
# custom_func,
# 'col',
# 'col'
# )
# result = comp.compute(ix, A, B)
# expected = DataFrame([1, 1, 1, 1, 1], index=ix)
# pdt.assert_frame_equal(result, expected)
# # test with label
# comp = recordlinkage.Compare()
# comp.compare_single(
# custom_func,
# 'col',
# 'col',
# label='test'
# )
# result = comp.compute(ix, A, B)
# expected = DataFrame([1, 1, 1, 1, 1], index=ix, columns=['test'])
# pdt.assert_frame_equal(result, expected)
def test_compare_custom_instance_type(self):
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
B = DataFrame({'col': ['abc', 'abd', 'abc', 'abc', '123']})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
def call(s1, s2):
# this should raise on incorrect types
assert isinstance(s1, np.ndarray)
assert isinstance(s2, np.ndarray)
return np.ones(len(s1), dtype=np.int)
comp = recordlinkage.Compare()
comp.compare_vectorized(lambda s1, s2: np.ones(len(s1), dtype=np.int),
'col', 'col')
result = comp.compute(ix, A, B)
expected = DataFrame([1, 1, 1, 1, 1], index=ix)
pdt.assert_frame_equal(result, expected)
def test_compare_custom_vectorized_arguments_linking(self):
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
B = DataFrame({'col': ['abc', 'abd', 'abc', 'abc', '123']})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
# test without label
comp = recordlinkage.Compare()
comp.compare_vectorized(
lambda s1, s2, x: np.ones(len(s1), dtype=np.int) * x, 'col', 'col',
5)
result = comp.compute(ix, A, B)
expected = DataFrame([5, 5, 5, 5, 5], index=ix)
pdt.assert_frame_equal(result, expected)
# test with label
comp = recordlinkage.Compare()
comp.compare_vectorized(
lambda s1, s2, x: np.ones(len(s1), dtype=np.int) * x,
'col',
'col',
5,
label='test')
result = comp.compute(ix, A, B)
expected = DataFrame([5, 5, 5, 5, 5], index=ix, columns=['test'])
pdt.assert_frame_equal(result, expected)
# test with kwarg
comp = recordlinkage.Compare()
comp.compare_vectorized(
lambda s1, s2, x: np.ones(len(s1), dtype=np.int) * x,
'col',
'col',
x=5,
label='test')
result = comp.compute(ix, A, B)
expected = DataFrame([5, 5, 5, 5, 5], index=ix, columns=['test'])
pdt.assert_frame_equal(result, expected)
def test_compare_custom_vectorized_dedup(self):
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
ix = MultiIndex.from_arrays([[0, 1, 2, 3, 4], [1, 2, 3, 4, 0]])
# test without label
comp = recordlinkage.Compare()
comp.compare_vectorized(lambda s1, s2: np.ones(len(s1), dtype=np.int),
'col', 'col')
result = comp.compute(ix, A)
expected = DataFrame([1, 1, 1, 1, 1], index=ix)
pdt.assert_frame_equal(result, expected)
# test with label
comp = recordlinkage.Compare()
comp.compare_vectorized(
lambda s1, s2: np.ones(len(s1), dtype=np.int),
'col',
'col',
label='test')
result = comp.compute(ix, A)
expected = DataFrame([1, 1, 1, 1, 1], index=ix, columns=['test'])
pdt.assert_frame_equal(result, expected)
def test_compare_custom_vectorized_arguments_dedup(self):
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
ix = MultiIndex.from_arrays([[0, 1, 2, 3, 4], [1, 2, 3, 4, 0]])
# test without label
comp = recordlinkage.Compare()
comp.compare_vectorized(
lambda s1, s2, x: np.ones(len(s1), dtype=np.int) * x, 'col', 'col',
5)
result = comp.compute(ix, A)
expected = DataFrame([5, 5, 5, 5, 5], index=ix)
pdt.assert_frame_equal(result, expected)
# test with label
comp = recordlinkage.Compare()
comp.compare_vectorized(
lambda s1, s2, x: np.ones(len(s1), dtype=np.int) * x,
'col',
'col',
5,
label='test')
result = comp.compute(ix, A)
expected = DataFrame([5, 5, 5, 5, 5], index=ix, columns=['test'])
pdt.assert_frame_equal(result, expected)
def test_parallel_comparing_api(self):
# use single job
comp = recordlinkage.Compare(n_jobs=1)
comp.exact('given_name', 'given_name', label='my_feature_label')
result_single = comp.compute(self.index_AB, self.A, self.B)
result_single.sort_index(inplace=True)
# use two jobs
comp = recordlinkage.Compare(n_jobs=2)
comp.exact('given_name', 'given_name', label='my_feature_label')
result_2processes = comp.compute(self.index_AB, self.A, self.B)
result_2processes.sort_index(inplace=True)
# compare results
pdt.assert_frame_equal(result_single, result_2processes)
def test_parallel_comparing(self):
# use single job
comp = recordlinkage.Compare(n_jobs=1)
comp.exact('given_name', 'given_name', label='my_feature_label')
result_single = comp.compute(self.index_AB, self.A, self.B)
result_single.sort_index(inplace=True)
# use two jobs
comp = recordlinkage.Compare(n_jobs=2)
comp.exact('given_name', 'given_name', label='my_feature_label')
result_2processes = comp.compute(self.index_AB, self.A, self.B)
result_2processes.sort_index(inplace=True)
# use two jobs
comp = recordlinkage.Compare(n_jobs=4)
comp.exact('given_name', 'given_name', label='my_feature_label')
result_4processes = comp.compute(self.index_AB, self.A, self.B)
result_4processes.sort_index(inplace=True)
# compare results
pdt.assert_frame_equal(result_single, result_2processes)
pdt.assert_frame_equal(result_single, result_4processes)
def test_pickle(self):
# test if it is possible to pickle the Compare class
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name')
comp.numeric('number', 'number')
comp.geo('lat', 'lng', 'lat', 'lng')
comp.date('before', 'after')
# do the test
pickle_path = os.path.join(self.test_dir, 'pickle_compare_obj.pickle')
pickle.dump(comp, open(pickle_path, 'wb'))
def test_manual_parallel_joblib(self):
# test if it is possible to pickle the Compare class
# This is only available for python 3. For python 2, it is not
# possible to pickle instancemethods. A workaround can be found at
# https://stackoverflow.com/a/29873604/8727928
if sys.version.startswith("3"):
# import joblib dependencies
from joblib import Parallel, delayed
# split the data into smaller parts
len_index = int(len(self.index_AB) / 2)
df_chunks = [self.index_AB[0:len_index], self.index_AB[len_index:]]
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name')
comp.string('lastname', 'lastname')
comp.exact('street', 'street')
# do in parallel
Parallel(n_jobs=2)(
delayed(comp.compute)(df_chunks[i], self.A, self.B)
for i in [0, 1])
def test_indexing_types(self):
# test the two types of indexing
# this test needs improvement
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
B = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
B_reversed = B[::-1].copy()
ix = MultiIndex.from_arrays([np.arange(5), np.arange(5)])
# test with label indexing type
comp_label = recordlinkage.Compare(indexing_type='label')
comp_label.exact('col', 'col')
result_label = comp_label.compute(ix, A, B_reversed)
# test with position indexing type
comp_position = recordlinkage.Compare(indexing_type='position')
comp_position.exact('col', 'col')
result_position = comp_position.compute(ix, A, B_reversed)
assert (result_position.values == 1).all(axis=0)
pdt.assert_frame_equal(result_label, result_position)
def test_pass_list_of_features(self):
from recordlinkage.compare import FrequencyA, VariableA, VariableB
# setup datasets and record pairs
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
B = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
ix = MultiIndex.from_arrays([np.arange(5), np.arange(5)])
# test with label indexing type
features = [
VariableA('col', label='y1'),
VariableB('col', label='y2'),
FrequencyA('col', label='y3')
]
comp_label = recordlinkage.Compare(features=features)
result_label = comp_label.compute(ix, A, B)
assert list(result_label) == ["y1", "y2", "y3"]
class TestCompareFeatures(TestData):
def test_feature(self):
# test using classes and the base class
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
B = DataFrame({'col': ['abc', 'abd', 'abc', 'abc', '123']})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
feature = BaseCompareFeature('col', 'col')
feature._f_compare_vectorized = lambda s1, s2: np.ones(len(s1))
feature.compute(ix, A, B)
def test_feature_multicolumn_return(self):
# test using classes and the base class
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
B = DataFrame({'col': ['abc', 'abd', 'abc', 'abc', '123']})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
def ones(s1, s2):
return DataFrame(np.ones((len(s1), 3)))
feature = BaseCompareFeature('col', 'col')
feature._f_compare_vectorized = ones
result = feature.compute(ix, A, B)
assert result.shape == (5, 3)
def test_feature_multicolumn_input(self):
# test using classes and the base class
A = DataFrame({
'col1': ['abc', 'abc', 'abc', 'abc', 'abc'],
'col2': ['abc', 'abc', 'abc', 'abc', 'abc']
})
B = DataFrame({
'col1': ['abc', 'abd', 'abc', 'abc', '123'],
'col2': ['abc', 'abd', 'abc', 'abc', '123']
})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
feature = BaseCompareFeature(['col1', 'col2'], ['col1', 'col2'])
feature._f_compare_vectorized = \
lambda s1_1, s1_2, s2_1, s2_2: np.ones(len(s1_1))
feature.compute(ix, A, B)
class TestCompareExact(TestData):
"""Test the exact comparison method."""
def test_exact_str_type(self):
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
B = DataFrame({'col': ['abc', 'abd', 'abc', 'abc', '123']})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
expected = DataFrame([1, 0, 1, 1, 0], index=ix)
comp = recordlinkage.Compare()
comp.exact('col', 'col')
result = comp.compute(ix, A, B)
pdt.assert_frame_equal(result, expected)
def test_exact_num_type(self):
A = DataFrame({'col': [42, 42, 41, 43, nan]})
B = DataFrame({'col': [42, 42, 42, 42, 42]})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
expected = DataFrame([1, 1, 0, 0, 0], index=ix)
comp = recordlinkage.Compare()
comp.exact('col', 'col')
result = comp.compute(ix, A, B)
pdt.assert_frame_equal(result, expected)
def test_link_exact_missing(self):
A = DataFrame({'col': [u'a', u'b', u'c', u'd', nan]})
B = DataFrame({'col': [u'a', u'b', u'd', nan, nan]})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
comp = recordlinkage.Compare()
comp.exact('col', 'col', label='na_')
comp.exact('col', 'col', missing_value=0, label='na_0')
comp.exact('col', 'col', missing_value=9, label='na_9')
comp.exact('col', 'col', missing_value=nan, label='na_na')
comp.exact('col', 'col', missing_value='str', label='na_str')
result = comp.compute(ix, A, B)
# Missing values as default
expected = Series([1, 1, 0, 0, 0], index=ix, name='na_')
pdt.assert_series_equal(result['na_'], expected)
# Missing values as 0
expected = Series([1, 1, 0, 0, 0], index=ix, name='na_0')
pdt.assert_series_equal(result['na_0'], expected)
# Missing values as 9
expected = Series([1, 1, 0, 9, 9], index=ix, name='na_9')
pdt.assert_series_equal(result['na_9'], expected)
# Missing values as nan
expected = Series([1, 1, 0, nan, nan], index=ix, name='na_na')
pdt.assert_series_equal(result['na_na'], expected)
# Missing values as string
expected = Series([1, 1, 0, 'str', 'str'], index=ix, name='na_str')
pdt.assert_series_equal(result['na_str'], expected)
def test_link_exact_disagree(self):
A = DataFrame({'col': [u'a', u'b', u'c', u'd', nan]})
B = DataFrame({'col': [u'a', u'b', u'd', nan, nan]})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
comp = recordlinkage.Compare()
comp.exact('col', 'col', label='d_')
comp.exact('col', 'col', disagree_value=0, label='d_0')
comp.exact('col', 'col', disagree_value=9, label='d_9')
comp.exact('col', 'col', disagree_value=nan, label='d_na')
comp.exact('col', 'col', disagree_value='str', label='d_str')
result = comp.compute(ix, A, B)
# disagree values as default
expected = Series([1, 1, 0, 0, 0], index=ix, name='d_')
pdt.assert_series_equal(result['d_'], expected)
# disagree values as 0
expected = Series([1, 1, 0, 0, 0], index=ix, name='d_0')
pdt.assert_series_equal(result['d_0'], expected)
# disagree values as 9
expected = Series([1, 1, 9, 0, 0], index=ix, name='d_9')
pdt.assert_series_equal(result['d_9'], expected)
# disagree values as nan
expected = Series([1, 1, nan, 0, 0], index=ix, name='d_na')
pdt.assert_series_equal(result['d_na'], expected)
# disagree values as string
expected = Series([1, 1, 'str', 0, 0], index=ix, name='d_str')
pdt.assert_series_equal(result['d_str'], expected)
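# Illustrative addendum (assumption, not the library internals): the exact
# comparison behaves like an element-wise equality check, with pairs involving a
# missing value scored 0 unless missing_value overrides it.
from numpy import nan
_s1 = ['a', 'b', 'c', 'd', nan]
_s2 = ['a', 'b', 'd', nan, nan]
_sim = [int(x == y) if (x == x and y == y) else 0 for x, y in zip(_s1, _s2)]
assert _sim == [1, 1, 0, 0, 0]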
# tests/test_compare.py:TestCompareNumeric
class TestCompareNumeric(TestData):
"""Test the numeric comparison methods."""
def test_numeric(self):
A = DataFrame({'col': [1, 1, 1, nan, 0]})
B = DataFrame({'col': [1, 2, 3, nan, nan]})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
comp = recordlinkage.Compare()
comp.numeric('col', 'col', 'step', offset=2)
comp.numeric('col', 'col', method='step', offset=2)
comp.numeric('col', 'col', 'step', 2)
result = comp.compute(ix, A, B)
# Basics
expected = Series([1.0, 1.0, 1.0, 0.0, 0.0], index=ix, name=0)
pdt.assert_series_equal(result[0], expected)
# Basics
expected = Series([1.0, 1.0, 1.0, 0.0, 0.0], index=ix, name=1)
pdt.assert_series_equal(result[1], expected)
# Basics
expected = Series([1.0, 1.0, 1.0, 0.0, 0.0], index=ix, name=2)
pdt.assert_series_equal(result[2], expected)
def test_numeric_with_missings(self):
A = DataFrame({'col': [1, 1, 1, nan, 0]})
B = DataFrame({'col': [1, 1, 1, nan, nan]})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
comp = recordlinkage.Compare()
comp.numeric('col', 'col', scale=2)
comp.numeric('col', 'col', scale=2, missing_value=0)
comp.numeric('col', 'col', scale=2, missing_value=123.45)
comp.numeric('col', 'col', scale=2, missing_value=nan)
comp.numeric('col', 'col', scale=2, missing_value='str')
result = comp.compute(ix, A, B)
# Missing values as default
expected = Series([1.0, 1.0, 1.0, 0.0, 0.0], index=ix, name=0)
pdt.assert_series_equal(result[0], expected)
# Missing values as 0
expected = Series(
[1.0, 1.0, 1.0, 0.0, 0.0], index=ix, dtype=np.float64, name=1)
pdt.assert_series_equal(result[1], expected)
# Missing values as 123.45
expected = Series([1.0, 1.0, 1.0, 123.45, 123.45], index=ix, name=2)
pdt.assert_series_equal(result[2], expected)
# Missing values as nan
expected = Series([1.0, 1.0, 1.0, nan, nan], index=ix, name=3)
pdt.assert_series_equal(result[3], expected)
# Missing values as string
expected = Series(
[1, 1, 1, 'str', 'str'], index=ix, dtype=object, name=4)
pdt.assert_series_equal(result[4], expected)
@pytest.mark.parametrize("alg", NUMERIC_SIM_ALGORITHMS)
def test_numeric_algorithms(self, alg):
A = DataFrame({'col': [1, 1, 1, 1, 1]})
B = DataFrame({'col': [1, 2, 3, 4, 5]})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
comp = recordlinkage.Compare()
comp.numeric('col', 'col', method='step', offset=1, label='step')
comp.numeric(
'col', 'col', method='linear', offset=1, scale=2, label='linear')
comp.numeric(
'col', 'col', method='squared', offset=1, scale=2, label='squared')
comp.numeric(
'col', 'col', method='exp', offset=1, scale=2, label='exp')
comp.numeric(
'col', 'col', method='gauss', offset=1, scale=2, label='gauss')
result_df = comp.compute(ix, A, B)
result = result_df[alg]
# All values between 0 and 1.
assert (result >= 0.0).all()
assert (result <= 1.0).all()
if alg != 'step':
print(alg)
print(result)
# sim(scale) = 0.5
expected_bool = Series(
[False, False, False, True, False], index=ix, name=alg)
pdt.assert_series_equal(result == 0.5, expected_bool)
# sim(offset) = 1
expected_bool = Series(
[True, True, False, False, False], index=ix, name=alg)
pdt.assert_series_equal(result == 1.0, expected_bool)
# sim(scale) larger than 0.5
expected_bool = Series(
[False, False, True, False, False], index=ix, name=alg)
pdt.assert_series_equal((result > 0.5) & (result < 1.0),
expected_bool)
# sim(scale) smaller than 0.5
expected_bool = Series(
[False, False, False, False, True], index=ix, name=alg)
pdt.assert_series_equal((result < 0.5) & (result >= 0.0),
expected_bool)
@pytest.mark.parametrize("alg", NUMERIC_SIM_ALGORITHMS)
def test_numeric_algorithms_errors(self, alg):
# scale negative
if alg != "step":
with pytest.raises(ValueError):
comp = recordlinkage.Compare()
comp.numeric('age', 'age', method=alg, offset=2, scale=-2)
comp.compute(self.index_AB, self.A, self.B)
# offset negative
with pytest.raises(ValueError):
comp = recordlinkage.Compare()
comp.numeric('age', 'age', method=alg, offset=-2, scale=-2)
comp.compute(self.index_AB, self.A, self.B)
def test_numeric_does_not_exist(self):
# raise when algorithm doesn't exists
A = DataFrame({'col': [1, 1, 1, nan, 0]})
B = DataFrame({'col': [1, 1, 1, nan, nan]})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
comp = recordlinkage.Compare()
comp.numeric('col', 'col', method='unknown_algorithm')
pytest.raises(ValueError, comp.compute, ix, A, B)
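# Illustrative addendum (assumption): the 'step' similarity exercised above is 1
# when the absolute difference is within the offset and 0 otherwise, which is
# why only the first two pairs of (1, 1..5) score 1.0 with offset=1.
import numpy as np
_diff = np.abs(np.array([1, 1, 1, 1, 1]) - np.array([1, 2, 3, 4, 5]))
assert list((_diff <= 1).astype(float)) == [1.0, 1.0, 0.0, 0.0, 0.0]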
# tests/test_compare.py:TestCompareDates
class TestCompareDates(TestData):
"""Test the exact comparison method."""
def test_dates(self):
A = DataFrame({
'col':
to_datetime(
['2005/11/23', nan, '2004/11/23', '2010/01/10', '2010/10/30'])
})
B = DataFrame({
'col':
to_datetime([
'2005/11/23', '2010/12/31', '2005/11/23', '2010/10/01',
'2010/9/30'
])
})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
comp = recordlinkage.Compare()
comp.date('col', 'col')
result = comp.compute(ix, A, B)[0]
expected = Series([1, 0, 0, 0.5, 0.5], index=ix, name=0)
pdt.assert_series_equal(result, expected)
def test_date_incorrect_dtype(self):
A = DataFrame({
'col':
['2005/11/23', nan, '2004/11/23', '2010/01/10', '2010/10/30']
})
B = DataFrame({
'col': [
'2005/11/23', '2010/12/31', '2005/11/23', '2010/10/01',
'2010/9/30'
]
})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
A['col1'] = to_datetime(A['col'])
B['col1'] = to_datetime(B['col'])
comp = recordlinkage.Compare()
comp.date('col', 'col1')
pytest.raises(ValueError, comp.compute, ix, A, B)
comp = recordlinkage.Compare()
comp.date('col1', 'col')
pytest.raises(ValueError, comp.compute, ix, A, B)
def test_dates_with_missings(self):
A = DataFrame({
'col':
to_datetime(
['2005/11/23', nan, '2004/11/23', '2010/01/10', '2010/10/30'])
})
B = DataFrame({
'col':
to_datetime([
'2005/11/23', '2010/12/31', '2005/11/23', '2010/10/01',
'2010/9/30'
])
})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
comp = recordlinkage.Compare()
comp.date('col', 'col', label='m_')
comp.date('col', 'col', missing_value=0, label='m_0')
comp.date('col', 'col', missing_value=123.45, label='m_float')
comp.date('col', 'col', missing_value=nan, label='m_na')
comp.date('col', 'col', missing_value='str', label='m_str')
result = comp.compute(ix, A, B)
# Missing values as default
expected = Series([1, 0, 0, 0.5, 0.5], index=ix, name='m_')
pdt.assert_series_equal(result['m_'], expected)
# Missing values as 0
expected = Series([1, 0, 0, 0.5, 0.5], index=ix, name='m_0')
pdt.assert_series_equal(result['m_0'], expected)
# Missing values as 123.45
expected = Series([1, 123.45, 0, 0.5, 0.5], index=ix, name='m_float')
pdt.assert_series_equal(result['m_float'], expected)
# Missing values as nan
expected = Series([1, nan, 0, 0.5, 0.5], index=ix, name='m_na')
pdt.assert_series_equal(result['m_na'], expected)
# Missing values as string
expected = Series(
[1, 'str', 0, 0.5, 0.5], index=ix, dtype=object, name='m_str')
pdt.assert_series_equal(result['m_str'], expected)
def test_dates_with_swap(self):
months_to_swap = [(9, 10, 123.45), (10, 9, 123.45), (1, 2, 123.45),
(2, 1, 123.45)]
A = DataFrame({
'col':
to_datetime(
['2005/11/23', nan, '2004/11/23', '2010/01/10', '2010/10/30'])
})
B = DataFrame({
'col':
to_datetime([
'2005/11/23', '2010/12/31', '2005/11/23', '2010/10/01',
'2010/9/30'
])
})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
comp = recordlinkage.Compare()
comp.date('col', 'col', label='s_')
comp.date(
'col', 'col', swap_month_day=0, swap_months='default', label='s_1')
comp.date(
'col',
'col',
swap_month_day=123.45,
swap_months='default',
label='s_2')
comp.date(
'col',
'col',
swap_month_day=123.45,
swap_months=months_to_swap,
label='s_3')
comp.date(
'col',
'col',
swap_month_day=nan,
swap_months='default',
missing_value=nan,
label='s_4')
comp.date('col', 'col', swap_month_day='str', label='s_5')
result = comp.compute(ix, A, B)
# swap_month_day as default
expected = Series([1, 0, 0, 0.5, 0.5], index=ix, name='s_')
pdt.assert_series_equal(result['s_'], expected)
# swap_month_day and swap_months as 0
expected = Series([1, 0, 0, 0, 0.5], index=ix, name='s_1')
pdt.assert_series_equal(result['s_1'], expected)
# swap_month_day 123.45 (float)
expected = Series([1, 0, 0, 123.45, 0.5], index=ix, name='s_2')
pdt.assert_series_equal(result['s_2'], expected)
# swap_month_day and swap_months 123.45 (float)
expected = Series([1, 0, 0, 123.45, 123.45], index=ix, name='s_3')
pdt.assert_series_equal(result['s_3'], expected)
# swap_month_day and swap_months as nan
expected = Series([1, nan, 0, nan, 0.5], index=ix, name='s_4')
pdt.assert_series_equal(result['s_4'], expected)
# swap_month_day as string
expected = Series(
[1, 0, 0, 'str', 0.5], index=ix, dtype=object, name='s_5')
pdt.assert_series_equal(result['s_5'], expected)
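# Illustrative usage sketch (not part of the original test suite): with the default
# settings exercised above, an exact date match scores 1, a pair whose day and month
# are swapped scores 0.5, and any other mismatch scores 0. Frame and column names
# below are made up.
def _example_date_compare():
    import pandas as pd
    import recordlinkage

    left = pd.DataFrame({'dob': pd.to_datetime(['2010/01/10'])})
    right = pd.DataFrame({'dob': pd.to_datetime(['2010/10/01'])})
    pairs = pd.MultiIndex.from_arrays([left.index.values, right.index.values])
    comp = recordlinkage.Compare()
    comp.date('dob', 'dob')
    # single candidate pair with day and month swapped -> expected score 0.5
    return comp.compute(pairs, left, right)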
# tests/test_compare.py:TestCompareGeo
class TestCompareGeo(TestData):
"""Test the geo comparison method."""
def test_geo(self):
# Utrecht, Amsterdam, Rotterdam (Cities in The Netherlands)
A = DataFrame({
'lat': [52.0842455, 52.3747388, 51.9280573],
'lng': [5.0124516, 4.7585305, 4.4203581]
})
B = DataFrame({
'lat': [52.3747388, 51.9280573, 52.0842455],
'lng': [4.7585305, 4.4203581, 5.0124516]
})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
comp = recordlinkage.Compare()
comp.geo(
'lat', 'lng', 'lat', 'lng', method='step',
offset=50) # 50 km range
result = comp.compute(ix, A, B)
# Missing values as default [36.639460, 54.765854, 44.092472]
expected = Series([1.0, 0.0, 1.0], index=ix, name=0)
pdt.assert_series_equal(result[0], expected)
def test_geo_batch(self):
# Utrecht, Amsterdam, Rotterdam (Cities in The Netherlands)
A = DataFrame({
'lat': [52.0842455, 52.3747388, 51.9280573],
'lng': [5.0124516, 4.7585305, 4.4203581]
})
B = DataFrame({
'lat': [52.3747388, 51.9280573, 52.0842455],
'lng': [4.7585305, 4.4203581, 5.0124516]
})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
comp = recordlinkage.Compare()
comp.geo(
'lat', 'lng', 'lat', 'lng', method='step', offset=1, label='step')
comp.geo(
'lat',
'lng',
'lat',
'lng',
method='linear',
offset=1,
scale=2,
label='linear')
comp.geo(
'lat',
'lng',
'lat',
'lng',
method='squared',
offset=1,
scale=2,
label='squared')
comp.geo(
'lat',
'lng',
'lat',
'lng',
method='exp',
offset=1,
scale=2,
label='exp')
comp.geo(
'lat',
'lng',
'lat',
'lng',
method='gauss',
offset=1,
scale=2,
label='gauss')
result_df = comp.compute(ix, A, B)
print(result_df)
for alg in ['step', 'linear', 'squared', 'exp', 'gauss']:
result = result_df[alg]
# All values between 0 and 1.
assert (result >= 0.0).all()
assert (result <= 1.0).all()
def test_geo_does_not_exist(self):
# Utrecht, Amsterdam, Rotterdam (Cities in The Netherlands)
A = DataFrame({
'lat': [52.0842455, 52.3747388, 51.9280573],
'lng': [5.0124516, 4.7585305, 4.4203581]
})
B = DataFrame({
'lat': [52.3747388, 51.9280573, 52.0842455],
'lng': [4.7585305, 4.4203581, 5.0124516]
})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
comp = recordlinkage.Compare()
comp.geo('lat', 'lng', 'lat', 'lng', method='unknown')
pytest.raises(ValueError, comp.compute, ix, A, B)
class TestCompareStrings(TestData):
"""Test the exact comparison method."""
def test_defaults(self):
        # the default algorithm is the levenshtein algorithm
        # test that default results are identical to levenshtein
A = DataFrame({
'col': [u'str_abc', u'str_abc', u'str_abc', nan, u'hsdkf']
})
B = DataFrame({'col': [u'str_abc', u'str_abd', u'jaskdfsd', nan, nan]})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
comp = recordlinkage.Compare()
comp.string('col', 'col', label='default')
comp.string('col', 'col', method='levenshtein', label='with_args')
result = comp.compute(ix, A, B)
pdt.assert_series_equal(
result['default'].rename(None),
result['with_args'].rename(None)
)
def test_fuzzy(self):
A = DataFrame({
'col': [u'str_abc', u'str_abc', u'str_abc', nan, u'hsdkf']
})
B = DataFrame({'col': [u'str_abc', u'str_abd', u'jaskdfsd', nan, nan]})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
comp = recordlinkage.Compare()
comp.string('col', 'col', method='jaro', missing_value=0)
comp.string('col', 'col', method='q_gram', missing_value=0)
comp.string('col', 'col', method='cosine', missing_value=0)
comp.string('col', 'col', method='jaro_winkler', missing_value=0)
comp.string('col', 'col', method='dameraulevenshtein', missing_value=0)
comp.string('col', 'col', method='levenshtein', missing_value=0)
result = comp.compute(ix, A, B)
print(result)
assert result.notnull().all(1).all(0)
assert (result[result.notnull()] >= 0).all(1).all(0)
assert (result[result.notnull()] <= 1).all(1).all(0)
def test_threshold(self):
A = DataFrame({'col': [u"gretzky", u"gretzky99", u"gretzky", u"gretzky"]})
B = DataFrame({'col': [u"gretzky", u"gretzky", nan, u"wayne"]})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
comp = recordlinkage.Compare()
comp.string(
'col',
'col',
method="levenshtein",
threshold=0.5,
missing_value=2.0,
label="x_col1"
)
comp.string(
'col',
'col',
method="levenshtein",
threshold=1.0,
missing_value=0.5,
label="x_col2"
)
comp.string(
'col',
'col',
method="levenshtein",
threshold=0.0,
missing_value=nan,
label="x_col3"
)
result = comp.compute(ix, A, B)
expected = Series([1.0, 1.0, 2.0, 0.0], index=ix, name="x_col1")
pdt.assert_series_equal(result["x_col1"], expected)
expected = Series([1.0, 0.0, 0.5, 0.0], index=ix, name="x_col2")
pdt.assert_series_equal(result["x_col2"], expected)
expected = Series([1.0, 1.0, nan, 1.0], index=ix, name="x_col3")
pdt.assert_series_equal(result["x_col3"], expected)
@pytest.mark.parametrize("alg", STRING_SIM_ALGORITHMS)
def test_incorrect_input(self, alg):
A = DataFrame({'col': [1, 1, 1, nan, 0]})
B = DataFrame({'col': [1, 1, 1, nan, nan]})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
with pytest.raises(Exception):
comp = recordlinkage.Compare()
comp.string('col', 'col', method=alg)
comp.compute(ix, A, B)
@pytest.mark.parametrize("alg", STRING_SIM_ALGORITHMS)
def test_string_algorithms_nan(self, alg):
A = DataFrame({'col': [u"nan", nan, nan, nan, nan]})
B = DataFrame({'col': [u"nan", nan, nan, nan, nan]})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
comp = recordlinkage.Compare()
comp.string('col', 'col', method=alg)
result = comp.compute(ix, A, B)[0]
expected = Series([1.0, 0.0, 0.0, 0.0, 0.0], index=ix, name=0)
pdt.assert_series_equal(result, expected)
comp = recordlinkage.Compare()
comp.string('col', 'col', method=alg, missing_value=nan)
result = comp.compute(ix, A, B)[0]
expected = Series([1.0, nan, nan, nan, nan], index=ix, name=0)
pdt.assert_series_equal(result, expected)
comp = recordlinkage.Compare()
comp.string('col', 'col', method=alg, missing_value=9.0)
result = comp.compute(ix, A, B)[0]
expected = Series([1.0, 9.0, 9.0, 9.0, 9.0], index=ix, name=0)
pdt.assert_series_equal(result, expected)
@pytest.mark.parametrize("alg", STRING_SIM_ALGORITHMS)
def test_string_algorithms(self, alg):
A = DataFrame({
'col': [u'str_abc', u'str_abc', u'str_abc', nan, u'hsdkf']
})
B = DataFrame({'col': [u'str_abc', u'str_abd', u'jaskdfsd', nan, nan]})
        ix = MultiIndex.from_arrays([A.index.values, B.index.values])
from typing import Any, Dict, List, Tuple
import numpy as np
import pandas as pd
import streamlit as st
@st.cache(ttl=300)
def remove_empty_cols(df: pd.DataFrame) -> Tuple[pd.DataFrame, List[Any]]:
"""Remove columns with strictly less than 2 distinct values in input dataframe.
Parameters
----------
df : pd.DataFrame
Input dataframe whose columns will be checked and potentially removed.
Returns
-------
pd.DataFrame
Dataframe with empty columns removed.
list
List of columns that have been removed.
"""
count_cols = df.nunique(dropna=False)
empty_cols = list(count_cols[count_cols < 2].index)
return df.drop(empty_cols, axis=1), empty_cols
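# Minimal illustrative sketch (not used by the app): nunique(dropna=False) counts NaN
# as a value, so a column that is entirely NaN or entirely constant has fewer than two
# distinct values and is dropped. All column names below are made up.
def _example_remove_empty_cols() -> Tuple[pd.DataFrame, List[Any]]:
    df = pd.DataFrame({"keep": [1, 2, 3], "constant": [0, 0, 0], "empty": [np.nan] * 3})
    count_cols = df.nunique(dropna=False)
    empty_cols = list(count_cols[count_cols < 2].index)
    # empty_cols == ["constant", "empty"]; only the "keep" column remains after the drop
    return df.drop(empty_cols, axis=1), empty_cols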
def print_empty_cols(empty_cols: List[Any]) -> None:
"""Displays a message in streamlit dashboard if the input list is not empty.
Parameters
----------
empty_cols : list
List of columns that have been removed.
"""
L = len(empty_cols)
if L > 0:
st.error(
f'The following column{"s" if L > 1 else ""} ha{"ve" if L > 1 else "s"} been removed because '
f'{"they have" if L > 1 else "it has"} <= 1 distinct values: {", ".join(empty_cols)}'
)
@st.cache(suppress_st_warning=True, ttl=300)
def format_date_and_target(
df_input: pd.DataFrame,
date_col: str,
target_col: str,
config: Dict[Any, Any],
load_options: Dict[Any, Any],
) -> pd.DataFrame:
"""Formats date and target columns of input dataframe.
Parameters
----------
df_input : pd.DataFrame
Input dataframe whose columns will be formatted.
date_col : str
Name of date column in input dataframe.
target_col : str
Name of target column in input dataframe.
config : Dict
Lib configuration dictionary.
load_options : Dict
Loading options selected by user.
Returns
-------
pd.DataFrame
Dataframe with columns formatted.
"""
df = df_input.copy() # To avoid CachedObjectMutationWarning
df = _format_date(df, date_col, load_options, config)
df = _format_target(df, target_col, config)
df = _rename_cols(df, date_col, target_col)
return df
def _format_date(
df: pd.DataFrame, date_col: str, load_options: Dict[Any, Any], config: Dict[Any, Any]
) -> pd.DataFrame:
"""Formats date column of input dataframe.
Parameters
----------
df : pd.DataFrame
Input dataframe whose columns will be formatted.
date_col : str
Name of date column in input dataframe.
load_options : Dict
Loading options selected by user.
config : Dict
Lib config dictionary containing information about default date format.
Returns
-------
pd.DataFrame
Dataframe with date column formatted.
"""
try:
date_series = pd.to_datetime(df[date_col])
if __check_date_format(date_series) | (
config["dataprep"]["date_format"] != load_options["date_format"]
):
date_series = pd.to_datetime(df[date_col], format=load_options["date_format"])
df[date_col] = date_series
days_range = (df[date_col].max() - df[date_col].min()).days
sec_range = (df[date_col].max() - df[date_col].min()).seconds
if ((days_range < 1) & (sec_range < 1)) | (np.isnan(days_range) & np.isnan(sec_range)):
st.error(
"Please select the correct date column (selected column has a time range < 1s)."
)
st.stop()
return df
except:
st.error(
"Please select a valid date format (selected column can't be converted into date)."
)
st.stop()
def __check_date_format(date_series: pd.Series) -> bool:
"""Checks whether the date column has been correctly converted to datetime.
Parameters
----------
date_series : pd.Series
Date column that has been converted.
Returns
-------
bool
        True if the conversion may not have worked correctly (fewer than two distinct years, months and days), False otherwise.
"""
test1 = date_series.map(lambda x: x.year).nunique() < 2
test2 = date_series.map(lambda x: x.month).nunique() < 2
test3 = date_series.map(lambda x: x.day).nunique() < 2
    return test1 & test2 & test3
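# Illustrative sketch (made-up data, not called anywhere): the check above returns True
# when year, month and day each show fewer than two distinct values, which the caller
# treats as a sign that the automatic conversion may have gone wrong and re-parses the
# column with the user-supplied format.
def _example_check_date_format() -> bool:
    collapsed = pd.Series(pd.to_datetime(["2021-01-01", "2021-01-01", "2021-01-01"]))
    return __check_date_format(collapsed)  # True -> triggers an explicit re-parse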
def _format_target(df: pd.DataFrame, target_col: str, config: Dict[Any, Any]) -> pd.DataFrame:
"""Formats target column of input dataframe.
Parameters
----------
df : pd.DataFrame
Input dataframe whose columns will be formatted.
target_col : str
        Name of target column in input dataframe.
    config : Dict
        Lib configuration dictionary containing the minimum target cardinality threshold.
Returns
-------
pd.DataFrame
        Dataframe with target column formatted.
"""
try:
df[target_col] = df[target_col].astype("float")
if df[target_col].nunique() < config["validity"]["min_target_cardinality"]:
st.error(
"Please select the correct target column (should be numerical, not categorical)."
)
st.stop()
return df
except:
st.error("Please select the correct target column (should be of type int or float).")
st.stop()
def _rename_cols(df: pd.DataFrame, date_col: str, target_col: str) -> pd.DataFrame:
"""Renames date and target columns of input dataframe.
Parameters
----------
df : pd.DataFrame
Input dataframe whose columns will be renamed.
date_col : str
Name of date column in input dataframe.
target_col : str
Name of target column in input dataframe.
Returns
-------
pd.DataFrame
Dataframe with columns renamed.
"""
if (target_col != "y") and ("y" in df.columns):
df = df.rename(columns={"y": "y_2"})
if (date_col != "ds") and ("ds" in df.columns):
df = df.rename(columns={"ds": "ds_2"})
df = df.rename(columns={date_col: "ds", target_col: "y"})
return df
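# Illustrative sketch (made-up column names): if the dataframe already contains a column
# literally named "y" or "ds" that is not the selected target/date column, it is renamed
# to "y_2"/"ds_2" first so that the final rename cannot create duplicated column names.
def _example_rename_cols() -> pd.DataFrame:
    df = pd.DataFrame({"day": ["2021-01-01"], "sales": [10.0], "y": [999]})
    # resulting columns: "ds" (from "day"), "y" (from "sales") and "y_2" (former "y")
    return _rename_cols(df, date_col="day", target_col="sales")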
# NB: date_col and target_col not used, only added to avoid unexpected caching when their values change
@st.cache(ttl=300)
def filter_and_aggregate_df(
df_input: pd.DataFrame,
dimensions: Dict[Any, Any],
config: Dict[Any, Any],
date_col: str,
target_col: str,
) -> Tuple[pd.DataFrame, List[Any]]:
"""Filters and aggregates input dataframe according to dimensions dictionary specifications.
Parameters
----------
df_input : pd.DataFrame
Input dataframe that will be filtered and/or aggregated.
dimensions : Dict
Filtering and aggregation specifications.
config : Dict
Lib configuration dictionary.
date_col : str
Name of date column in input dataframe.
target_col : str
Name of target column in input dataframe.
Returns
-------
pd.DataFrame
Dataframe filtered and/or aggregated.
list
List of columns removed from input dataframe.
"""
df = df_input.copy() # To avoid CachedObjectMutationWarning
df = _filter(df, dimensions)
df, cols_to_drop = _format_regressors(df, config)
df = _aggregate(df, dimensions)
return df, cols_to_drop
def _filter(df: pd.DataFrame, dimensions: Dict[Any, Any]) -> pd.DataFrame:
"""Filters input dataframe according to dimensions dictionary specifications.
Parameters
----------
df : pd.DataFrame
Input dataframe that will be filtered and/or aggregated.
dimensions : Dict
Filtering specifications.
Returns
-------
pd.DataFrame
Filtered dataframe.
"""
filter_cols = list(set(dimensions.keys()) - {"agg"})
for col in filter_cols:
df = df.loc[df[col].isin(dimensions[col])]
return df.drop(filter_cols, axis=1)
def _format_regressors(df: pd.DataFrame, config: Dict[Any, Any]) -> Tuple[pd.DataFrame, List[Any]]:
"""Format some columns in input dataframe.
Parameters
----------
df : pd.DataFrame
Input dataframe whose columns will be formatted.
config : Dict
Lib configuration dictionary.
Returns
-------
pd.DataFrame
Formatted dataframe.
list
List of columns removed from input dataframe.
"""
cols_to_drop = []
for col in set(df.columns) - {"ds", "y"}:
if df[col].nunique(dropna=False) < 2:
cols_to_drop.append(col)
elif df[col].nunique(dropna=False) == 2:
df[col] = df[col].map(dict(zip(df[col].unique(), [0, 1])))
elif df[col].nunique() <= config["validity"]["max_cat_reg_cardinality"]:
df = __one_hot_encoding(df, col)
else:
try:
df[col] = df[col].astype("float")
except:
cols_to_drop.append(col)
return df.drop(cols_to_drop, axis=1), cols_to_drop
def __one_hot_encoding(df: pd.DataFrame, col: str) -> pd.DataFrame:
"""Applies one-hot encoding to some columns of input dataframe.
Parameters
----------
df : pd.DataFrame
Input dataframe whose columns will be one-hot encoded.
    col : str
        Name of the column to one-hot encode.
Returns
-------
pd.DataFrame
One-hot encoded dataframe.
"""
df = pd.concat([df, pd.get_dummies(df[col], prefix=col)], axis=1)
return df.drop(col, axis=1)
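# Illustrative sketch (made-up data): a low-cardinality categorical regressor is expanded
# into prefixed dummy columns and the original column is dropped.
def _example_one_hot_encoding() -> pd.DataFrame:
    df = pd.DataFrame(
        {
            "ds": pd.date_range("2021-01-01", periods=3, freq="D"),
            "y": [1.0, 2.0, 3.0],
            "country": ["FR", "US", "FR"],
        }
    )
    # resulting columns: ds, y, country_FR, country_US
    return __one_hot_encoding(df, "country")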
def print_removed_cols(cols_removed: List[Any]) -> None:
"""Displays a message in streamlit dashboard if the input list is not empty.
Parameters
----------
cols_removed : list
List of columns that have been removed.
"""
L = len(cols_removed)
if L > 0:
st.error(
f'The following column{"s" if L > 1 else ""} ha{"ve" if L > 1 else "s"} been removed because '
f'{"they are" if L > 1 else "it is"} neither the target, '
f'nor a dimension, nor a potential regressor: {", ".join(cols_removed)}'
)
def _aggregate(df: pd.DataFrame, dimensions: Dict[Any, Any]) -> pd.DataFrame:
"""Aggregates input dataframe according to dimensions dictionary specifications.
Parameters
----------
df : pd.DataFrame
Input dataframe that will be filtered and/or aggregated.
dimensions : Dict
        Aggregation specifications (the "agg" key gives the aggregation function for the target).
Returns
-------
pd.DataFrame
Aggregated dataframe.
"""
cols_to_agg = set(df.columns) - {"ds", "y"}
agg_dict = {col: "mean" if df[col].nunique() > 2 else "max" for col in cols_to_agg}
agg_dict["y"] = dimensions["agg"].lower()
return df.groupby("ds").agg(agg_dict).reset_index()
@st.cache(ttl=300)
def format_datetime(df_input: pd.DataFrame, resampling: Dict[Any, Any]) -> pd.DataFrame:
"""Formats date column to datetime in input dataframe.
Parameters
----------
df_input : pd.DataFrame
Input dataframe whose date column will be formatted to datetime.
resampling : Dict
Dictionary whose "freq" key contains the frequency of input dataframe.
Returns
-------
pd.DataFrame
Dataframe with date column formatted to datetime.
"""
df = df_input.copy() # To avoid CachedObjectMutationWarning
if resampling["freq"][-1] in ["H", "s"]:
df["ds"] = df["ds"].map(lambda x: x.strftime("%Y-%m-%d %H:%M:%S"))
df["ds"] = pd.to_datetime(df["ds"])
return df
@st.cache(ttl=300)
def resample_df(df_input: pd.DataFrame, resampling: Dict[Any, Any]) -> pd.DataFrame:
"""Resamples input dataframe according to resampling dictionary specifications.
Parameters
----------
df_input : pd.DataFrame
Input dataframe that will be resampled.
resampling : Dict
Resampling specifications.
Returns
-------
pd.DataFrame
Resampled dataframe.
"""
df = df_input.copy() # To avoid CachedObjectMutationWarning
if resampling["resample"]:
cols_to_agg = set(df.columns) - {"ds", "y"}
agg_dict = {col: "mean" if df[col].nunique() > 2 else "max" for col in cols_to_agg}
agg_dict["y"] = resampling["agg"].lower()
df = df.set_index("ds").resample(resampling["freq"][-1]).agg(agg_dict).reset_index()
return df
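# Illustrative sketch (made-up data): mirrors the resampling rule above without the
# streamlit cache, using resampling["freq"][-1] == "D" to go from hourly to daily rows.
# Continuous regressors are averaged, binary ones take their max, and the target uses
# the aggregation selected by the user (here "mean").
def _example_resample() -> pd.DataFrame:
    df = pd.DataFrame(
        {
            "ds": pd.date_range("2021-01-01", periods=48, freq="H"),
            "y": np.arange(48, dtype=float),
            "promo": [0, 1] * 24,
        }
    )
    agg_dict = {"promo": "max", "y": "mean"}  # built as in resample_df above
    return df.set_index("ds").resample("D").agg(agg_dict).reset_index()  # 2 daily rows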
def check_dataset_size(df: pd.DataFrame, config: Dict[Any, Any]) -> None:
"""Displays a message in streamlit dashboard and stops it if the input dataframe has not enough rows.
Parameters
----------
df : pd.DataFrame
Input dataframe.
config : Dict
Lib configuration dictionary where the minimum number of rows is given.
"""
if (
len(df)
<= config["validity"]["min_data_points_train"] + config["validity"]["min_data_points_val"]
):
st.error(
f"The dataset has not enough data points ({len(df)} data points only) to make a forecast. "
f"Please resample with a higher frequency or change cleaning options."
)
st.stop()
def check_future_regressors_df(
datasets: Dict[Any, Any],
dates: Dict[Any, Any],
params: Dict[Any, Any],
resampling: Dict[Any, Any],
date_col: str,
dimensions: Dict[Any, Any],
) -> bool:
"""Displays a message if the future regressors dataframe is incorrect and says whether or not to use it afterwards.
Parameters
----------
datasets : Dict
Dictionary storing all dataframes.
dates : Dict
Dictionary containing future forecasting dates information.
params : Dict
Dictionary containing all model parameters and list of selected regressors.
resampling : Dict
Dictionary containing dataset frequency information.
date_col : str
Name of date column.
dimensions : Dict
Dictionary containing dimensions information.
Returns
-------
bool
Whether or not to use regressors for future forecast.
"""
use_regressors = False
if "future_regressors" in datasets.keys():
# Check date column
if date_col not in datasets["future_regressors"].columns:
st.error(
f"Date column '{date_col}' not found in the dataset provided for future regressors."
)
st.stop()
# Check number of distinct dates
N_dates_input = datasets["future_regressors"][date_col].nunique()
N_dates_expected = len(
pd.date_range(
start=dates["forecast_start_date"],
end=dates["forecast_end_date"],
freq=resampling["freq"],
)
)
if N_dates_input != N_dates_expected:
st.error(
f"The dataset provided for future regressors has the right number of distinct dates "
f"(expected {N_dates_expected}, found {N_dates_input}). "
f"Please make sure that the date column goes from {dates['forecast_start_date'].strftime('%Y-%m-%d')} "
f"to {dates['forecast_end_date'].strftime('%Y-%m-%d')} at frequency {resampling['freq']} "
f"without skipping any date in this range."
)
st.stop()
# Check regressors
regressors_expected = set(params["regressors"].keys())
input_cols = set(datasets["future_regressors"])
if len(input_cols.intersection(regressors_expected)) != len(regressors_expected):
missing_regressors = [reg for reg in regressors_expected if reg not in input_cols]
if len(missing_regressors) > 1:
st.error(
f"Columns {', '.join(missing_regressors[:-1])} and {missing_regressors[-1]} are missing "
f"in the dataset provided for future regressors."
)
else:
st.error(
f"Column {missing_regressors[0]} is missing in the dataset provided for future regressors."
)
st.stop()
# Check dimensions
dim_expected = {dim for dim in dimensions.keys() if dim != "agg"}
if len(input_cols.intersection(dim_expected)) != len(dim_expected):
missing_dim = [dim for dim in dim_expected if dim not in input_cols]
if len(missing_dim) > 1:
st.error(
f"Dimension columns {', '.join(missing_dim[:-1])} and {missing_dim[-1]} are missing "
f"in the dataset provided for future regressors."
)
else:
st.error(
f"Dimension column {missing_dim[0]} is missing in the dataset provided for future regressors."
)
st.stop()
use_regressors = True
return use_regressors
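# Illustrative sketch (made-up dates and frequency): the expected number of distinct
# future dates checked above is simply the length of the forecast date range at the
# dataset frequency.
def _example_expected_future_dates() -> int:
    return len(pd.date_range(start="2021-02-01", end="2021-02-28", freq="D"))  # 28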
def prepare_future_df(
datasets: Dict[Any, Any],
dates: Dict[Any, Any],
date_col: str,
target_col: str,
dimensions: Dict[Any, Any],
load_options: Dict[Any, Any],
config: Dict[Any, Any],
resampling: Dict[Any, Any],
) -> Tuple[pd.DataFrame, Dict[Any, Any]]:
"""Applies data preparation to the dataset provided with future regressors.
Parameters
----------
datasets : Dict
Dictionary storing all dataframes.
dates : Dict
Dictionary containing future forecasting dates information.
date_col : str
Name of date column.
target_col : str
Name of target column.
dimensions : Dict
Dictionary containing dimensions information.
load_options : Dict
Loading options selected by user.
config : Dict
Lib configuration dictionary.
resampling : Dict
Resampling specifications.
Returns
-------
pd.DataFrame
Prepared future dataframe.
dict
Dictionary storing all dataframes.
"""
if "future_regressors" in datasets.keys():
future = datasets["future_regressors"]
future[target_col] = 0
future = pd.concat([datasets["uploaded"][list(future.columns)], future], axis=0)
future, _ = remove_empty_cols(future)
future = format_date_and_target(future, date_col, target_col, config, load_options)
future, _ = filter_and_aggregate_df(future, dimensions, config, date_col, target_col)
future = format_datetime(future, resampling)
future = resample_df(future, resampling)
datasets["full"] = future.loc[future["ds"] < dates["forecast_start_date"]]
future = future.drop("y", axis=1)
else:
future_dates = pd.date_range(
start=datasets["full"].ds.min(),
end=dates["forecast_end_date"],
freq=dates["forecast_freq"],
)
        future = pd.DataFrame(future_dates, columns=["ds"])
# -*- coding: utf-8 -*-
"""System transmission plots.
This code creates transmission line and interface plots.
@author: <NAME>, <NAME>
"""
import os
import logging
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.colors as mcolors
import matplotlib.dates as mdates
import marmot.config.mconfig as mconfig
from marmot.plottingmodules.plotutils.plot_library import PlotLibrary
from marmot.plottingmodules.plotutils.plot_data_helper import PlotDataHelper
from marmot.plottingmodules.plotutils.plot_exceptions import (MissingInputData, DataSavedInModule,
UnderDevelopment, InputSheetError, MissingMetaData, UnsupportedAggregation, MissingZoneData)
class MPlot(PlotDataHelper):
"""transmission MPlot class.
All the plotting modules use this same class name.
This class contains plotting methods that are grouped based on the
current module name.
The transmission.py module contains methods that are
related to the transmission network.
MPlot inherits from the PlotDataHelper class to assist in creating figures.
"""
def __init__(self, argument_dict: dict):
"""
Args:
argument_dict (dict): Dictionary containing all
arguments passed from MarmotPlot.
"""
# iterate over items in argument_dict and set as properties of class
# see key_list in Marmot_plot_main for list of properties
for prop in argument_dict:
self.__setattr__(prop, argument_dict[prop])
# Instantiation of MPlotHelperFunctions
super().__init__(self.Marmot_Solutions_folder, self.AGG_BY, self.ordered_gen,
self.PLEXOS_color_dict, self.Scenarios, self.ylabels,
self.xlabels, self.gen_names_dict, self.TECH_SUBSET,
Region_Mapping=self.Region_Mapping)
self.logger = logging.getLogger('marmot_plot.'+__name__)
self.font_defaults = mconfig.parser("font_settings")
def line_util(self, **kwargs):
"""Creates a timeseries line plot of transmission lineflow utilization for each region.
Utilization is plotted between 0 and 1 on the y-axis.
The plot will default to showing the 10 highest utilized lines. A Line category
can also be passed instead, using the property field in the Marmot_plot_select.csv
        Each scenario is plotted on a separate facet plot.
        This method calls _util() to create the figure.
Returns:
dict: Dictionary containing the created plot and its data table.
"""
outputs = self._util(**kwargs)
return outputs
def line_hist(self, **kwargs):
"""Creates a histogram of transmission lineflow utilization for each region.
Utilization is plotted between 0 and 1 on the x-axis, with # lines on the y-axis.
Each bar is equal to a 0.05 utilization rate
The plot will default to showing all lines. A Line category can also be passed
instead using the property field in the Marmot_plot_select.csv
        Each scenario is plotted on a separate facet plot.
        This method calls _util() and passes the hist=True argument to create the figure.
Returns:
dict: Dictionary containing the created plot and its data table.
"""
outputs = self._util(hist=True, **kwargs)
return outputs
def _util(self, hist: bool = False, prop: str = None,
start_date_range: str = None,
end_date_range: str = None, **_):
"""Creates utilization plots, line plot and histograms
        This method is called from line_util() and line_hist()
Args:
hist (bool, optional): If True creates a histogram of utilization.
Defaults to False.
prop (str, optional): Optional PLEXOS line category to display.
Defaults to None.
start_date_range (str, optional): Defines a start date at which to represent data from.
Defaults to None.
end_date_range (str, optional): Defines a end date at which to represent data to.
Defaults to None.
Returns:
dict: Dictionary containing the created plot and its data table.
"""
outputs = {}
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True,"line_Flow",self.Scenarios),
(True,"line_Import_Limit",self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
if 1 in check_input_data:
return MissingInputData()
# sets up x, y dimensions of plot
ncols, nrows = self.set_facet_col_row_dimensions(facet=True,
multi_scenario=self.Scenarios)
grid_size = ncols*nrows
# Used to calculate any excess axis to delete
plot_number = len(self.Scenarios)
excess_axs = grid_size - plot_number
for zone_input in self.Zones:
self.logger.info(f"For all lines touching Zone = {zone_input}")
mplt = PlotLibrary(nrows, ncols, sharey=True,
squeeze=False, ravel_axs=True)
fig, axs = mplt.get_figure()
plt.subplots_adjust(wspace=0.1, hspace=0.25)
data_table=[]
for n, scenario in enumerate(self.Scenarios):
self.logger.info(f"Scenario = {str(scenario)}")
# gets correct metadata based on area aggregation
if self.AGG_BY=='zone':
zone_lines = self.meta.zone_lines(scenario)
else:
zone_lines = self.meta.region_lines(scenario)
try:
zone_lines = zone_lines.set_index([self.AGG_BY])
except:
self.logger.warning("Column to Aggregate by is missing")
continue
try:
zone_lines = zone_lines.xs(zone_input)
zone_lines=zone_lines['line_name'].unique()
except KeyError:
self.logger.warning('No data to plot for scenario')
outputs[zone_input] = MissingZoneData()
continue
flow = self["line_Flow"].get(scenario).copy()
#Limit to only lines touching to this zone
flow = flow[flow.index.get_level_values('line_name').isin(zone_lines)]
if self.shift_leapday == True:
flow = self.adjust_for_leapday(flow)
limits = self["line_Import_Limit"].get(scenario).copy()
limits = limits.droplevel('timestamp').drop_duplicates()
limits.mask(limits[0]==0.0,other=0.01,inplace=True) #if limit is zero set to small value
                # This checks for a nan in string. If no line category was selected, do nothing.
if pd.notna(prop):
self.logger.info(f"Line category = {str(prop)}")
line_relations = self.meta.lines(scenario).rename(columns={"name":"line_name"}).set_index(["line_name"])
flow=pd.merge(flow,line_relations, left_index=True,
right_index=True)
flow=flow[flow["category"] == prop]
flow=flow.drop('category',axis=1)
flow = pd.merge(flow,limits[0].abs(),on = 'line_name',how='left')
flow['Util']=(flow['0_x'].abs()/flow['0_y']).fillna(0)
#If greater than 1 because exceeds flow limit, report as 1
                flow.loc[flow['Util'] > 1, 'Util'] = 1
annual_util=flow['Util'].groupby(["line_name"]).mean().rename(scenario)
# top annual utilized lines
top_utilization = annual_util.nlargest(10, keep='first')
color_dict = dict(zip(self.Scenarios,self.color_list))
if hist == True:
mplt.histogram(annual_util, color_dict,label=scenario, sub_pos=n)
else:
for line in top_utilization.index.get_level_values(level='line_name').unique():
duration_curve = flow.loc[line].sort_values(by='Util',
ascending=False).reset_index(drop=True)
mplt.lineplot(duration_curve, 'Util' ,label=line, sub_pos=n)
axs[n].set_ylim((0,1.1))
data_table.append(annual_util)
mplt.add_legend()
#Remove extra axes
mplt.remove_excess_axs(excess_axs,grid_size)
# add facet labels
mplt.add_facet_labels(xlabels=self.xlabels,
ylabels = self.ylabels)
if hist == True:
                if pd.notna(prop):
                    prop_name = prop
                else:
                    prop_name = 'All Lines'
plt.ylabel('Number of lines', color='black',
rotation='vertical', labelpad=30)
plt.xlabel(f'Line Utilization: {prop_name}', color='black',
rotation='horizontal', labelpad=30)
else:
                if pd.notna(prop):
                    prop_name = prop
                else:
                    prop_name = 'Top 10 Lines'
plt.ylabel(f'Line Utilization: {prop_name}', color='black',
rotation='vertical', labelpad=60)
plt.xlabel('Intervals', color='black',
rotation='horizontal', labelpad=20)
if mconfig.parser("plot_title_as_region"):
mplt.add_main_title(zone_input)
try:
                del annual_util
except:
continue
Data_Out = pd.concat(data_table)
outputs[zone_input] = {'fig': fig,'data_table':Data_Out}
return outputs
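    # Illustrative sketch only (not called by Marmot): the utilization rule applied in
    # _util() above is abs(flow) / abs(import limit), capped at 1, then averaged per line.
    # All line names and values below are made up.
    @staticmethod
    def _example_line_utilization():
        """Return mean utilization per line for a tiny made-up example."""
        flow = pd.DataFrame({'line_name': ['L1', 'L1', 'L2', 'L2'],
                             'flow': [80.0, 120.0, -30.0, -10.0]})
        limits = pd.DataFrame({'line_name': ['L1', 'L2'],
                               'limit': [100.0, 50.0]})
        util = pd.merge(flow, limits, on='line_name')
        util['Util'] = (util['flow'].abs() / util['limit'].abs()).clip(upper=1.0)
        return util.groupby('line_name')['Util'].mean()  # L1 -> 0.9, L2 -> 0.4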
def int_flow_ind(self, figure_name: str = None, prop: str = None,
start_date_range: str = None,
end_date_range: str = None, **_):
"""Creates a line plot of interchange flows and their import and export limits.
        Each interchange is plotted on a separate facet plot.
The plot includes every interchange that originates or ends in the aggregation zone.
This can be adjusted by passing a comma separated string of interchanges to the property input.
        The code will create either a timeseries or duration curve depending on
        whether the word 'duration_curve' appears in the figure_name.
Args:
figure_name (str, optional): User defined figure output name.
Defaults to None.
prop (str, optional): Comma separated string of interchanges.
Defaults to None.
start_date_range (str, optional): Defines a start date at which to represent data from.
Defaults to None.
end_date_range (str, optional): Defines a end date at which to represent data to.
Defaults to None.
Returns:
dict: dictionary containing the created plot and its data table
"""
duration_curve=False
if 'duration_curve' in figure_name:
duration_curve = True
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True,"interface_Flow",self.Scenarios),
(True,"interface_Import_Limit",self.Scenarios),
(True,"interface_Export_Limit",self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
if 1 in check_input_data:
return MissingInputData()
scenario = self.Scenarios[0]
outputs = {}
if pd.notna(start_date_range):
self.logger.info(f"Plotting specific date range: \
{str(start_date_range)} to {str(end_date_range)}")
for zone_input in self.Zones:
self.logger.info(f"For all interfaces touching Zone = {zone_input}")
Data_Table_Out = pd.DataFrame()
# gets correct metadata based on area aggregation
if self.AGG_BY=='zone':
zone_lines = self.meta.zone_lines(scenario)
else:
zone_lines = self.meta.region_lines(scenario)
try:
zone_lines = zone_lines.set_index([self.AGG_BY])
except:
self.logger.info("Column to Aggregate by is missing")
continue
zone_lines = zone_lines.xs(zone_input)
zone_lines = zone_lines['line_name'].unique()
#Map lines to interfaces
all_ints = self.meta.interface_lines(scenario) #Map lines to interfaces
all_ints.index = all_ints.line
ints = all_ints.loc[all_ints.index.intersection(zone_lines)]
#flow = flow[flow.index.get_level_values('interface_name').isin(ints.interface)] #Limit to only interfaces touching to this zone
#flow = flow.droplevel('interface_category')
export_limits = self["interface_Export_Limit"].get(scenario).copy().droplevel('timestamp')
export_limits.mask(export_limits[0]==0.0,other=0.01,inplace=True) #if limit is zero set to small value
export_limits = export_limits[export_limits.index.get_level_values('interface_name').isin(ints.interface)]
export_limits = export_limits[export_limits[0].abs() < 99998] #Filter out unenforced interfaces.
#Drop unnecessary columns.
export_limits.reset_index(inplace = True)
export_limits.drop(columns=['interface_category', 'units'], inplace=True)
export_limits.set_index('interface_name',inplace = True)
import_limits = self["interface_Import_Limit"].get(scenario).copy().droplevel('timestamp')
import_limits.mask(import_limits[0]==0.0,other=0.01,inplace=True) #if limit is zero set to small value
import_limits = import_limits[import_limits.index.get_level_values('interface_name').isin(ints.interface)]
import_limits = import_limits[import_limits[0].abs() < 99998] #Filter out unenforced interfaces.
reported_ints = import_limits.index.get_level_values('interface_name').unique()
#Drop unnecessary columns.
import_limits.reset_index(inplace = True)
import_limits.drop(columns=['interface_category', 'units'], inplace=True)
import_limits.set_index('interface_name',inplace = True)
#Extract time index
ti = self["interface_Flow"][self.Scenarios[0]].index.get_level_values('timestamp').unique()
if pd.notna(prop):
interf_list = prop.split(',')
self.logger.info('Plotting only interfaces specified in Marmot_plot_select.csv')
self.logger.info(interf_list)
else:
interf_list = reported_ints.copy()
self.logger.info('Plotting full time series results.')
xdim,ydim = self.set_x_y_dimension(len(interf_list))
mplt = PlotLibrary(ydim, xdim, squeeze=False,
ravel_axs=True)
fig, axs = mplt.get_figure()
grid_size = xdim * ydim
excess_axs = grid_size - len(interf_list)
plt.subplots_adjust(wspace=0.05, hspace=0.2)
missing_ints = 0
chunks = []
n = -1
for interf in interf_list:
n += 1
#Remove leading spaces
if interf[0] == ' ':
interf = interf[1:]
if interf in reported_ints:
chunks_interf = []
single_exp_lim = export_limits.loc[interf] / 1000 #TODO: Use auto unit converter
single_imp_lim = import_limits.loc[interf] / 1000
#Check if all hours have the same limit.
check = single_exp_lim.to_numpy()
identical = check[0] == check.all()
limits = pd.concat([single_exp_lim,single_imp_lim],axis = 1)
limits.columns = ['export limit','import limit']
limits.index = ti
for scenario in self.Scenarios:
flow = self["interface_Flow"].get(scenario)
single_int = flow.xs(interf, level='interface_name') / 1000
single_int.index = single_int.index.droplevel(['interface_category','units'])
single_int.columns = [interf]
single_int = single_int.reset_index().set_index('timestamp')
limits = limits.reset_index().set_index('timestamp')
if self.shift_leapday == True:
single_int = self.adjust_for_leapday(single_int)
if pd.notna(start_date_range):
single_int = single_int[start_date_range : end_date_range]
limits = limits[start_date_range : end_date_range]
if duration_curve:
single_int = self.sort_duration(single_int,interf)
mplt.lineplot(single_int, interf,
label=f"{scenario}\n interface flow",
sub_pos=n)
# Only print limits if it doesn't change monthly or if you are plotting a time series.
# Otherwise the limit lines could be misleading.
if not duration_curve or identical[0]:
if scenario == self.Scenarios[-1]:
#Only plot limits for last scenario.
limits_color_dict = {'export limit': 'red', 'import limit': 'green'}
mplt.lineplot(limits, 'export limit',
label='export limit', color=limits_color_dict,
linestyle='--', sub_pos=n)
mplt.lineplot(limits, 'import limit',
label='import limit', color=limits_color_dict,
linestyle='--', sub_pos=n)
#For output time series .csv
scenario_names = pd.Series([scenario] * len(single_int), name='Scenario')
single_int_out = single_int.set_index([scenario_names], append=True)
chunks_interf.append(single_int_out)
Data_out_line = pd.concat(chunks_interf,axis = 0)
Data_out_line.columns = [interf]
chunks.append(Data_out_line)
else:
self.logger.warning(f"{interf} not found in results. Have you tagged "
"it with the 'Must Report' property in PLEXOS?")
excess_axs += 1
missing_ints += 1
continue
axs[n].set_title(interf)
if not duration_curve:
mplt.set_subplot_timeseries_format(sub_pos=n)
if missing_ints == len(interf_list):
outputs = MissingInputData()
return outputs
Data_Table_Out = pd.concat(chunks,axis = 1)
Data_Table_Out = Data_Table_Out.reset_index()
index_name = 'level_0' if duration_curve else 'timestamp'
Data_Table_Out = Data_Table_Out.pivot(index = index_name,columns = 'Scenario')
#Limits_Out = pd.concat(limits_chunks,axis = 1)
#Limits_Out.index = ['Export Limit','Import Limit']
# Data_Table_Out = Data_Table_Out.reset_index()
# Data_Table_Out = Data_Table_Out.groupby(Data_Table_Out.index // 24).mean()
# Data_Table_Out.index = pd.date_range(start = '1/1/2024',end = '12/31/2024',freq = 'D')
mplt.add_legend()
plt.ylabel('Flow (GW)', color='black', rotation='vertical',
labelpad=30)
if duration_curve:
plt.xlabel('Sorted hour of the year', color='black', labelpad=30)
plt.tight_layout(rect=[0, 0.03, 1, 0.97])
if mconfig.parser("plot_title_as_region"):
mplt.add_main_title(zone_input)
outputs[zone_input] = {'fig': fig, 'data_table': Data_Table_Out}
#Limits_Out.to_csv(os.path.join(self.Marmot_Solutions_folder, 'Figures_Output',self.AGG_BY + '_transmission','Individual_Interface_Limits.csv'))
return outputs
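    # Illustrative sketch only (not called by Marmot): conceptually, the duration curves
    # plotted above are the flow series sorted in descending order and re-indexed by
    # sorted interval. Values below are made up.
    @staticmethod
    def _example_duration_curve():
        """Return a tiny made-up flow series as a duration curve."""
        flow = pd.DataFrame({'flow_GW': [0.2, 1.4, 0.9, 1.1]},
                            index=pd.date_range('2024-01-01', periods=4, freq='H'))
        return (flow.sort_values(by='flow_GW', ascending=False)
                .reset_index(drop=True))  # 1.4, 1.1, 0.9, 0.2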
def int_flow_ind_seasonal(self, figure_name: str = None, prop: str = None,
start_date_range: str = None,
end_date_range: str = None, **_):
"""#TODO: Finish Docstring
Args:
figure_name (str, optional): User defined figure output name.
Defaults to None.
prop (str, optional): Comma separated string of interchanges.
Defaults to None.
start_date_range (str, optional): Defines a start date at which to represent data from.
Defaults to None.
end_date_range (str, optional): Defines a end date at which to represent data to.
Defaults to None.
Returns:
dict: dictionary containing the created plot and its data table
"""
#TODO: Use auto unit converter in method
duration_curve=False
if 'duration_curve' in figure_name:
duration_curve = True
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True,"interface_Flow",self.Scenarios),
(True,"interface_Import_Limit",self.Scenarios),
(True,"interface_Export_Limit",self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
if 1 in check_input_data:
return MissingInputData()
scenario = self.Scenarios[0]
outputs = {}
for zone_input in self.Zones:
self.logger.info("For all interfaces touching Zone = "+zone_input)
Data_Table_Out = pd.DataFrame()
# gets correct metadata based on area aggregation
if self.AGG_BY=='zone':
zone_lines = self.meta.zone_lines(scenario)
else:
zone_lines = self.meta.region_lines(scenario)
try:
zone_lines = zone_lines.set_index([self.AGG_BY])
except:
self.logger.info("Column to Aggregate by is missing")
continue
zone_lines = zone_lines.xs(zone_input)
zone_lines = zone_lines['line_name'].unique()
#Map lines to interfaces
all_ints = self.meta.interface_lines(scenario) #Map lines to interfaces
all_ints.index = all_ints.line
ints = all_ints.loc[all_ints.index.intersection(zone_lines)]
#flow = flow[flow.index.get_level_values('interface_name').isin(ints.interface)] #Limit to only interfaces touching to this zone
#flow = flow.droplevel('interface_category')
export_limits = self["interface_Export_Limit"].get(scenario).droplevel('timestamp')
export_limits.mask(export_limits[0]==0.0,other=0.01,inplace=True) #if limit is zero set to small value
export_limits = export_limits[export_limits.index.get_level_values('interface_name').isin(ints.interface)]
export_limits = export_limits[export_limits[0].abs() < 99998] #Filter out unenforced interfaces.
#Drop unnecessary columns.
export_limits.reset_index(inplace = True)
export_limits.drop(columns = 'interface_category',inplace = True)
export_limits.set_index('interface_name',inplace = True)
import_limits = self["interface_Import_Limit"].get(scenario).droplevel('timestamp')
import_limits.mask(import_limits[0]==0.0,other=0.01,inplace=True) #if limit is zero set to small value
import_limits = import_limits[import_limits.index.get_level_values('interface_name').isin(ints.interface)]
import_limits = import_limits[import_limits[0].abs() < 99998] #Filter out unenforced interfaces.
reported_ints = import_limits.index.get_level_values('interface_name').unique()
#Drop unnecessary columns.
import_limits.reset_index(inplace = True)
import_limits.drop(columns = 'interface_category',inplace = True)
import_limits.set_index('interface_name',inplace = True)
#Extract time index
ti = self["interface_Flow"][self.Scenarios[0]].index.get_level_values('timestamp').unique()
if prop != '':
interf_list = prop.split(',')
self.logger.info('Plotting only interfaces specified in Marmot_plot_select.csv')
self.logger.info(interf_list)
else:
interf_list = reported_ints.copy()
self.logger.info('Carving out season from ' + start_date_range + ' to ' + end_date_range)
            #Remove missing interfaces from the list.
            #Iterate over a copy so that removals do not skip elements.
            for interf in list(interf_list):
                #Remove leading spaces
                clean_interf = interf.lstrip()
                if clean_interf not in reported_ints:
                    self.logger.warning(clean_interf + ' not found in results.')
                    interf_list.remove(interf)
if not interf_list:
outputs = MissingInputData()
return outputs
xdim = 2
ydim = len(interf_list)
mplt = PlotLibrary(ydim, xdim, squeeze=False)
fig, axs = mplt.get_figure()
grid_size = xdim * ydim
excess_axs = grid_size - len(interf_list)
plt.subplots_adjust(wspace=0.05, hspace=0.2)
missing_ints = 0
chunks = []
limits_chunks = []
n = -1
for interf in interf_list:
n += 1
#Remove leading spaces
if interf[0] == ' ':
interf = interf[1:]
chunks_interf = []
single_exp_lim = export_limits.loc[interf] / 1000
single_imp_lim = import_limits.loc[interf] / 1000
#Check if all hours have the same limit.
check = single_exp_lim.to_numpy()
identical = check[0] == check.all()
limits = pd.concat([single_exp_lim,single_imp_lim],axis = 1)
limits.columns = ['export limit','import limit']
limits.index = ti
for scenario in self.Scenarios:
flow = self["interface_Flow"].get(scenario)
single_int = flow.xs(interf,level = 'interface_name') / 1000
single_int.index = single_int.index.droplevel('interface_category')
single_int.columns = [interf]
if self.shift_leapday == True:
single_int = self.adjust_for_leapday(single_int)
summer = single_int[start_date_range:end_date_range]
winter = single_int.drop(summer.index)
summer_lim = limits[start_date_range:end_date_range]
winter_lim = limits.drop(summer.index)
if duration_curve:
summer = self.sort_duration(summer,interf)
winter = self.sort_duration(winter,interf)
summer_lim = self.sort_duration(summer_lim,'export limit')
winter_lim = self.sort_duration(winter_lim,'export limit')
axs[n,0].plot(summer[interf],linewidth = 1,label = scenario + '\n interface flow')
axs[n,1].plot(winter[interf],linewidth = 1,label = scenario + '\n interface flow')
if scenario == self.Scenarios[-1]:
for col in summer_lim:
limits_color_dict = {'export limit': 'red', 'import limit': 'green'}
axs[n,0].plot(summer_lim[col], linewidth=1, linestyle='--',
color=limits_color_dict[col], label=col)
axs[n,1].plot(winter_lim[col], linewidth=1, linestyle='--',
color=limits_color_dict[col], label=col)
#For output time series .csv
scenario_names = pd.Series([scenario] * len(single_int), name='Scenario')
single_int_out = single_int.set_index([scenario_names], append=True)
chunks_interf.append(single_int_out)
Data_out_line = pd.concat(chunks_interf,axis = 0)
Data_out_line.columns = [interf]
chunks.append(Data_out_line)
axs[n,0].set_title(interf)
axs[n,1].set_title(interf)
if not duration_curve:
locator = mdates.AutoDateLocator(minticks=4, maxticks=8)
formatter = mdates.ConciseDateFormatter(locator)
formatter.formats[2] = '%d\n %b'
formatter.zero_formats[1] = '%b\n %Y'
formatter.zero_formats[2] = '%d\n %b'
formatter.zero_formats[3] = '%H:%M\n %d-%b'
formatter.offset_formats[3] = '%b %Y'
formatter.show_offset = False
axs[n,0].xaxis.set_major_locator(locator)
axs[n,0].xaxis.set_major_formatter(formatter)
axs[n,1].xaxis.set_major_locator(locator)
axs[n,1].xaxis.set_major_formatter(formatter)
mplt.add_legend()
Data_Table_Out = pd.concat(chunks,axis = 1)
#Limits_Out = pd.concat(limits_chunks,axis = 1)
#Limits_Out.index = ['Export Limit','Import Limit']
plt.ylabel('Flow (GW)', color='black', rotation='vertical', labelpad=30)
if duration_curve:
plt.xlabel('Sorted hour of the year', color = 'black', labelpad = 30)
fig.text(0.15,0.98,'Summer (' + start_date_range + ' to ' + end_date_range + ')',fontsize = 16)
fig.text(0.58,0.98,'Winter',fontsize = 16)
plt.tight_layout(rect=[0, 0.03, 1, 0.97])
if mconfig.parser("plot_title_as_region"):
mplt.add_main_title(zone_input)
outputs[zone_input] = {'fig': fig, 'data_table': Data_Table_Out}
#Limits_Out.to_csv(os.path.join(self.Marmot_Solutions_folder, 'Figures_Output',self.AGG_BY + '_transmission','Individual_Interface_Limits.csv'))
return outputs
#TODO: re-organize parameters (self vs. not self)
def int_flow_ind_diff(self, figure_name: str = None, **_):
"""Plot under development
This method plots the hourly difference in interface flow between two scenarios for
individual interfaces, with a facet for each interface.
The two scenarios are defined in the "Scenario_Diff" row of Marmot_user_defined_inputs.
The interfaces are specified in the plot properties field of Marmot_plot_select.csv (column 4).
The figure and data tables are saved within the module.
Returns:
UnderDevelopment(): Exception class, plot is not functional.
"""
return UnderDevelopment() # TODO: add new get_data method
duration_curve=False
if 'duration_curve' in figure_name:
duration_curve = True
check_input_data = []
Flow_Collection = {}
Import_Limit_Collection = {}
Export_Limit_Collection = {}
check_input_data.extend([get_data(Flow_Collection,"interface_Flow",self.Marmot_Solutions_folder, self.Scenarios)])
check_input_data.extend([get_data(Import_Limit_Collection,"interface_Import_Limit",self.Marmot_Solutions_folder, self.Scenarios)])
check_input_data.extend([get_data(Export_Limit_Collection,"interface_Export_Limit",self.Marmot_Solutions_folder, self.Scenarios)])
if 1 in check_input_data:
outputs = MissingInputData()
return outputs
scenario = self.Scenarios[0]
outputs = {}
if not pd.isnull(self.start_date):
self.logger.info("Plotting specific date range: \
{} to {}".format(str(self.start_date),str(self.end_date)))
for zone_input in self.Zones:
self.logger.info("For all interfaces touching Zone = "+zone_input)
Data_Table_Out = pd.DataFrame()
# gets correct metadata based on area aggregation
if self.AGG_BY=='zone':
zone_lines = self.meta.zone_lines(scenario)
else:
zone_lines = self.meta.region_lines(scenario)
try:
zone_lines = zone_lines.set_index([self.AGG_BY])
except:
self.logger.info("Column to Aggregate by is missing")
continue
zone_lines = zone_lines.xs(zone_input)
zone_lines = zone_lines['line_name'].unique()
#Map lines to interfaces
all_ints = self.meta.interface_lines(scenario) #Map lines to interfaces
all_ints.index = all_ints.line
ints = all_ints.loc[all_ints.index.intersection(zone_lines)]
#flow = flow[flow.index.get_level_values('interface_name').isin(ints.interface)] #Limit to only interfaces touching to this zone
#flow = flow.droplevel('interface_category')
export_limits = Export_Limit_Collection.get(scenario).droplevel('timestamp')
export_limits.mask(export_limits[0]==0.0,other=0.01,inplace=True) #if limit is zero set to small value
export_limits = export_limits[export_limits.index.get_level_values('interface_name').isin(ints.interface)]
export_limits = export_limits[export_limits[0].abs() < 99998] #Filter out unenforced interfaces.
#Drop unnecessary columns.
export_limits.reset_index(inplace = True)
export_limits.drop(columns = 'interface_category',inplace = True)
export_limits.set_index('interface_name',inplace = True)
import_limits = Import_Limit_Collection.get(scenario).droplevel('timestamp')
import_limits.mask(import_limits[0]==0.0,other=0.01,inplace=True) #if limit is zero set to small value
import_limits = import_limits[import_limits.index.get_level_values('interface_name').isin(ints.interface)]
import_limits = import_limits[import_limits[0].abs() < 99998] #Filter out unenforced interfaces.
reported_ints = import_limits.index.get_level_values('interface_name').unique()
#Drop unnecessary columns.
import_limits.reset_index(inplace = True)
import_limits.drop(columns = 'interface_category',inplace = True)
import_limits.set_index('interface_name',inplace = True)
#Extract time index
ti = Flow_Collection[self.Scenarios[0]].index.get_level_values('timestamp').unique()
if self.prop != '':
interf_list = self.prop.split(',')
self.logger.info('Plotting only interfaces specified in Marmot_plot_select.csv')
self.logger.info(interf_list)
else:
interf_list = reported_ints.copy()
self.logger.info('Plotting full time series results.')
xdim,ydim = self.set_x_y_dimension(len(interf_list))
            mplt = PlotLibrary(ydim, xdim,
                               squeeze=False, ravel_axs=True)
fig, axs = mplt.get_figure()
grid_size = xdim * ydim
excess_axs = grid_size - len(interf_list)
plt.subplots_adjust(wspace=0.05, hspace=0.2)
missing_ints = 0
chunks = []
limits_chunks = []
n = -1
for interf in interf_list:
n += 1
#Remove leading spaces
if interf[0] == ' ':
interf = interf[1:]
if interf in reported_ints:
chunks_interf = []
single_exp_lim = export_limits.loc[interf] / 1000 #TODO: Use auto unit converter in method
single_imp_lim = import_limits.loc[interf] / 1000
#Check if all hours have the same limit.
check = single_exp_lim.to_numpy()
identical = check[0] == check.all()
limits = pd.concat([single_exp_lim,single_imp_lim],axis = 1)
limits.columns = ['export limit','import limit']
limits.index = ti
for scenario in self.Scenarios:
flow = Flow_Collection.get(scenario)
single_int = flow.xs(interf,level = 'interface_name') / 1000
single_int.index = single_int.index.droplevel('interface_category')
single_int.columns = [interf]
if self.shift_leapday == True:
single_int = self.adjust_for_leapday(single_int)
single_int = single_int.reset_index().set_index('timestamp')
limits = limits.reset_index().set_index('timestamp')
if not pd.isnull(self.start_date):
single_int = single_int[self.start_date : self.end_date]
limits = limits[self.start_date : self.end_date]
if duration_curve:
single_int = self.sort_duration(single_int,interf)
mplt.lineplot(single_int,interf,label = scenario + '\n interface flow', sub_pos = n)
#Only print limits if it doesn't change monthly or if you are plotting a time series. Otherwise the limit lines could be misleading.
if not duration_curve or identical[0]:
if scenario == self.Scenarios[-1]:
#Only plot limits for last scenario.
limits_color_dict = {'export limit': 'red', 'import limit': 'green'}
mplt.lineplot(limits,'export limit',label = 'export limit',color = limits_color_dict,linestyle = '--', sub_pos = n)
mplt.lineplot(limits,'import limit',label = 'import limit',color = limits_color_dict,linestyle = '--', sub_pos = n)
#For output time series .csv
scenario_names = pd.Series([scenario] * len(single_int),name = 'Scenario')
single_int_out = single_int.set_index([scenario_names],append = True)
chunks_interf.append(single_int_out)
Data_out_line = pd.concat(chunks_interf,axis = 0)
Data_out_line.columns = [interf]
chunks.append(Data_out_line)
else:
self.logger.warning(interf + ' not found in results. Have you tagged it with the "Must Report" property in PLEXOS?')
excess_axs += 1
missing_ints += 1
continue
axs[n].set_title(interf)
handles, labels = axs[n].get_legend_handles_labels()
if not duration_curve:
                    mplt.set_subplot_timeseries_format(sub_pos=n)
if n == len(interf_list) - 1:
axs[n].legend(loc='lower left',bbox_to_anchor=(1.05,-0.2))
if missing_ints == len(interf_list):
outputs = MissingInputData()
return outputs
Data_Table_Out = pd.concat(chunks,axis = 1)
Data_Table_Out = Data_Table_Out.reset_index()
index_name = 'level_0' if duration_curve else 'timestamp'
Data_Table_Out = Data_Table_Out.pivot(index = index_name,columns = 'Scenario')
#Limits_Out = pd.concat(limits_chunks,axis = 1)
#Limits_Out.index = ['Export Limit','Import Limit']
# Data_Table_Out = Data_Table_Out.reset_index()
# Data_Table_Out = Data_Table_Out.groupby(Data_Table_Out.index // 24).mean()
# Data_Table_Out.index = pd.date_range(start = '1/1/2024',end = '12/31/2024',freq = 'D')
plt.ylabel('Flow (GW)', color='black', rotation='vertical', labelpad=30)
if duration_curve:
plt.xlabel('Sorted hour of the year', color = 'black', labelpad = 30)
plt.tight_layout(rect=[0, 0.03, 1, 0.97])
if mconfig.parser("plot_title_as_region"):
mplt.add_main_title(zone_input)
outputs[zone_input] = {'fig': fig, 'data_table': Data_Table_Out}
#Limits_Out.to_csv(os.path.join(self.Marmot_Solutions_folder, 'Figures_Output',self.AGG_BY + '_transmission','Individual_Interface_Limits.csv'))
return outputs
def line_flow_ind(self, figure_name: str = None, prop: str = None, **_):
"""
#TODO: Finish Docstring
This method plots flow, import and export limit, for individual transmission lines,
with a facet for each line.
The lines are specified in the plot properties field of Marmot_plot_select.csv (column 4).
The plot includes every interchange that originates or ends in the aggregation zone.
Figures and data tables are returned to plot_main
Args:
figure_name (str, optional): [description]. Defaults to None.
prop (str, optional): [description]. Defaults to None.
Returns:
[type]: [description]
"""
#TODO: Use auto unit converter in method
duration_curve=False
if 'duration_curve' in figure_name:
duration_curve = True
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True,"line_Flow",self.Scenarios),
(True,"line_Import_Limit",self.Scenarios),
(True,"line_Export_Limit",self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
if 1 in check_input_data:
return MissingInputData()
#Select only lines specified in Marmot_plot_select.csv.
select_lines = prop.split(",")
if select_lines == None:
return InputSheetError()
self.logger.info('Plotting only lines specified in Marmot_plot_select.csv')
self.logger.info(select_lines)
scenario = self.Scenarios[0] #Select single scenario for purpose of extracting limits.
export_limits = self["line_Export_Limit"].get(scenario).droplevel('timestamp')
export_limits.mask(export_limits[0]==0.0,other=0.01,inplace=True) #if limit is zero set to small value
export_limits = export_limits[export_limits[0].abs() < 99998] #Filter out unenforced lines.
import_limits = self["line_Import_Limit"].get(scenario).droplevel('timestamp')
import_limits.mask(import_limits[0]==0.0,other=0.01,inplace=True) #if limit is zero set to small value
import_limits = import_limits[import_limits[0].abs() < 99998] #Filter out unenforced lines.
flows = self["line_Flow"][scenario]
# limited_lines = []
# i = 0
# all_lines = flows.index.get_level_values('line_name').unique()
# for line in all_lines:
# i += 1
# print(line)
# print(i / len(all_lines))
# exp = export_limits.loc[line].squeeze()[0]
# imp = import_limits.loc[line].squeeze()[0]
# flow = flows.xs(line,level = 'line_name')[0].tolist()
# if exp in flow or imp in flow:
# limited_lines.append(line)
# print(limited_lines)
# pd.DataFrame(limited_lines).to_csv('/Users/mschwarz/OR OSW local/Solutions/Figures_Output/limited_lines.csv')
xdim,ydim = self.set_x_y_dimension(len(select_lines))
grid_size = xdim * ydim
excess_axs = grid_size - len(select_lines)
mplt = PlotLibrary(ydim, xdim, squeeze=False,
ravel_axs=True)
fig, axs = mplt.get_figure()
reported_lines = self["line_Flow"][self.Scenarios[0]].index.get_level_values('line_name').unique()
n = -1
missing_lines = 0
chunks = []
limits_chunks = []
for line in select_lines:
n += 1
#Remove leading/trailing spaces
line = line.strip()
if line in reported_lines:
chunks_line = []
single_exp_lim = export_limits.loc[line]
single_imp_lim = import_limits.loc[line]
limits = pd.concat([single_exp_lim,single_imp_lim])
limits_chunks.append(limits)
single_exp_lim = single_exp_lim.squeeze()
single_imp_lim = single_imp_lim.squeeze()
# If export/import limits were pulled as an interval property, take the average.
if len(single_exp_lim) > 1:
single_exp_lim = single_exp_lim.mean()
single_imp_lim = single_imp_lim.mean()
limits = pd.Series([single_exp_lim,single_imp_lim],name = line)
limits_chunks.append(limits)
for scenario in self.Scenarios:
flow = self["line_Flow"][scenario]
single_line = flow.xs(line,level = 'line_name')
single_line = single_line.droplevel('units')
single_line.columns = [line]
if self.shift_leapday == True:
single_line = self.adjust_for_leapday(single_line)
single_line_out = single_line.copy()
if duration_curve:
single_line = self.sort_duration(single_line,line)
mplt.lineplot(single_line, line, label = scenario + '\n line flow', sub_pos=n)
#Add %congested number to plot.
if scenario == self.Scenarios[0]:
viol_exp = single_line[single_line[line] > single_exp_lim].count()
viol_imp = single_line[single_line[line] < single_imp_lim].count()
viol_perc = 100 * (viol_exp + viol_imp) / len(single_line)
viol_perc = round(viol_perc.squeeze(),3)
axs[n].annotate('Violation = ' + str(viol_perc) + '% of hours', xy = (0.1,0.15),xycoords='axes fraction')
cong_exp = single_line[single_line[line] == single_exp_lim].count()
cong_imp = single_line[single_line[line] == single_imp_lim].count()
cong_perc = 100 * (cong_exp + cong_imp) / len(single_line)
cong_perc = round(cong_perc.squeeze(),0)
axs[n].annotate('Congestion = ' + str(cong_perc) + '% of hours', xy = (0.1,0.1),xycoords='axes fraction')
#For output time series .csv
scenario_names = pd.Series([scenario] * len(single_line_out),name = 'Scenario')
single_line_out = single_line_out.set_index([scenario_names],append = True)
chunks_line.append(single_line_out)
Data_out_line = pd.concat(chunks_line,axis = 0)
chunks.append(Data_out_line)
else:
self.logger.warning(line + ' not found in results. Have you tagged it with the "Must Report" property in PLEXOS?')
excess_axs += 1
missing_lines += 1
continue
mplt.remove_excess_axs(excess_axs,grid_size)
axs[n].axhline(y = single_exp_lim, ls = '--',label = 'Export Limit',color = 'red')
axs[n].axhline(y = single_imp_lim, ls = '--',label = 'Import Limit', color = 'green')
axs[n].set_title(line)
if not duration_curve:
mplt.set_subplot_timeseries_format(sub_pos=n)
if missing_lines == len(select_lines):
outputs = MissingInputData()
return outputs
Data_Table_Out = pd.concat(chunks,axis = 1)
#Limits_Out = pd.concat(limits_chunks,axis = 1)
#Limits_Out.index = ['Export Limit','Import Limit']
mplt.add_legend()
plt.ylabel('Flow (MW)', color='black', rotation='vertical', labelpad=30)
#plt.tight_layout(rect=[0, 0.03, 1, 0.97])
plt.tight_layout()
fn_suffix = '_duration_curve' if duration_curve else ''
fig.savefig(os.path.join(self.Marmot_Solutions_folder, 'Figures_Output',self.AGG_BY + '_transmission',figure_name + fn_suffix + '.svg'), dpi=600, bbox_inches='tight')
Data_Table_Out.to_csv(os.path.join(self.Marmot_Solutions_folder, 'Figures_Output',self.AGG_BY + '_transmission',figure_name + fn_suffix + '.csv'))
# Limits_Out.to_csv(os.path.join(self.Marmot_Solutions_folder, 'Figures_Output',self.AGG_BY + '_transmission',figure_name + 'limits.csv'))
outputs = DataSavedInModule()
return outputs
def line_flow_ind_diff(self, figure_name: str = None,
prop: str = None, **_):
"""
#TODO: Finish Docstring
This method plots the flow difference for individual transmission lines, with a facet for each line.
The scenarios are specified in the "Scenario_Diff_plot" field of Marmot_user_defined_inputs.csv.
The lines are specified in the plot properties field of Marmot_plot_select.csv (column 4).
Figures and data tables are saved in the module.
Args:
figure_name (str, optional): [description]. Defaults to None.
prop (str, optional): [description]. Defaults to None.
Returns:
[type]: [description]
"""
#TODO: Use auto unit converter in method
duration_curve=False
if 'duration_curve' in figure_name:
duration_curve = True
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True,"line_Flow",self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
if 1 in check_input_data:
outputs = MissingInputData()
return outputs
#Select only lines specified in Marmot_plot_select.csv.
if not prop:
    return InputSheetError()
select_lines = prop.split(",")
self.logger.info('Plotting only lines specified in Marmot_plot_select.csv')
self.logger.info(select_lines)
flow_diff = self["line_Flow"].get(self.Scenario_Diff[1]) - self["line_Flow"].get(self.Scenario_Diff[0])
xdim,ydim = self.set_x_y_dimension(len(select_lines))
grid_size = xdim * ydim
excess_axs = grid_size - len(select_lines)
mplt = PlotLibrary(ydim, xdim, squeeze=False,
ravel_axs=True)
fig, axs = mplt.get_figure()
plt.subplots_adjust(wspace=0.05, hspace=0.2)
reported_lines = self["line_Flow"].get(self.Scenarios[0]).index.get_level_values('line_name').unique()
n = -1
missing_lines = 0
chunks = []
for line in select_lines:
n += 1
#Remove leading/trailing spaces
line = line.strip()
if line in reported_lines:
single_line = flow_diff.xs(line,level = 'line_name')
single_line.columns = [line]
if self.shift_leapday == True:
single_line = self.adjust_for_leapday(single_line)
single_line_out = single_line.copy()
if duration_curve:
single_line = self.sort_duration(single_line,line)
#mplt.lineplot(single_line,line, label = self.Scenario_Diff[1] + ' - \n' + self.Scenario_Diff[0] + '\n line flow', sub_pos = n)
mplt.lineplot(single_line,line, label = 'BESS - no BESS \n line flow', sub_pos=n)
else:
self.logger.warning(line + ' not found in results. Have you tagged it with the "Must Report" property in PLEXOS?')
excess_axs += 1
missing_lines += 1
continue
mplt.remove_excess_axs(excess_axs,grid_size)
axs[n].set_title(line)
if not duration_curve:
mplt.set_subplot_timeseries_format(sub_pos=n)
chunks.append(single_line_out)
if missing_lines == len(select_lines):
outputs = MissingInputData()
return outputs
Data_Table_Out = pd.concat(chunks,axis = 1)
mplt.add_legend()
plt.ylabel('Flow difference (MW)', color='black', rotation='vertical', labelpad=30)
plt.tight_layout()
fn_suffix = '_duration_curve' if duration_curve else ''
fig.savefig(os.path.join(self.Marmot_Solutions_folder, 'Figures_Output',self.AGG_BY + '_transmission',figure_name + fn_suffix + '.svg'), dpi=600, bbox_inches='tight')
Data_Table_Out.to_csv(os.path.join(self.Marmot_Solutions_folder, 'Figures_Output',self.AGG_BY + '_transmission',figure_name + fn_suffix + '.csv'))
outputs = DataSavedInModule()
return outputs
def line_flow_ind_seasonal(self, figure_name: str = None, prop: str = None,
start_date_range: str = None,
end_date_range: str = None, **_):
"""TODO: Finish Docstring.
This method differs from the previous method, in that it plots seasonal line limits.
To use this method, line import/export must be an "interval" property, not a "year" property.
This can be selected in "plexos_properties.csv".
Re-run the formatter if necessary, it will overwrite the existing properties in "*_formatted.h5"
This method plots flow, import and export limit, for individual transmission lines, with a facet for each line.
The lines are specified in the plot properties field of Marmot_plot_select.csv (column 4).
The plot includes every interchange that originates or ends in the aggregation zone.
Figures and data tables saved in the module.
Args:
figure_name (str, optional): [description]. Defaults to None.
prop (str, optional): [description]. Defaults to None.
start_date_range (str, optional): [description]. Defaults to None.
end_date_range (str, optional): [description]. Defaults to None.
Returns:
[type]: [description]
"""
#TODO: Use auto unit converter in method
if pd.isna(start_date_range):
self.logger.warning('You are attempting to plot a time series facetted by two seasons,\n\
but you are missing a value in the "Start Date" column of "Marmot_plot_select.csv" \
Please enter dates in "Start Date" and "End Date". These will define the bounds of \
one of your two seasons. The other season will be comprised of the rest of the year.')
return MissingInputData()
duration_curve=False
if 'duration_curve' in figure_name:
duration_curve = True
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True,"line_Flow",self.Scenarios),
(True,"line_Import_Limit",self.Scenarios),
(True,"line_Export_Limit",self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
if 1 in check_input_data:
return MissingInputData()
#Select only lines specified in Marmot_plot_select.csv.
if not prop:
    return InputSheetError()
select_lines = prop.split(",")
self.logger.info('Plotting only lines specified in Marmot_plot_select.csv')
self.logger.info(select_lines)
scenario = self.Scenarios[0]
#Line limits are seasonal.
export_limits = self["line_Export_Limit"].get(scenario).droplevel('timestamp')
export_limits.mask(export_limits[0]==0.0,other=0.01,inplace=True) #if limit is zero set to small value
export_limits = export_limits[export_limits[0].abs() < 99998] #Filter out unenforced lines.
import_limits = self["line_Import_Limit"].get(scenario).droplevel('timestamp')
import_limits.mask(import_limits[0]==0.0,other=0.01,inplace=True) #if limit is zero set to small value
import_limits = import_limits[import_limits[0].abs() < 99998] #Filter out unenforced lines.
#Extract time index
ti = self["line_Flow"][self.Scenarios[0]].index.get_level_values('timestamp').unique()
reported_lines = self["line_Flow"][self.Scenarios[0]].index.get_level_values('line_name').unique()
self.logger.info('Carving out season from ' + start_date_range + ' to ' + end_date_range)
#Remove missing lines from the list.
available_lines = []
for line in select_lines:
    line = line.strip()
    if line in reported_lines:
        available_lines.append(line)
    else:
        self.logger.warning(line + ' not found in results.')
select_lines = available_lines
if not select_lines:
outputs = MissingInputData()
return outputs
xdim = 2
ydim = len(select_lines)
grid_size = xdim * ydim
excess_axs = grid_size - len(select_lines)
mplt = PlotLibrary(ydim, xdim, squeeze=False)
fig, axs = mplt.get_figure()
i = -1
missing_lines = 0
chunks = []
limits_chunks = []
for line in select_lines:
i += 1
#Remove leading/trailing spaces
line = line.strip()
chunks_line = []
single_exp_lim = export_limits.loc[line]
single_exp_lim.index = ti
single_imp_lim = import_limits.loc[line]
single_imp_lim.index = ti
limits = pd.concat([single_exp_lim,single_imp_lim],axis = 1)
limits.columns = ['export limit','import limit']
limits.index = ti
limits_chunks.append(limits)
for scenario in self.Scenarios:
flow = self["line_Flow"][scenario]
single_line = flow.xs(line,level = 'line_name')
single_line = single_line.droplevel('units')
single_line_out = single_line.copy()
single_line.columns = [line]
if self.shift_leapday == True:
single_line = self.adjust_for_leapday(single_line)
#Split into seasons.
summer = single_line[start_date_range : end_date_range]
winter = single_line.drop(summer.index)
summer_lim = limits[start_date_range:end_date_range]
winter_lim = limits.drop(summer.index)
if duration_curve:
summer = self.sort_duration(summer,line)
winter = self.sort_duration(winter,line)
summer_lim = self.sort_duration(summer_lim,'export limit')
winter_lim = self.sort_duration(winter_lim,'export limit')
axs[i,0].plot(summer[line],linewidth = 1,label = scenario + '\n line flow')
axs[i,1].plot(winter[line],linewidth = 1,label = scenario + '\n line flow')
if scenario == self.Scenarios[-1]:
for col in summer_lim:
limits_color_dict = {'export limit': 'red', 'import limit': 'green'}
axs[i,0].plot(summer_lim[col],linewidth = 1,linestyle = '--',color = limits_color_dict[col],label = col)
axs[i,1].plot(winter_lim[col],linewidth = 1,linestyle = '--',color = limits_color_dict[col],label = col)
for j in [0,1]:
axs[i,j].spines['right'].set_visible(False)
axs[i,j].spines['top'].set_visible(False)
axs[i,j].tick_params(axis='y', which='major', length=5, width=1)
axs[i,j].tick_params(axis='x', which='major', length=5, width=1)
axs[i,j].set_title(line)
if i == len(select_lines) - 1:
axs[i,j].legend(loc = 'lower left',bbox_to_anchor=(1.05,0),facecolor='inherit', frameon=True)
#For output time series .csv
scenario_names = pd.Series([scenario] * len(single_line_out),name = 'Scenario')
single_line_out.columns = [line]
single_line_out = single_line_out.set_index([scenario_names],append = True)
chunks_line.append(single_line_out)
Data_out_line = pd.concat(chunks_line,axis = 0)
chunks.append(Data_out_line)
if missing_lines == len(select_lines):
outputs = MissingInputData()
return outputs
Data_Table_Out = pd.concat(chunks,axis = 1)
#Limits_Out = pd.concat(limits_chunks,axis = 1)
#Limits_Out.index = ['Export Limit','Import Limit']
fig.text(0.3,1,'Summer (Jun - Sep)')
fig.text(0.6,1,'Winter (Jan - Mar,Oct - Dec)')
plt.ylabel('Flow (MW)', color='black', rotation='vertical', labelpad=30)
plt.tight_layout()
fn_suffix = '_duration_curve' if duration_curve else ''
fig.savefig(os.path.join(self.Marmot_Solutions_folder, 'Figures_Output',self.AGG_BY + '_transmission','Individual_Line_Flow' + fn_suffix + '_seasonal.svg'), dpi=600, bbox_inches='tight')
Data_Table_Out.to_csv(os.path.join(self.Marmot_Solutions_folder, 'Figures_Output',self.AGG_BY + '_transmission','Individual_Line_Flow' + fn_suffix + '_seasonal.csv'))
#Limits_Out.to_csv(os.path.join(self.Marmot_Solutions_folder, 'Figures_Output',self.AGG_BY + '_transmission','Individual_Line_Limits.csv'))
outputs = DataSavedInModule()
return outputs
def extract_tx_cap(self, **_):
"""Plot under development
Returns:
UnderDevelopment(): Exception class, plot is not functional.
"""
return UnderDevelopment() #TODO: Needs finishing
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True,"interface_Import_Limit",self.Scenarios),
(True,"interface_Export_Limit",self.Scenarios),
(True,"line_Import_Limit",self.Scenarios),
(True,"line_Export_Limit",self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
if 1 in check_input_data:
return MissingInputData()
for scenario in self.Scenarios:
self.logger.info(scenario)
for zone_input in self.Zones:
#Lines
# lines = self.meta.region_interregionallines(scenario)
# if scenario == 'ADS':
# zone_input = zone_input.split('_WI')[0]
# lines = self.meta_ADS.region_interregionallines()
# lines = lines[lines['region'] == zone_input]
# import_lim = self["line_Import_Limit"][scenario].reset_index()
# export_lim = self["line_Export_Limit"][scenario].reset_index()
# lines = lines.merge(import_lim,how = 'inner',on = 'line_name')
# lines = lines[['line_name',0]]
# lines.columns = ['line_name','import_limit']
# lines = lines.merge(export_lim, how = 'inner',on = 'line_name')
# lines = lines[['line_name','import_limit',0]]
# lines.columns = ['line_name','import_limit','export_limit']
# fn = os.path.join(self.Marmot_Solutions_folder, 'NARIS', 'Figures_Output',self.AGG_BY + '_transmission','Individual_Interregional_Line_Limits_' + scenario + '.csv')
# lines.to_csv(fn)
# lines = self.meta.region_intraregionallines(scenario)
# if scenario == 'ADS':
# lines = self.meta_ADS.region_intraregionallines()
# lines = lines[lines['region'] == zone_input]
# import_lim = self["line_Import_Limit"][scenario].reset_index()
# export_lim = self["line_Export_Limit"][scenario].reset_index()
# lines = lines.merge(import_lim,how = 'inner',on = 'line_name')
# lines = lines[['line_name',0]]
# lines.columns = ['line_name','import_limit']
# lines = lines.merge(export_lim, how = 'inner',on = 'line_name')
# lines = lines[['line_name','import_limit',0]]
# lines.columns = ['line_name','import_limit','export_limit']
# fn = os.path.join(self.Marmot_Solutions_folder, 'NARIS', 'Figures_Output',self.AGG_BY + '_transmission','Individual_Intraregional_Line_Limits_' + scenario + '.csv')
# lines.to_csv(fn)
#Interfaces
PSCo_ints = ['P39 TOT 5_WI','P40 TOT 7_WI']
int_import_lim = self["interface_Import_Limit"][scenario].reset_index()
int_export_lim = self["interface_Export_Limit"][scenario].reset_index()
if scenario == 'NARIS':
last_timestamp = int_import_lim['timestamp'].unique()[-1] #Last because ADS uses the last timestamp.
int_import_lim = int_import_lim[int_import_lim['timestamp'] == last_timestamp]
int_export_lim = int_export_lim[int_export_lim['timestamp'] == last_timestamp]
lines2ints = self.meta_ADS.interface_lines()
else:
lines2ints = self.meta.interface_lines(scenario)
fn = os.path.join(self.Marmot_Solutions_folder, 'NARIS', 'Figures_Output',self.AGG_BY + '_transmission','test_meta_' + scenario + '.csv')
lines2ints.to_csv(fn)
ints = pd.merge(int_import_lim,int_export_lim,how = 'inner', on = 'interface_name')
ints.rename(columns = {'0_x':'import_limit','0_y': 'export_limit'},inplace = True)
all_lines_in_ints = lines2ints['line'].unique()
test = [line for line in lines['line_name'].unique() if line in all_lines_in_ints]
ints = ints.merge(lines2ints, how = 'inner', left_on = 'interface_name',right_on = 'interface')
def region_region_interchange_all_scenarios(self, **kwargs):
"""
#TODO: Finish Docstring
This method creates a timeseries line plot of interchange flows between the selected region
to each connecting region.
If there are more than 4 total interchanges, all other interchanges are aggregated into an 'other' grouping
Each scenarios is plotted on a separate Facet plot.
Figures and data tables are returned to plot_main
"""
outputs = self._region_region_interchange(self.Scenarios, **kwargs)
return outputs
def region_region_interchange_all_regions(self, **kwargs):
"""
#TODO: Finish Docstring
This method creates a timeseries line plot of interchange flows between the selected region
to each connecting region. All regions are plotted on a single figure with each focus region placed on a separate
facet plot
If there are more than 4 total interchanges, all other interchanges are aggregated into an 'other' grouping
This figure only plots a single scenario that is defined by Main_scenario_plot in user_defined_inputs.csv.
Figures and data tables are saved within method
"""
outputs = self._region_region_interchange([self.Scenarios[0]],plot_scenario=False, **kwargs)
return outputs
def _region_region_interchange(self, scenario_type: list, plot_scenario: bool = True,
timezone: str = "", **_):
"""Creates a timeseries line plot of interchange flows between the selected region/zone
and each connecting region/zone.
Args:
scenario_type (list): List of scenarios to plot, one facet per scenario.
plot_scenario (bool, optional): If False, a single scenario is plotted with one facet
per parent region and the output is saved within the method. Defaults to True.
timezone (str, optional): The timezone to display on the x-axes. Defaults to "".
Returns:
dict: Dictionary containing the created plot and its data table, or
DataSavedInModule if plot_scenario is False.
"""
outputs = {}
if self.AGG_BY == 'zone':
agg = 'zone'
else:
agg = 'region'
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True,f"{agg}_{agg}s_Net_Interchange",scenario_type)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
if 1 in check_input_data:
return MissingInputData()
for zone_input in self.Zones:
self.logger.info(f"Zone = {zone_input}")
ncols, nrows = self.set_facet_col_row_dimensions(multi_scenario=scenario_type)
mplt = PlotLibrary(nrows, ncols, sharey=True,
squeeze=False, ravel_axs=True)
fig, axs = mplt.get_figure()
plt.subplots_adjust(wspace=0.6, hspace=0.3)
data_table_chunks=[]
n=0
for scenario in scenario_type:
rr_int = self[f"{agg}_{agg}s_Net_Interchange"].get(scenario)
if self.shift_leapday == True:
rr_int = self.adjust_for_leapday(rr_int)
# For plot_main handling - need to find better solution
if plot_scenario == False:
outputs={}
for zone_input in self.Zones:
outputs[zone_input] = pd.DataFrame()
if self.AGG_BY != 'region' and self.AGG_BY != 'zone':
agg_region_mapping = self.Region_Mapping[['region',self.AGG_BY]].set_index('region').to_dict()[self.AGG_BY]
# Checks if keys all aggregate to a single value, this plot requires multiple values to work
if len(set(agg_region_mapping.values())) == 1:
return UnsupportedAggregation()
rr_int = rr_int.reset_index()
rr_int['parent'] = rr_int['parent'].map(agg_region_mapping)
rr_int['child'] = rr_int['child'].map(agg_region_mapping)
rr_int_agg = rr_int.groupby(['timestamp','parent','child'],as_index=True).sum()
rr_int_agg.rename(columns = {0:'flow (MW)'}, inplace = True)
rr_int_agg = rr_int_agg.reset_index()
# If plotting all regions update plot setup
if plot_scenario == False:
#Make a facet plot, one panel for each parent zone.
parent_region = rr_int_agg['parent'].unique()
plot_number = len(parent_region)
ncols, nrows = self.set_x_y_dimension(plot_number)
mplt = PlotLibrary(nrows, ncols,
squeeze=False, ravel_axs=True)
fig, axs = mplt.get_figure()
plt.subplots_adjust(wspace=0.6, hspace=0.7)
else:
parent_region = [zone_input]
plot_number = len(scenario_type)
grid_size = ncols*nrows
excess_axs = grid_size - plot_number
for parent in parent_region:
single_parent = rr_int_agg[rr_int_agg['parent'] == parent]
single_parent = single_parent.pivot(index = 'timestamp',columns = 'child',values = 'flow (MW)')
single_parent = single_parent.loc[:,(single_parent != 0).any(axis = 0)] #Remove all 0 columns (uninteresting).
if (parent in single_parent.columns):
single_parent = single_parent.drop(columns = [parent]) #Remove columns if parent = child
#Neaten up lines: if more than 4 total interchanges, aggregate all but the highest 3.
if len(single_parent.columns) > 4:
# Keep the three largest interchanges (by absolute peak flow) and aggregate the rest into 'Other'.
cols_dontagg = single_parent.max().abs().sort_values(ascending = False)[0:3].index
df_dontagg = single_parent[cols_dontagg]
df_toagg = single_parent.drop(columns = cols_dontagg)
agged = df_toagg.sum(axis = 1)
df_dontagg.insert(len(df_dontagg.columns),'Other',agged)
single_parent = df_dontagg.copy()
#Convert units
if n == 0:
unitconversion = self.capacity_energy_unitconversion(single_parent)
single_parent = single_parent / unitconversion['divisor']
for column in single_parent.columns:
mplt.lineplot(single_parent, column, label=column, sub_pos=n)
axs[n].set_title(parent)
axs[n].margins(x=0.01)
mplt.set_subplot_timeseries_format(sub_pos=n)
axs[n].hlines(y = 0, xmin = axs[n].get_xlim()[0], xmax = axs[n].get_xlim()[1], linestyle = ':') #Add horizontal line at 0.
axs[n].legend(loc='lower left',bbox_to_anchor=(1,0))
n+=1
# Create data table for each scenario
scenario_names = pd.Series([scenario]*len(single_parent),name='Scenario')
data_table = single_parent.add_suffix(f" ({unitconversion['units']})")
data_table = data_table.set_index([scenario_names],append=True)
data_table_chunks.append(data_table)
# if plotting all scenarios add facet labels
if plot_scenario == True:
mplt.add_facet_labels(xlabels=self.xlabels,
ylabels = self.ylabels)
#Remove extra axes
mplt.remove_excess_axs(excess_axs, grid_size)
plt.xlabel(timezone, color='black', rotation='horizontal',labelpad = 30)
plt.ylabel(f"Net Interchange ({unitconversion['units']})", color='black', rotation='vertical', labelpad = 40)
# If plotting all regions save output and return none plot_main
if plot_scenario == False:
# Location to save to
Data_Table_Out = rr_int_agg
save_figures = os.path.join(self.figure_folder, self.AGG_BY + '_transmission')
fig.savefig(os.path.join(save_figures, "Region_Region_Interchange_{}.svg".format(self.Scenarios[0])), dpi=600, bbox_inches='tight')
Data_Table_Out.to_csv(os.path.join(save_figures, "Region_Region_Interchange_{}.csv".format(self.Scenarios[0])))
outputs = DataSavedInModule()
return outputs
Data_Out = pd.concat(data_table_chunks, copy=False, axis=0)
# if plotting all scenarios return figures to plot_main
outputs[zone_input] = {'fig': fig,'data_table':Data_Out}
return outputs
def region_region_checkerboard(self, **_):
"""Creates a checkerboard/heatmap figure showing total interchanges between regions/zones.
Each scenario is plotted on its own facet plot.
Plots and Data are saved within the module.
Returns:
DataSavedInModule: DataSavedInModule exception.
"""
outputs = {}
if self.AGG_BY == 'zone':
agg = 'zone'
else:
agg = 'region'
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True,f"{agg}_{agg}s_Net_Interchange",self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
if 1 in check_input_data:
return MissingInputData()
ncols, nrows = self.set_x_y_dimension(len(self.Scenarios))
grid_size = ncols*nrows
excess_axs = grid_size - len(self.Scenarios)
mplt = PlotLibrary(nrows, ncols,
squeeze=False, ravel_axs=True)
fig, axs = mplt.get_figure()
plt.subplots_adjust(wspace=0.02, hspace=0.4)
max_flow_group = []
Data_Out = []
n=0
for scenario in self.Scenarios:
rr_int = self[f"{agg}_{agg}s_Net_Interchange"].get(scenario)
if self.shift_leapday == True:
rr_int = self.adjust_for_leapday(rr_int)
if self.AGG_BY != 'region' and self.AGG_BY != 'zone':
agg_region_mapping = self.Region_Mapping[['region',self.AGG_BY]].set_index('region').to_dict()[self.AGG_BY]
# Checks if keys all aggregate to a single value, this plot requires multiple values to work
if len(set(agg_region_mapping.values())) == 1:
return UnsupportedAggregation()
rr_int = rr_int.reset_index()
rr_int['parent'] = rr_int['parent'].map(agg_region_mapping)
rr_int['child'] = rr_int['child'].map(agg_region_mapping)
rr_int_agg = rr_int.groupby(['parent','child'],as_index=True).sum()
rr_int_agg.rename(columns = {0:'flow (MW)'}, inplace = True)
rr_int_agg=rr_int_agg.loc[rr_int_agg['flow (MW)']>0.01] # Keep only positive flows
rr_int_agg = rr_int_agg.sort_values(ascending=False, by='flow (MW)')
rr_int_agg = rr_int_agg/1000 # MWh -> GWh
data_out = rr_int_agg.copy()
data_out.rename(columns={'flow (MW)':'{} flow (GWh)'.format(scenario)},inplace=True)
max_flow = max(rr_int_agg['flow (MW)'])
rr_int_agg = rr_int_agg.unstack('child')
rr_int_agg = rr_int_agg.droplevel(level = 0, axis = 1)
current_cmap = plt.cm.get_cmap()
current_cmap.set_bad(color='grey')
axs[n].imshow(rr_int_agg)
axs[n].set_xticks(np.arange(rr_int_agg.shape[1]))
axs[n].set_yticks(np.arange(rr_int_agg.shape[0]))
axs[n].set_xticklabels(rr_int_agg.columns)
axs[n].set_yticklabels(rr_int_agg.index)
axs[n].set_title(scenario.replace('_',' '),fontweight='bold')
# Rotate the tick labels and set their alignment.
plt.setp(axs[n].get_xticklabels(), rotation=30, ha="right",
rotation_mode="anchor")
#Delineate the boxes and make room at top and bottom
axs[n].set_xticks(np.arange(rr_int_agg.shape[1]+1)-.5, minor=True)
axs[n].set_yticks(np.arange(rr_int_agg.shape[0]+1)-.5, minor=True)
axs[n].grid(which="minor", color="k", linestyle='-', linewidth=1)
axs[n].tick_params(which="minor", bottom=False, left=False)
max_flow_group.append(max_flow)
Data_Out.append(data_out)
n+=1
#Remove extra axes
mplt.remove_excess_axs(excess_axs,grid_size)
cmap = cm.inferno
norm = mcolors.Normalize(vmin=0, vmax=max(max_flow_group))
cax = plt.axes([0.90, 0.1, 0.035, 0.8])
fig.colorbar(cm.ScalarMappable(norm=norm, cmap=cmap),
cax=cax, label='Total Net Interchange [GWh]')
plt.xlabel('To Region', color='black', rotation='horizontal',
labelpad=40)
plt.ylabel('From Region', color='black', rotation='vertical',
labelpad=40)
Data_Table_Out = pd.concat(Data_Out,axis=1)
save_figures = os.path.join(self.figure_folder, f"{self.AGG_BY}_transmission")
fig.savefig(os.path.join(save_figures, "region_region_checkerboard.svg"),
dpi=600, bbox_inches='tight')
Data_Table_Out.to_csv(os.path.join(save_figures, "region_region_checkerboard.csv"))
outputs = DataSavedInModule()
return outputs
def line_violations_timeseries(self, **kwargs):
"""Creates a timeseries line plot of lineflow violations for each region.
The magnitude of each violation is plotted on the y-axis.
Each scenario is plotted as a separate line.
This method calls _violations() to create the figure.
Returns:
dict: Dictionary containing the created plot and its data table.
"""
outputs = self._violations(**kwargs)
return outputs
def line_violations_totals(self, **kwargs):
"""Creates a barplot of total lineflow violations for each region.
Each scenario is plotted as a separate bar.
This method calls _violations() and passes the total_violations=True argument
to create the figure.
Returns:
dict: Dictionary containing the created plot and its data table.
"""
outputs = self._violations(total_violations=True, **kwargs)
return outputs
def _violations(self, total_violations: bool = False,
timezone: str = "",
start_date_range: str = None,
end_date_range: str = None, **_):
"""Creates line violation plots, line plot and barplots
This method is called from line_violations_timeseries() and line_violations_totals().
Args:
total_violations (bool, optional): If True finds the sum of violations.
Used to create barplots. Defaults to False.
timezone (str, optional): The timezone to display on the x-axes.
Defaults to "".
start_date_range (str, optional): Defines a start date at which to represent data from.
Defaults to None.
end_date_range (str, optional): Defines an end date at which to represent data to.
Defaults to None.
Returns:
dict: dictionary containing the created plot and its data table
"""
outputs = {}
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True,"line_Violation",self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
if 1 in check_input_data:
return MissingInputData()
for zone_input in self.Zones:
self.logger.info(f'Zone = {zone_input}')
all_scenarios = pd.DataFrame()
for scenario in self.Scenarios:
self.logger.info(f"Scenario = {str(scenario)}")
if self.AGG_BY == 'zone':
lines = self.meta.zone_lines(scenario)
else:
lines = self.meta.region_lines(scenario)
line_v = self["line_Violation"].get(scenario)
line_v = line_v.reset_index()
viol = line_v.merge(lines,on = 'line_name',how = 'left')
if self.AGG_BY == 'zone':
viol = viol.groupby(["timestamp", "zone"]).sum()
else:
viol = viol.groupby(["timestamp", self.AGG_BY]).sum()
one_zone = viol.xs(zone_input, level = self.AGG_BY)
one_zone = one_zone.rename(columns = {0 : scenario})
one_zone = one_zone.abs() #We don't care about the direction of the violation
all_scenarios = pd.concat([all_scenarios,one_zone], axis = 1)
all_scenarios.columns = all_scenarios.columns.str.replace('_',' ')
#remove columns that are all equal to 0
all_scenarios = all_scenarios.loc[:, (all_scenarios != 0).any(axis=0)]
if all_scenarios.empty:
outputs[zone_input] = MissingZoneData()
continue
unitconversion = self.capacity_energy_unitconversion(all_scenarios)
all_scenarios = all_scenarios/unitconversion['divisor']
Data_Table_Out = all_scenarios.add_suffix(f" ({unitconversion['units']})")
#Make scenario/color dictionary.
color_dict = dict(zip(all_scenarios.columns,self.color_list))
mplt = PlotLibrary()
fig, ax = mplt.get_figure()
if total_violations==True:
all_scenarios_tot = all_scenarios.sum()
all_scenarios_tot.plot.bar(stacked=False, rot=0,
color=[color_dict.get(x, '#333333') for x in all_scenarios_tot.index],
linewidth=0.1, width=0.35, ax=ax)
else:
for column in all_scenarios:
mplt.lineplot(all_scenarios,column,color=color_dict,label=column)
ax.margins(x=0.01)
mplt.set_subplot_timeseries_format(minticks=6,maxticks=12)
ax.set_xlabel(timezone, color='black', rotation='horizontal')
mplt.add_legend()
if mconfig.parser("plot_title_as_region"):
mplt.add_main_title(zone_input)
ax.set_ylabel(f"Line violations ({unitconversion['units']})", color='black', rotation='vertical')
outputs[zone_input] = {'fig': fig,'data_table':Data_Table_Out}
return outputs
def net_export(self, timezone: str = "",
start_date_range: str = None,
end_date_range: str = None, **_):
"""creates a timeseries net export line graph.
Scenarios are plotted as separate lines.
Args:
timezone (str, optional): The timezone to display on the x-axes.
Defaults to "".
start_date_range (str, optional): Defines a start date at which to represent data from.
Defaults to None.
end_date_range (str, optional): Defines an end date at which to represent data to.
Defaults to None.
Returns:
dict: dictionary containing the created plot and its data table
"""
if self.AGG_BY == 'zone':
agg = 'zone'
else:
agg = 'region'
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True,f"{agg}_Net_Interchange",self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
if 1 in check_input_data:
return MissingInputData()
outputs = {}
for zone_input in self.Zones:
self.logger.info(f"{self.AGG_BY} = {zone_input}")
net_export_all_scenarios = pd.DataFrame()
for scenario in self.Scenarios:
self.logger.info(f"Scenario = {scenario}")
net_export_read = self[f"{agg}_Net_Interchange"].get(scenario)
if self.shift_leapday == True:
net_export_read = self.adjust_for_leapday(net_export_read)
net_export = net_export_read.xs(zone_input, level = self.AGG_BY)
net_export = net_export.groupby("timestamp").sum()
net_export.columns = [scenario]
if pd.notna(start_date_range):
self.logger.info(f"Plotting specific date range: \
{str(start_date_range)} to {str(end_date_range)}")
net_export = net_export[start_date_range : end_date_range]
net_export_all_scenarios = pd.concat([net_export_all_scenarios,net_export], axis = 1)
net_export_all_scenarios.columns = net_export_all_scenarios.columns.str.replace('_', ' ')
unitconversion = self.capacity_energy_unitconversion(net_export_all_scenarios)
net_export_all_scenarios = net_export_all_scenarios/unitconversion["divisor"]
# Data table of values to return to main program
Data_Table_Out = net_export_all_scenarios.add_suffix(f" ({unitconversion['units']})")
#Make scenario/color dictionary.
color_dict = dict(zip(net_export_all_scenarios.columns,self.color_list))
mplt = PlotLibrary()
fig, ax = mplt.get_figure()
plt.subplots_adjust(wspace=0.05, hspace=0.2)
if net_export_all_scenarios.empty:
out = MissingZoneData()
outputs[zone_input] = out
continue
for column in net_export_all_scenarios:
mplt.lineplot(net_export_all_scenarios,column,color_dict, label=column)
ax.set_ylabel(f'Net exports ({unitconversion["units"]})', color='black',
rotation='vertical')
ax.set_xlabel(timezone, color='black', rotation='horizontal')
ax.margins(x=0.01)
ax.hlines(y=0, xmin=ax.get_xlim()[0], xmax=ax.get_xlim()[1],
linestyle=':')
mplt.set_subplot_timeseries_format()
mplt.add_legend(reverse_legend=True)
if mconfig.parser("plot_title_as_region"):
mplt.add_main_title(zone_input)
outputs[zone_input] = {'fig': fig, 'data_table': Data_Table_Out}
return outputs
def zonal_interchange(self, figure_name: str = None,
start_date_range: str = None,
end_date_range: str = None, **_):
"""Creates a line plot of the net interchange between each zone, with a facet for each zone.
The method will only work if agg_by = "zone".
The code will create either a timeseries or a duration curve depending on
whether the word 'duration_curve' is in the figure_name.
Args:
figure_name (str, optional): User defined figure output name.
Defaults to None.
start_date_range (str, optional): Defines a start date at which to represent data from.
Defaults to None.
end_date_range (str, optional): Defines an end date at which to represent data to.
Defaults to None.
Returns:
dict: dictionary containing the created plot and its data table
"""
if self.AGG_BY not in ["zone", "zones", "Zone", "Zones"]:
self.logger.warning("This plot only supports aggregation zone")
return UnsupportedAggregation()
duration_curve=False
if 'duration_curve' in figure_name:
duration_curve = True
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True,"line_Flow",self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
if 1 in check_input_data:
return MissingInputData()
outputs = {}
# sets up x, y dimensions of plot
ncols, nrows = self.set_facet_col_row_dimensions(multi_scenario=self.Scenarios)
grid_size = ncols*nrows
# Used to calculate any excess axis to delete
plot_number = len(self.Scenarios)
excess_axs = grid_size - plot_number
for zone_input in self.Zones:
self.logger.info(f"{self.AGG_BY} = {zone_input}")
mplt = PlotLibrary(nrows, ncols, sharey=True,
squeeze=False, ravel_axs=True)
fig, axs = mplt.get_figure()
plt.subplots_adjust(wspace=0.1, hspace=0.5)
net_exports_all = []
for n, scenario in enumerate(self.Scenarios):
net_exports = []
exp_lines = self.meta.zone_exporting_lines(scenario)
imp_lines = self.meta.zone_importing_lines(scenario)
if exp_lines.empty or imp_lines.empty:
return MissingMetaData()
exp_lines.columns = ['region','line_name']
imp_lines.columns = ['region','line_name']
#Find list of lines that connect each region.
exp_oz = exp_lines[exp_lines['region'] == zone_input]
imp_oz = imp_lines[imp_lines['region'] == zone_input]
other_zones = self.meta.zones(scenario).name.tolist()
try:
    other_zones.remove(zone_input)
except ValueError:
    self.logger.warning("Are you sure you set agg_by = zone?")
self.logger.info(f"Scenario = {str(scenario)}")
flow = self["line_Flow"][scenario].copy()
if self.shift_leapday == True:
flow = self.adjust_for_leapday(flow)
flow = flow.reset_index()
for other_zone in other_zones:
exp_other_oz = exp_lines[exp_lines['region'] == other_zone]
imp_other_oz = imp_lines[imp_lines['region'] == other_zone]
exp_pair = pd.merge(exp_oz, imp_other_oz, left_on='line_name',
right_on='line_name')
imp_pair = pd.merge(imp_oz, exp_other_oz, left_on='line_name',
right_on='line_name')
#Swap columns for importing lines
imp_pair = imp_pair.reindex(columns=['region_from', 'line_name', 'region_to'])
export = flow[flow['line_name'].isin(exp_pair['line_name'])]
imports = flow[flow['line_name'].isin(imp_pair['line_name'])]
export = export.groupby(['timestamp']).sum()
imports = imports.groupby(['timestamp']).sum()
#Check for situations where there are only exporting or importing lines for this zonal pair.
if imports.empty:
net_export = export
elif export.empty:
net_export = -imports
else:
net_export = export - imports
net_export.columns = [other_zone]
if pd.notna(start_date_range):
if other_zone == other_zones[0]:
self.logger.info(f"Plotting specific date range: \
{str(start_date_range)} to {str(end_date_range)}")
net_export = net_export[start_date_range : end_date_range]
if duration_curve:
net_export = self.sort_duration(net_export,other_zone)
net_exports.append(net_export)
net_exports = pd.concat(net_exports,axis = 1)
net_exports = net_exports.dropna(axis = 'columns')
net_exports.index = pd.to_datetime(net_exports.index)
net_exports['Net export'] = net_exports.sum(axis = 1)
# unitconversion based off peak export hour, only checked once
if zone_input == self.Zones[0] and scenario == self.Scenarios[0]:
unitconversion = self.capacity_energy_unitconversion(net_exports)
net_exports = net_exports / unitconversion['divisor']
if duration_curve:
net_exports = net_exports.reset_index().drop(columns = 'index')
for column in net_exports:
linestyle = '--' if column == 'Net export' else 'solid'
mplt.lineplot(net_exports, column=column, label=column,
sub_pos=n, linestyle=linestyle)
axs[n].margins(x=0.01)
#Add horizontal line at 0.
axs[n].hlines(y=0, xmin=axs[n].get_xlim()[0], xmax=axs[n].get_xlim()[1],
linestyle=':')
if not duration_curve:
mplt.set_subplot_timeseries_format(sub_pos=n)
#Add scenario column to output table.
scenario_names = pd.Series([scenario] * len(net_exports), name='Scenario')
net_exports = net_exports.add_suffix(f" ({unitconversion['units']})")
net_exports = net_exports.set_index([scenario_names], append=True)
net_exports_all.append(net_exports)
mplt.add_facet_labels(xlabels=self.xlabels,
ylabels = self.ylabels)
mplt.add_legend()
#Remove extra axes
mplt.remove_excess_axs(excess_axs,grid_size)
if mconfig.parser("plot_title_as_region"):
mplt.add_main_title(zone_input)
plt.ylabel(f"Net export ({unitconversion['units']})", color='black',
rotation='vertical', labelpad=40)
if duration_curve:
plt.xlabel('Sorted hour of the year', color='black', labelpad=30)
Data_Table_Out = pd.concat(net_exports_all)
# if plotting all scenarios return figures to plot_main
outputs[zone_input] = {'fig': fig,'data_table' : Data_Table_Out}
return outputs
def zonal_interchange_total(self, start_date_range: str = None,
end_date_range: str = None, **_):
"""Creates a barplot of the net interchange between each zone, separated by positive and negative flows.
The method will only work if agg_by = "zone".
Args:
start_date_range (str, optional): Defines a start date at which to represent data from.
Defaults to None.
end_date_range (str, optional): Defines an end date at which to represent data to.
Defaults to None.
Returns:
dict: dictionary containing the created plot and its data table
"""
if self.AGG_BY not in ["zone", "zones", "Zone", "Zones"]:
self.logger.warning("This plot only supports aggregation zone")
return UnsupportedAggregation()
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True,"line_Flow",self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
if 1 in check_input_data:
return MissingInputData()
outputs = {}
for zone_input in self.Zones:
self.logger.info(f"{self.AGG_BY} = {zone_input}")
mplt = PlotLibrary()
fig, ax = mplt.get_figure()
plt.subplots_adjust(wspace=0.05, hspace=0.2)
net_exports_all = []
# Holds each scenario output table
data_out_chunk = []
for n, scenario in enumerate(self.Scenarios):
exp_lines = self.meta.zone_exporting_lines(scenario)
imp_lines = self.meta.zone_importing_lines(scenario)
if exp_lines.empty or imp_lines.empty:
return MissingMetaData()
exp_lines.columns = ['region', 'line_name']
imp_lines.columns = ['region', 'line_name']
#Find list of lines that connect each region.
exp_oz = exp_lines[exp_lines['region'] == zone_input]
imp_oz = imp_lines[imp_lines['region'] == zone_input]
other_zones = self.meta.zones(scenario).name.tolist()
other_zones.remove(zone_input)
net_exports = []
self.logger.info(f"Scenario = {str(scenario)}")
flow = self["line_Flow"][scenario]
flow = flow.reset_index()
for other_zone in other_zones:
exp_other_oz = exp_lines[exp_lines['region'] == other_zone]
imp_other_oz = imp_lines[imp_lines['region'] == other_zone]
exp_pair = pd.merge(exp_oz, imp_other_oz, left_on='line_name',
right_on='line_name')
imp_pair = pd.merge(imp_oz, exp_other_oz, left_on='line_name',
right_on='line_name')
#Swap columns for importing lines
imp_pair = imp_pair.reindex(columns=['region_from', 'line_name', 'region_to'])
export = flow[flow['line_name'].isin(exp_pair['line_name'])]
imports = flow[flow['line_name'].isin(imp_pair['line_name'])]
export = export.groupby(['timestamp']).sum()
imports = imports.groupby(['timestamp']).sum()
#Check for situations where there are only exporting or importing lines for this zonal pair.
if imports.empty:
net_export = export
elif export.empty:
net_export = -imports
else:
net_export = export - imports
net_export.columns = [other_zone]
if pd.notna(start_date_range):
if other_zone == other_zones[0]:
self.logger.info(f"Plotting specific date range: \
{str(start_date_range)} to {str(end_date_range)}")
net_export = net_export[start_date_range : end_date_range]
net_exports.append(net_export)
net_exports = | pd.concat(net_exports, axis=1) | pandas.concat |
# ===============================================================================
# Copyright 2020-2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# TODO need to check what kind of predictions XGBoost is performing (probability vs classes etc)
# TODO need to make sure that both DAAL and XGBoost are using doubles
RunningInferenceProfiling = True
import os
from numpy.core.fromnumeric import mean
# This is a hack to make XGBoost run single threaded.
if RunningInferenceProfiling:
os.environ['OMP_NUM_THREADS'] = "1"
import sys
scriptPath = os.path.realpath(__file__)
scriptDir = os.path.dirname(scriptPath)
benchDir = os.path.dirname(scriptDir)
sys.path = sys.path + [benchDir]
import argparse
import bench
import numpy as np
import pandas as pd
import xgboost as xgb
import time
import math
import daal4py as d4p
from sklearn.metrics import mean_squared_error
# Anything printed to the screen will cause a test failure, so only print these when profiling.
if RunningInferenceProfiling:
print("Starting daal xgboost benchmark. PID : ", os.getpid())
d4p.daalinit(1)
print("Running DAAL with ", d4p.num_threads(), " threads.")
def convert_probs_to_classes(y_prob):
return np.array([np.argmax(y_prob[i]) for i in range(y_prob.shape[0])])
def convert_xgb_predictions(y_pred, objective):
if objective == 'multi:softprob':
y_pred = convert_probs_to_classes(y_pred)
elif objective == 'binary:logistic':
y_pred = y_pred.astype(np.int32)
return y_pred
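# Small illustration of the conversion above with a made-up 2-sample, 3-class
# probability matrix; np.argmax picks the most likely class per row:
#   y_prob = np.array([[0.1, 0.7, 0.2], [0.6, 0.3, 0.1]])
#   convert_probs_to_classes(y_prob)  # -> array([1, 0])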
logfile = open("xgb_daal_bench_log.txt", "w")
parser = argparse.ArgumentParser(description='xgboost gradient boosted trees DAAL inference benchmark')
parser.add_argument('--colsample-bytree', type=float, default=1,
help='Subsample ratio of columns '
'when constructing each tree')
parser.add_argument('--count-dmatrix', default=False, action='store_true',
help='Count DMatrix creation in time measurements')
parser.add_argument('--enable-experimental-json-serialization', default=True,
choices=('True', 'False'), help='Use JSON to store memory snapshots')
parser.add_argument('--grow-policy', type=str, default='depthwise',
help='Controls a way new nodes are added to the tree')
parser.add_argument('--inplace-predict', default=False, action='store_true',
help='Perform inplace_predict instead of default')
parser.add_argument('--learning-rate', '--eta', type=float, default=0.3,
help='Step size shrinkage used in update '
'to prevents overfitting')
parser.add_argument('--max-bin', type=int, default=256,
help='Maximum number of discrete bins to '
'bucket continuous features')
parser.add_argument('--max-delta-step', type=float, default=0,
help='Maximum delta step we allow each leaf output to be')
parser.add_argument('--max-depth', type=int, default=6,
help='Maximum depth of a tree')
parser.add_argument('--max-leaves', type=int, default=0,
help='Maximum number of nodes to be added')
parser.add_argument('--min-child-weight', type=float, default=1,
help='Minimum sum of instance weight needed in a child')
parser.add_argument('--min-split-loss', '--gamma', type=float, default=0,
help='Minimum loss reduction required to make'
' partition on a leaf node')
parser.add_argument('--n-estimators', type=int, default=100,
help='The number of gradient boosted trees')
parser.add_argument('--objective', type=str, required=True,
choices=('reg:squarederror', 'binary:logistic',
'multi:softmax', 'multi:softprob'),
help='Specifies the learning task')
parser.add_argument('--reg-alpha', type=float, default=0,
help='L1 regularization term on weights')
parser.add_argument('--reg-lambda', type=float, default=1,
help='L2 regularization term on weights')
parser.add_argument('--scale-pos-weight', type=float, default=1,
help='Controls a balance of positive and negative weights')
parser.add_argument('--single-precision-histogram', default=False, action='store_true',
help='Build histograms instead of double precision')
parser.add_argument('--subsample', type=float, default=1,
help='Subsample ratio of the training instances')
parser.add_argument('--tree-method', type=str, required=True,
help='The tree construction algorithm used in XGBoost')
params = bench.parse_args(parser)
# Default seed
if params.seed == 12345:
params.seed = 0
model_file_path = 'xgb_models/{dataset_name}_xgb_model_save.json'.format(dataset_name = params.dataset_name)
# Load and convert data
X_train, X_test, y_train, y_test = bench.load_data(params)
# print("Done loading test data...")
logfile.write("X_train shape : {shape}\n".format(shape = X_train.shape))
# print("X_train has inf : ", np.isinf(X_train).any().any())
logfile.write("y_train shape : {shape}\n".format(shape = y_train.shape))
logfile.write("X_test shape : {shape}\n".format(shape = X_test.shape))
logfile.write("y_test shape : {shape}\n".format(shape = y_test.shape))
xgb_params = {
'booster': 'gbtree',
'verbosity': 0,
'learning_rate': params.learning_rate,
'min_split_loss': params.min_split_loss,
'max_depth': params.max_depth,
'min_child_weight': params.min_child_weight,
'max_delta_step': params.max_delta_step,
'subsample': params.subsample,
'sampling_method': 'uniform',
'colsample_bytree': params.colsample_bytree,
'colsample_bylevel': 1,
'colsample_bynode': 1,
'reg_lambda': params.reg_lambda,
'reg_alpha': params.reg_alpha,
'tree_method': params.tree_method,
'scale_pos_weight': params.scale_pos_weight,
'grow_policy': params.grow_policy,
'max_leaves': params.max_leaves,
'max_bin': params.max_bin,
'objective': params.objective,
'seed': params.seed,
'single_precision_histogram': params.single_precision_histogram,
'enable_experimental_json_serialization':
params.enable_experimental_json_serialization
}
if params.threads != -1:
xgb_params.update({'nthread': params.threads})
if params.objective.startswith('reg'):
task = 'regression'
metric_name, metric_func = 'rmse', bench.rmse_score
def predict(X):
pred = d4p.gbt_regression_prediction().compute(X, daal_model).prediction
return pred
else:
task = 'classification'
metric_name = 'accuracy'
metric_func = bench.accuracy_score
if 'cudf' in str(type(y_train)):
params.n_classes = y_train[y_train.columns[0]].nunique()
else:
params.n_classes = len(np.unique(y_train))
# Covtype has one class more than there is in train
if params.dataset_name == 'covtype':
params.n_classes += 1
if params.n_classes > 2:
xgb_params['num_class'] = params.n_classes
def predict(X):
pred = d4p.gbt_classification_prediction(nClasses = params.n_classes).compute(X, daal_model).prediction
return pred
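# Note: daal_model is expected to be defined later in the full script (not shown in
# this excerpt). In typical daal4py workflows it comes from converting a trained
# XGBoost booster, e.g. (assumed usage, not confirmed by this file):
#   booster = xgb.train(xgb_params, xgb.DMatrix(X_train, label=y_train), params.n_estimators)
#   daal_model = d4p.get_gbt_model_from_xgboost(booster)
# after which gbt_*_prediction().compute(X, daal_model) runs inference without XGBoost.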
def RepeatDF(df, n):
newdf = pd.DataFrame(np.repeat(df.values, n, axis=0))
newdf.columns = df.columns
return newdf
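# Example with made-up values: RepeatDF(pd.DataFrame({'a': [1, 2]}), 2) returns a frame
# whose 'a' column is [1, 1, 2, 2], i.e. each row duplicated n times (presumably to
# enlarge the benchmark inputs).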
def RepeatSeries(s, n):
newSeries = | pd.Series([]) | pandas.Series |
# pylint: disable=E1101,E1103,W0232
from datetime import datetime, timedelta
from pandas.compat import range, lrange, lzip, u, zip
import operator
import re
import nose
import warnings
import os
import numpy as np
from numpy.testing import assert_array_equal
from pandas import period_range, date_range
from pandas.core.index import (Index, Float64Index, Int64Index, MultiIndex,
InvalidIndexError, NumericIndex)
from pandas.tseries.index import DatetimeIndex
from pandas.tseries.tdi import TimedeltaIndex
from pandas.tseries.period import PeriodIndex
from pandas.core.series import Series
from pandas.util.testing import (assert_almost_equal, assertRaisesRegexp,
assert_copy)
from pandas import compat
from pandas.compat import long
import pandas.util.testing as tm
import pandas.core.config as cf
from pandas.tseries.index import _to_m8
import pandas.tseries.offsets as offsets
import pandas as pd
from pandas.lib import Timestamp
class Base(object):
""" base class for index sub-class tests """
_holder = None
_compat_props = ['shape', 'ndim', 'size', 'itemsize', 'nbytes']
def verify_pickle(self,index):
unpickled = self.round_trip_pickle(index)
self.assertTrue(index.equals(unpickled))
def test_pickle_compat_construction(self):
# this is testing for pickle compat
if self._holder is None:
return
# need an object to create with
self.assertRaises(TypeError, self._holder)
def test_numeric_compat(self):
idx = self.create_index()
tm.assertRaisesRegexp(TypeError,
"cannot perform __mul__",
lambda : idx * 1)
tm.assertRaisesRegexp(TypeError,
"cannot perform __mul__",
lambda : 1 * idx)
div_err = "cannot perform __truediv__" if compat.PY3 else "cannot perform __div__"
tm.assertRaisesRegexp(TypeError,
div_err,
lambda : idx / 1)
tm.assertRaisesRegexp(TypeError,
div_err,
lambda : 1 / idx)
tm.assertRaisesRegexp(TypeError,
"cannot perform __floordiv__",
lambda : idx // 1)
tm.assertRaisesRegexp(TypeError,
"cannot perform __floordiv__",
lambda : 1 // idx)
def test_boolean_context_compat(self):
# boolean context compat
idx = self.create_index()
def f():
if idx:
pass
tm.assertRaisesRegexp(ValueError,'The truth value of a',f)
def test_ndarray_compat_properties(self):
idx = self.create_index()
self.assertTrue(idx.T.equals(idx))
self.assertTrue(idx.transpose().equals(idx))
values = idx.values
for prop in self._compat_props:
self.assertEqual(getattr(idx, prop), getattr(values, prop))
# test for validity
idx.nbytes
idx.values.nbytes
class TestIndex(Base, tm.TestCase):
_holder = Index
_multiprocess_can_split_ = True
def setUp(self):
self.indices = dict(
unicodeIndex = tm.makeUnicodeIndex(100),
strIndex = tm.makeStringIndex(100),
dateIndex = tm.makeDateIndex(100),
intIndex = tm.makeIntIndex(100),
floatIndex = tm.makeFloatIndex(100),
boolIndex = Index([True,False]),
empty = Index([]),
tuples = MultiIndex.from_tuples(lzip(['foo', 'bar', 'baz'],
[1, 2, 3]))
)
for name, ind in self.indices.items():
setattr(self, name, ind)
def create_index(self):
return Index(list('abcde'))
def test_wrong_number_names(self):
def testit(ind):
ind.names = ["apple", "banana", "carrot"]
for ind in self.indices.values():
assertRaisesRegexp(ValueError, "^Length", testit, ind)
def test_set_name_methods(self):
new_name = "This is the new name for this index"
indices = (self.dateIndex, self.intIndex, self.unicodeIndex,
self.empty)
for ind in indices:
original_name = ind.name
new_ind = ind.set_names([new_name])
self.assertEqual(new_ind.name, new_name)
self.assertEqual(ind.name, original_name)
res = ind.rename(new_name, inplace=True)
# should return None
self.assertIsNone(res)
self.assertEqual(ind.name, new_name)
self.assertEqual(ind.names, [new_name])
#with assertRaisesRegexp(TypeError, "list-like"):
# # should still fail even if it would be the right length
# ind.set_names("a")
with assertRaisesRegexp(ValueError, "Level must be None"):
ind.set_names("a", level=0)
# rename in place just leaves tuples and other containers alone
name = ('A', 'B')
ind = self.intIndex
ind.rename(name, inplace=True)
self.assertEqual(ind.name, name)
self.assertEqual(ind.names, [name])
def test_hash_error(self):
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(self.strIndex).__name__):
hash(self.strIndex)
def test_new_axis(self):
new_index = self.dateIndex[None, :]
self.assertEqual(new_index.ndim, 2)
tm.assert_isinstance(new_index, np.ndarray)
def test_copy_and_deepcopy(self):
from copy import copy, deepcopy
for func in (copy, deepcopy):
idx_copy = func(self.strIndex)
self.assertIsNot(idx_copy, self.strIndex)
self.assertTrue(idx_copy.equals(self.strIndex))
new_copy = self.strIndex.copy(deep=True, name="banana")
self.assertEqual(new_copy.name, "banana")
new_copy2 = self.intIndex.copy(dtype=int)
self.assertEqual(new_copy2.dtype.kind, 'i')
def test_duplicates(self):
idx = Index([0, 0, 0])
self.assertFalse(idx.is_unique)
def test_sort(self):
self.assertRaises(TypeError, self.strIndex.sort)
def test_mutability(self):
self.assertRaises(TypeError, self.strIndex.__setitem__, 0, 'foo')
def test_constructor(self):
# regular instance creation
tm.assert_contains_all(self.strIndex, self.strIndex)
tm.assert_contains_all(self.dateIndex, self.dateIndex)
# casting
arr = np.array(self.strIndex)
index = Index(arr)
tm.assert_contains_all(arr, index)
self.assert_numpy_array_equal(self.strIndex, index)
# copy
arr = np.array(self.strIndex)
index = Index(arr, copy=True, name='name')
tm.assert_isinstance(index, Index)
self.assertEqual(index.name, 'name')
assert_array_equal(arr, index)
arr[0] = "SOMEBIGLONGSTRING"
self.assertNotEqual(index[0], "SOMEBIGLONGSTRING")
# what to do here?
# arr = np.array(5.)
# self.assertRaises(Exception, arr.view, Index)
def test_constructor_corner(self):
# corner case
self.assertRaises(TypeError, Index, 0)
def test_constructor_from_series(self):
expected = DatetimeIndex([Timestamp('20110101'),Timestamp('20120101'),Timestamp('20130101')])
s = Series([Timestamp('20110101'),Timestamp('20120101'),Timestamp('20130101')])
result = Index(s)
self.assertTrue(result.equals(expected))
result = DatetimeIndex(s)
self.assertTrue(result.equals(expected))
# GH 6273
# create from a series, passing a freq
s = Series(pd.to_datetime(['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990']))
result = DatetimeIndex(s, freq='MS')
expected = DatetimeIndex(['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990'],freq='MS')
self.assertTrue(result.equals(expected))
df = pd.DataFrame(np.random.rand(5,3))
df['date'] = ['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990']
result = DatetimeIndex(df['date'], freq='MS')
# GH 6274
# infer freq of same
result = pd.infer_freq(df['date'])
self.assertEqual(result,'MS')
def test_constructor_ndarray_like(self):
# GH 5460#issuecomment-44474502
# it should be possible to convert any object that satisfies the numpy
# ndarray interface directly into an Index
class ArrayLike(object):
def __init__(self, array):
self.array = array
def __array__(self, dtype=None):
return self.array
for array in [np.arange(5),
np.array(['a', 'b', 'c']),
date_range('2000-01-01', periods=3).values]:
expected = pd.Index(array)
result = pd.Index(ArrayLike(array))
self.assertTrue(result.equals(expected))
def test_index_ctor_infer_periodindex(self):
xp = period_range('2012-1-1', freq='M', periods=3)
rs = Index(xp)
assert_array_equal(rs, xp)
tm.assert_isinstance(rs, PeriodIndex)
def test_constructor_simple_new(self):
idx = Index([1, 2, 3, 4, 5], name='int')
result = idx._simple_new(idx, 'int')
self.assertTrue(result.equals(idx))
idx = Index([1.1, np.nan, 2.2, 3.0], name='float')
result = idx._simple_new(idx, 'float')
self.assertTrue(result.equals(idx))
idx = Index(['A', 'B', 'C', np.nan], name='obj')
result = idx._simple_new(idx, 'obj')
self.assertTrue(result.equals(idx))
def test_copy(self):
i = Index([], name='Foo')
i_copy = i.copy()
self.assertEqual(i_copy.name, 'Foo')
def test_view(self):
i = Index([], name='Foo')
i_view = i.view()
self.assertEqual(i_view.name, 'Foo')
def test_legacy_pickle_identity(self):
# GH 8431
pth = tm.get_data_path()
s1 = pd.read_pickle(os.path.join(pth,'s1-0.12.0.pickle'))
s2 = pd.read_pickle(os.path.join(pth,'s2-0.12.0.pickle'))
self.assertFalse(s1.index.identical(s2.index))
self.assertFalse(s1.index.equals(s2.index))
def test_astype(self):
casted = self.intIndex.astype('i8')
# it works!
casted.get_loc(5)
# pass on name
self.intIndex.name = 'foobar'
casted = self.intIndex.astype('i8')
self.assertEqual(casted.name, 'foobar')
def test_compat(self):
self.strIndex.tolist()
def test_equals(self):
# same
self.assertTrue(Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'c'])))
# different length
self.assertFalse(Index(['a', 'b', 'c']).equals(Index(['a', 'b'])))
# same length, different values
self.assertFalse(Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'd'])))
# Must also be an Index
self.assertFalse(Index(['a', 'b', 'c']).equals(['a', 'b', 'c']))
def test_insert(self):
# GH 7256
# validate neg/pos inserts
result = Index(['b', 'c', 'd'])
#test 0th element
self.assertTrue(Index(['a', 'b', 'c', 'd']).equals(
result.insert(0, 'a')))
#test Nth element that follows Python list behavior
self.assertTrue(Index(['b', 'c', 'e', 'd']).equals(
result.insert(-1, 'e')))
#test loc +/- neq (0, -1)
self.assertTrue(result.insert(1, 'z').equals(
result.insert(-2, 'z')))
#test empty
null_index = Index([])
self.assertTrue(Index(['a']).equals(
null_index.insert(0, 'a')))
def test_delete(self):
idx = Index(['a', 'b', 'c', 'd'], name='idx')
expected = Index(['b', 'c', 'd'], name='idx')
result = idx.delete(0)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
expected = Index(['a', 'b', 'c'], name='idx')
result = idx.delete(-1)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
with tm.assertRaises((IndexError, ValueError)):
# either, depending on numpy version
result = idx.delete(5)
def test_identical(self):
# index
i1 = Index(['a', 'b', 'c'])
i2 = Index(['a', 'b', 'c'])
self.assertTrue(i1.identical(i2))
i1 = i1.rename('foo')
self.assertTrue(i1.equals(i2))
self.assertFalse(i1.identical(i2))
i2 = i2.rename('foo')
self.assertTrue(i1.identical(i2))
i3 = Index([('a', 'a'), ('a', 'b'), ('b', 'a')])
i4 = Index([('a', 'a'), ('a', 'b'), ('b', 'a')], tupleize_cols=False)
self.assertFalse(i3.identical(i4))
def test_is_(self):
ind = Index(range(10))
self.assertTrue(ind.is_(ind))
self.assertTrue(ind.is_(ind.view().view().view().view()))
self.assertFalse(ind.is_(Index(range(10))))
self.assertFalse(ind.is_(ind.copy()))
self.assertFalse(ind.is_(ind.copy(deep=False)))
self.assertFalse(ind.is_(ind[:]))
self.assertFalse(ind.is_(ind.view(np.ndarray).view(Index)))
self.assertFalse(ind.is_(np.array(range(10))))
# quasi-implementation dependent
self.assertTrue(ind.is_(ind.view()))
ind2 = ind.view()
ind2.name = 'bob'
self.assertTrue(ind.is_(ind2))
self.assertTrue(ind2.is_(ind))
# doesn't matter if Indices are *actually* views of underlying data,
self.assertFalse(ind.is_(Index(ind.values)))
arr = np.array(range(1, 11))
ind1 = Index(arr, copy=False)
ind2 = Index(arr, copy=False)
self.assertFalse(ind1.is_(ind2))
def test_asof(self):
d = self.dateIndex[0]
self.assertIs(self.dateIndex.asof(d), d)
self.assertTrue(np.isnan(self.dateIndex.asof(d - timedelta(1))))
d = self.dateIndex[-1]
self.assertEqual(self.dateIndex.asof(d + timedelta(1)), d)
d = self.dateIndex[0].to_datetime()
tm.assert_isinstance(self.dateIndex.asof(d), Timestamp)
def test_asof_datetime_partial(self):
idx = pd.date_range('2010-01-01', periods=2, freq='m')
expected = Timestamp('2010-01-31')
result = idx.asof('2010-02')
self.assertEqual(result, expected)
def test_nanosecond_index_access(self):
s = Series([Timestamp('20130101')]).values.view('i8')[0]
r = DatetimeIndex([s + 50 + i for i in range(100)])
x = Series(np.random.randn(100), index=r)
first_value = x.asof(x.index[0])
# this does not yet work, as parsing strings is done via dateutil
#self.assertEqual(first_value, x['2013-01-01 00:00:00.000000050+0000'])
self.assertEqual(first_value, x[Timestamp(np.datetime64('2013-01-01 00:00:00.000000050+0000', 'ns'))])
def test_argsort(self):
result = self.strIndex.argsort()
expected = np.array(self.strIndex).argsort()
self.assert_numpy_array_equal(result, expected)
def test_comparators(self):
index = self.dateIndex
element = index[len(index) // 2]
element = _to_m8(element)
arr = np.array(index)
def _check(op):
arr_result = op(arr, element)
index_result = op(index, element)
self.assertIsInstance(index_result, np.ndarray)
self.assert_numpy_array_equal(arr_result, index_result)
_check(operator.eq)
_check(operator.ne)
_check(operator.gt)
_check(operator.lt)
_check(operator.ge)
_check(operator.le)
def test_booleanindex(self):
boolIdx = np.repeat(True, len(self.strIndex)).astype(bool)
boolIdx[5:30:2] = False
subIndex = self.strIndex[boolIdx]
for i, val in enumerate(subIndex):
self.assertEqual(subIndex.get_loc(val), i)
subIndex = self.strIndex[list(boolIdx)]
for i, val in enumerate(subIndex):
self.assertEqual(subIndex.get_loc(val), i)
def test_fancy(self):
sl = self.strIndex[[1, 2, 3]]
for i in sl:
self.assertEqual(i, sl[sl.get_loc(i)])
def test_empty_fancy(self):
empty_farr = np.array([], dtype=np.float_)
empty_iarr = np.array([], dtype=np.int_)
empty_barr = np.array([], dtype=np.bool_)
# pd.DatetimeIndex is excluded, because it overrides getitem and should
# be tested separately.
for idx in [self.strIndex, self.intIndex, self.floatIndex]:
empty_idx = idx.__class__([])
values = idx.values
self.assertTrue(idx[[]].identical(empty_idx))
self.assertTrue(idx[empty_iarr].identical(empty_idx))
self.assertTrue(idx[empty_barr].identical(empty_idx))
# np.ndarray only accepts ndarray of int & bool dtypes, so should
# Index.
self.assertRaises(IndexError, idx.__getitem__, empty_farr)
def test_getitem(self):
arr = np.array(self.dateIndex)
exp = self.dateIndex[5]
exp = _to_m8(exp)
self.assertEqual(exp, arr[5])
def test_shift(self):
shifted = self.dateIndex.shift(0, timedelta(1))
self.assertIs(shifted, self.dateIndex)
shifted = self.dateIndex.shift(5, timedelta(1))
self.assert_numpy_array_equal(shifted, self.dateIndex + timedelta(5))
shifted = self.dateIndex.shift(1, 'B')
self.assert_numpy_array_equal(shifted, self.dateIndex + offsets.BDay())
shifted.name = 'shifted'
self.assertEqual(shifted.name, shifted.shift(1, 'D').name)
def test_intersection(self):
first = self.strIndex[:20]
second = self.strIndex[:10]
intersect = first.intersection(second)
self.assertTrue(tm.equalContents(intersect, second))
# Corner cases
inter = first.intersection(first)
self.assertIs(inter, first)
# non-iterable input
assertRaisesRegexp(TypeError, "iterable", first.intersection, 0.5)
idx1 = Index([1, 2, 3, 4, 5], name='idx')
# if target has the same name, it is preserved
idx2 = Index([3, 4, 5, 6, 7], name='idx')
expected2 = Index([3, 4, 5], name='idx')
result2 = idx1.intersection(idx2)
self.assertTrue(result2.equals(expected2))
self.assertEqual(result2.name, expected2.name)
# if target name is different, it will be reset
idx3 = Index([3, 4, 5, 6, 7], name='other')
expected3 = Index([3, 4, 5], name=None)
result3 = idx1.intersection(idx3)
self.assertTrue(result3.equals(expected3))
self.assertEqual(result3.name, expected3.name)
# non monotonic
idx1 = Index([5, 3, 2, 4, 1], name='idx')
idx2 = Index([4, 7, 6, 5, 3], name='idx')
result2 = idx1.intersection(idx2)
self.assertTrue(tm.equalContents(result2, expected2))
self.assertEqual(result2.name, expected2.name)
idx3 = Index([4, 7, 6, 5, 3], name='other')
result3 = idx1.intersection(idx3)
self.assertTrue(tm.equalContents(result3, expected3))
self.assertEqual(result3.name, expected3.name)
# non-monotonic non-unique
idx1 = Index(['A','B','A','C'])
idx2 = Index(['B','D'])
expected = Index(['B'], dtype='object')
result = idx1.intersection(idx2)
self.assertTrue(result.equals(expected))
def test_union(self):
first = self.strIndex[5:20]
second = self.strIndex[:10]
everything = self.strIndex[:20]
union = first.union(second)
self.assertTrue(tm.equalContents(union, everything))
# Corner cases
union = first.union(first)
self.assertIs(union, first)
union = first.union([])
self.assertIs(union, first)
union = Index([]).union(first)
self.assertIs(union, first)
# non-iterable input
assertRaisesRegexp(TypeError, "iterable", first.union, 0.5)
# preserve names
first.name = 'A'
second.name = 'A'
union = first.union(second)
self.assertEqual(union.name, 'A')
second.name = 'B'
union = first.union(second)
self.assertIsNone(union.name)
def test_add(self):
# - API change GH 8226
with tm.assert_produces_warning():
self.strIndex + self.strIndex
firstCat = self.strIndex.union(self.dateIndex)
secondCat = self.strIndex.union(self.strIndex)
if self.dateIndex.dtype == np.object_:
appended = np.append(self.strIndex, self.dateIndex)
else:
appended = np.append(self.strIndex, self.dateIndex.astype('O'))
self.assertTrue(tm.equalContents(firstCat, appended))
self.assertTrue(tm.equalContents(secondCat, self.strIndex))
tm.assert_contains_all(self.strIndex, firstCat)
tm.assert_contains_all(self.strIndex, secondCat)
tm.assert_contains_all(self.dateIndex, firstCat)
def test_append_multiple(self):
index = Index(['a', 'b', 'c', 'd', 'e', 'f'])
foos = [index[:2], index[2:4], index[4:]]
result = foos[0].append(foos[1:])
self.assertTrue(result.equals(index))
# empty
result = index.append([])
self.assertTrue(result.equals(index))
def test_append_empty_preserve_name(self):
left = Index([], name='foo')
right = Index([1, 2, 3], name='foo')
result = left.append(right)
self.assertEqual(result.name, 'foo')
left = Index([], name='foo')
right = Index([1, 2, 3], name='bar')
result = left.append(right)
self.assertIsNone(result.name)
def test_add_string(self):
# from bug report
index = Index(['a', 'b', 'c'])
index2 = index + 'foo'
self.assertNotIn('a', index2)
self.assertIn('afoo', index2)
def test_iadd_string(self):
index = pd.Index(['a', 'b', 'c'])
# doesn't fail test unless there is a check before `+=`
self.assertIn('a', index)
index += '_x'
self.assertIn('a_x', index)
def test_difference(self):
first = self.strIndex[5:20]
second = self.strIndex[:10]
answer = self.strIndex[10:20]
first.name = 'name'
# different names
result = first.difference(second)
self.assertTrue(tm.equalContents(result, answer))
self.assertEqual(result.name, None)
# same names
second.name = 'name'
result = first.difference(second)
self.assertEqual(result.name, 'name')
# with empty
result = first.difference([])
self.assertTrue(tm.equalContents(result, first))
self.assertEqual(result.name, first.name)
# with everything
result = first.difference(first)
self.assertEqual(len(result), 0)
self.assertEqual(result.name, first.name)
# non-iterable input
assertRaisesRegexp(TypeError, "iterable", first.diff, 0.5)
def test_symmetric_diff(self):
# smoke
idx1 = Index([1, 2, 3, 4], name='idx1')
idx2 = Index([2, 3, 4, 5])
result = idx1.sym_diff(idx2)
expected = Index([1, 5])
self.assertTrue(tm.equalContents(result, expected))
self.assertIsNone(result.name)
# __xor__ syntax
expected = idx1 ^ idx2
self.assertTrue(tm.equalContents(result, expected))
self.assertIsNone(result.name)
# multiIndex
idx1 = MultiIndex.from_tuples(self.tuples)
idx2 = MultiIndex.from_tuples([('foo', 1), ('bar', 3)])
result = idx1.sym_diff(idx2)
expected = MultiIndex.from_tuples([('bar', 2), ('baz', 3), ('bar', 3)])
self.assertTrue(tm.equalContents(result, expected))
# nans:
# GH #6444, sorting of nans. Make sure the number of nans is right
# and the correct non-nan values are there. punt on sorting.
idx1 = Index([1, 2, 3, np.nan])
idx2 = Index([0, 1, np.nan])
result = idx1.sym_diff(idx2)
# expected = Index([0.0, np.nan, 2.0, 3.0, np.nan])
nans = pd.isnull(result)
self.assertEqual(nans.sum(), 2)
self.assertEqual((~nans).sum(), 3)
[self.assertIn(x, result) for x in [0.0, 2.0, 3.0]]
# other not an Index:
idx1 = Index([1, 2, 3, 4], name='idx1')
idx2 = np.array([2, 3, 4, 5])
expected = Index([1, 5])
result = idx1.sym_diff(idx2)
self.assertTrue(tm.equalContents(result, expected))
self.assertEqual(result.name, 'idx1')
result = idx1.sym_diff(idx2, result_name='new_name')
self.assertTrue(tm.equalContents(result, expected))
self.assertEqual(result.name, 'new_name')
# other isn't iterable
with tm.assertRaises(TypeError):
Index(idx1,dtype='object') - 1
def test_pickle(self):
self.verify_pickle(self.strIndex)
self.strIndex.name = 'foo'
self.verify_pickle(self.strIndex)
self.verify_pickle(self.dateIndex)
def test_is_numeric(self):
self.assertFalse(self.dateIndex.is_numeric())
self.assertFalse(self.strIndex.is_numeric())
self.assertTrue(self.intIndex.is_numeric())
self.assertTrue(self.floatIndex.is_numeric())
def test_is_object(self):
self.assertTrue(self.strIndex.is_object())
self.assertTrue(self.boolIndex.is_object())
self.assertFalse(self.intIndex.is_object())
self.assertFalse(self.dateIndex.is_object())
self.assertFalse(self.floatIndex.is_object())
def test_is_all_dates(self):
self.assertTrue(self.dateIndex.is_all_dates)
self.assertFalse(self.strIndex.is_all_dates)
self.assertFalse(self.intIndex.is_all_dates)
def test_summary(self):
self._check_method_works(Index.summary)
# GH3869
ind = Index(['{other}%s', "~:{range}:0"], name='A')
result = ind.summary()
# shouldn't be formatted accidentally.
self.assertIn('~:{range}:0', result)
self.assertIn('{other}%s', result)
def test_format(self):
self._check_method_works(Index.format)
index = Index([datetime.now()])
formatted = index.format()
expected = [str(index[0])]
self.assertEqual(formatted, expected)
# 2845
index = Index([1, 2.0+3.0j, np.nan])
formatted = index.format()
expected = [str(index[0]), str(index[1]), u('NaN')]
self.assertEqual(formatted, expected)
# is this really allowed?
index = Index([1, 2.0+3.0j, None])
formatted = index.format()
expected = [str(index[0]), str(index[1]), u('NaN')]
self.assertEqual(formatted, expected)
self.strIndex[:0].format()
def test_format_with_name_time_info(self):
# bug I fixed 12/20/2011
inc = timedelta(hours=4)
dates = Index([dt + inc for dt in self.dateIndex], name='something')
formatted = dates.format(name=True)
self.assertEqual(formatted[0], 'something')
def test_format_datetime_with_time(self):
t = Index([datetime(2012, 2, 7), datetime(2012, 2, 7, 23)])
result = t.format()
expected = ['2012-02-07 00:00:00', '2012-02-07 23:00:00']
self.assertEqual(len(result), 2)
self.assertEqual(result, expected)
def test_format_none(self):
values = ['a', 'b', 'c', None]
idx = Index(values)
idx.format()
self.assertIsNone(idx[3])
def test_take(self):
indexer = [4, 3, 0, 2]
result = self.dateIndex.take(indexer)
expected = self.dateIndex[indexer]
self.assertTrue(result.equals(expected))
def _check_method_works(self, method):
method(self.empty)
method(self.dateIndex)
method(self.unicodeIndex)
method(self.strIndex)
method(self.intIndex)
method(self.tuples)
def test_get_indexer(self):
idx1 = Index([1, 2, 3, 4, 5])
idx2 = Index([2, 4, 6])
r1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, [1, 3, -1])
r1 = idx2.get_indexer(idx1, method='pad')
assert_almost_equal(r1, [-1, 0, 0, 1, 1])
rffill1 = idx2.get_indexer(idx1, method='ffill')
assert_almost_equal(r1, rffill1)
r1 = idx2.get_indexer(idx1, method='backfill')
assert_almost_equal(r1, [0, 0, 1, 1, 2])
rbfill1 = idx2.get_indexer(idx1, method='bfill')
assert_almost_equal(r1, rbfill1)
def test_slice_locs(self):
for dtype in [int, float]:
idx = Index(np.array([0, 1, 2, 5, 6, 7, 9, 10], dtype=dtype))
n = len(idx)
self.assertEqual(idx.slice_locs(start=2), (2, n))
self.assertEqual(idx.slice_locs(start=3), (3, n))
self.assertEqual(idx.slice_locs(3, 8), (3, 6))
self.assertEqual(idx.slice_locs(5, 10), (3, n))
self.assertEqual(idx.slice_locs(5.0, 10.0), (3, n))
self.assertEqual(idx.slice_locs(4.5, 10.5), (3, 8))
self.assertEqual(idx.slice_locs(end=8), (0, 6))
self.assertEqual(idx.slice_locs(end=9), (0, 7))
idx2 = idx[::-1]
self.assertEqual(idx2.slice_locs(8, 2), (2, 6))
self.assertEqual(idx2.slice_locs(8.5, 1.5), (2, 6))
self.assertEqual(idx2.slice_locs(7, 3), (2, 5))
self.assertEqual(idx2.slice_locs(10.5, -1), (0, n))
def test_slice_locs_dup(self):
idx = Index(['a', 'a', 'b', 'c', 'd', 'd'])
self.assertEqual(idx.slice_locs('a', 'd'), (0, 6))
self.assertEqual(idx.slice_locs(end='d'), (0, 6))
self.assertEqual(idx.slice_locs('a', 'c'), (0, 4))
self.assertEqual(idx.slice_locs('b', 'd'), (2, 6))
idx2 = idx[::-1]
self.assertEqual(idx2.slice_locs('d', 'a'), (0, 6))
self.assertEqual(idx2.slice_locs(end='a'), (0, 6))
self.assertEqual(idx2.slice_locs('d', 'b'), (0, 4))
self.assertEqual(idx2.slice_locs('c', 'a'), (2, 6))
for dtype in [int, float]:
idx = Index(np.array([10, 12, 12, 14], dtype=dtype))
self.assertEqual(idx.slice_locs(12, 12), (1, 3))
self.assertEqual(idx.slice_locs(11, 13), (1, 3))
idx2 = idx[::-1]
self.assertEqual(idx2.slice_locs(12, 12), (1, 3))
self.assertEqual(idx2.slice_locs(13, 11), (1, 3))
def test_slice_locs_na(self):
idx = Index([np.nan, 1, 2])
self.assertRaises(KeyError, idx.slice_locs, start=1.5)
self.assertRaises(KeyError, idx.slice_locs, end=1.5)
self.assertEqual(idx.slice_locs(1), (1, 3))
self.assertEqual(idx.slice_locs(np.nan), (0, 3))
idx = Index([np.nan, np.nan, 1, 2])
self.assertRaises(KeyError, idx.slice_locs, np.nan)
def test_drop(self):
n = len(self.strIndex)
dropped = self.strIndex.drop(self.strIndex[lrange(5, 10)])
expected = self.strIndex[lrange(5) + lrange(10, n)]
self.assertTrue(dropped.equals(expected))
self.assertRaises(ValueError, self.strIndex.drop, ['foo', 'bar'])
dropped = self.strIndex.drop(self.strIndex[0])
expected = self.strIndex[1:]
self.assertTrue(dropped.equals(expected))
ser = Index([1, 2, 3])
dropped = ser.drop(1)
expected = Index([2, 3])
self.assertTrue(dropped.equals(expected))
def test_tuple_union_bug(self):
import pandas
import numpy as np
aidx1 = np.array([(1, 'A'), (2, 'A'), (1, 'B'), (2, 'B')],
dtype=[('num', int), ('let', 'a1')])
aidx2 = np.array([(1, 'A'), (2, 'A'), (1, 'B'), (2, 'B'), (1, 'C'), (2,
'C')], dtype=[('num', int), ('let', 'a1')])
idx1 = pandas.Index(aidx1)
idx2 = pandas.Index(aidx2)
# intersection broken?
int_idx = idx1.intersection(idx2)
# needs to be 1d like idx1 and idx2
expected = idx1[:4] # pandas.Index(sorted(set(idx1) & set(idx2)))
self.assertEqual(int_idx.ndim, 1)
self.assertTrue(int_idx.equals(expected))
# union broken
union_idx = idx1.union(idx2)
expected = idx2
self.assertEqual(union_idx.ndim, 1)
self.assertTrue(union_idx.equals(expected))
def test_is_monotonic_incomparable(self):
index = Index([5, datetime.now(), 7])
self.assertFalse(index.is_monotonic)
self.assertFalse(index.is_monotonic_decreasing)
def test_get_set_value(self):
values = np.random.randn(100)
date = self.dateIndex[67]
assert_almost_equal(self.dateIndex.get_value(values, date),
values[67])
self.dateIndex.set_value(values, date, 10)
self.assertEqual(values[67], 10)
def test_isin(self):
values = ['foo', 'bar', 'quux']
idx = Index(['qux', 'baz', 'foo', 'bar'])
result = idx.isin(values)
expected = np.array([False, False, True, True])
self.assert_numpy_array_equal(result, expected)
# empty, return dtype bool
idx = Index([])
result = idx.isin(values)
self.assertEqual(len(result), 0)
self.assertEqual(result.dtype, np.bool_)
def test_isin_nan(self):
self.assert_numpy_array_equal(
Index(['a', np.nan]).isin([np.nan]), [False, True])
self.assert_numpy_array_equal(
Index(['a', pd.NaT]).isin([pd.NaT]), [False, True])
self.assert_numpy_array_equal(
Index(['a', np.nan]).isin([float('nan')]), [False, False])
self.assert_numpy_array_equal(
Index(['a', np.nan]).isin([pd.NaT]), [False, False])
# Float64Index overrides isin, so must be checked separately
self.assert_numpy_array_equal(
Float64Index([1.0, np.nan]).isin([np.nan]), [False, True])
self.assert_numpy_array_equal(
Float64Index([1.0, np.nan]).isin([float('nan')]), [False, True])
self.assert_numpy_array_equal(
Float64Index([1.0, np.nan]).isin([pd.NaT]), [False, True])
def test_isin_level_kwarg(self):
def check_idx(idx):
values = idx.tolist()[-2:] + ['nonexisting']
expected = np.array([False, False, True, True])
self.assert_numpy_array_equal(expected, idx.isin(values, level=0))
self.assert_numpy_array_equal(expected, idx.isin(values, level=-1))
self.assertRaises(IndexError, idx.isin, values, level=1)
self.assertRaises(IndexError, idx.isin, values, level=10)
self.assertRaises(IndexError, idx.isin, values, level=-2)
self.assertRaises(KeyError, idx.isin, values, level=1.0)
self.assertRaises(KeyError, idx.isin, values, level='foobar')
idx.name = 'foobar'
self.assert_numpy_array_equal(expected,
idx.isin(values, level='foobar'))
self.assertRaises(KeyError, idx.isin, values, level='xyzzy')
self.assertRaises(KeyError, idx.isin, values, level=np.nan)
check_idx(Index(['qux', 'baz', 'foo', 'bar']))
# Float64Index overrides isin, so must be checked separately
check_idx(Float64Index([1.0, 2.0, 3.0, 4.0]))
def test_boolean_cmp(self):
values = [1, 2, 3, 4]
idx = Index(values)
res = (idx == values)
self.assert_numpy_array_equal(res,np.array([True,True,True,True],dtype=bool))
def test_get_level_values(self):
result = self.strIndex.get_level_values(0)
self.assertTrue(result.equals(self.strIndex))
def test_slice_keep_name(self):
idx = Index(['a', 'b'], name='asdf')
self.assertEqual(idx.name, idx[1:].name)
def test_join_self(self):
# instance attributes of the form self.<name>Index
indices = 'unicode', 'str', 'date', 'int', 'float'
kinds = 'outer', 'inner', 'left', 'right'
for index_kind in indices:
res = getattr(self, '{0}Index'.format(index_kind))
for kind in kinds:
joined = res.join(res, how=kind)
self.assertIs(res, joined)
def test_indexing_doesnt_change_class(self):
idx = Index([1, 2, 3, 'a', 'b', 'c'])
self.assertTrue(idx[1:3].identical(
pd.Index([2, 3], dtype=np.object_)))
self.assertTrue(idx[[0,1]].identical(
pd.Index([1, 2], dtype=np.object_)))
def test_outer_join_sort(self):
left_idx = Index(np.random.permutation(15))
right_idx = tm.makeDateIndex(10)
with tm.assert_produces_warning(RuntimeWarning):
joined = left_idx.join(right_idx, how='outer')
# right_idx in this case because DatetimeIndex has join precedence over
# Int64Index
expected = right_idx.astype(object).union(left_idx.astype(object))
tm.assert_index_equal(joined, expected)
def test_nan_first_take_datetime(self):
idx = Index([pd.NaT, Timestamp('20130101'), Timestamp('20130102')])
res = idx.take([-1, 0, 1])
exp = Index([idx[-1], idx[0], idx[1]])
tm.assert_index_equal(res, exp)
def test_reindex_preserves_name_if_target_is_list_or_ndarray(self):
# GH6552
idx = pd.Index([0, 1, 2])
dt_idx = pd.date_range('20130101', periods=3)
idx.name = None
self.assertEqual(idx.reindex([])[0].name, None)
self.assertEqual(idx.reindex(np.array([]))[0].name, None)
self.assertEqual(idx.reindex(idx.tolist())[0].name, None)
self.assertEqual(idx.reindex(idx.tolist()[:-1])[0].name, None)
self.assertEqual(idx.reindex(idx.values)[0].name, None)
self.assertEqual(idx.reindex(idx.values[:-1])[0].name, None)
# Must preserve name even if dtype changes.
self.assertEqual(idx.reindex(dt_idx.values)[0].name, None)
self.assertEqual(idx.reindex(dt_idx.tolist())[0].name, None)
idx.name = 'foobar'
self.assertEqual(idx.reindex([])[0].name, 'foobar')
self.assertEqual(idx.reindex(np.array([]))[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.tolist())[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.tolist()[:-1])[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.values)[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.values[:-1])[0].name, 'foobar')
# Must preserve name even if dtype changes.
self.assertEqual(idx.reindex(dt_idx.values)[0].name, 'foobar')
self.assertEqual(idx.reindex(dt_idx.tolist())[0].name, 'foobar')
def test_reindex_preserves_type_if_target_is_empty_list_or_array(self):
# GH7774
idx = pd.Index(list('abc'))
def get_reindex_type(target):
return idx.reindex(target)[0].dtype.type
self.assertEqual(get_reindex_type([]), np.object_)
self.assertEqual(get_reindex_type(np.array([])), np.object_)
self.assertEqual(get_reindex_type(np.array([], dtype=np.int64)),
np.object_)
def test_reindex_doesnt_preserve_type_if_target_is_empty_index(self):
# GH7774
idx = pd.Index(list('abc'))
def get_reindex_type(target):
return idx.reindex(target)[0].dtype.type
self.assertEqual(get_reindex_type( | pd.Int64Index([]) | pandas.Int64Index |
"""
Procedures needed for IATE estimation.
Created on Thu Dec 8 15:48:57 2020.
@author: MLechner
# -*- coding: utf-8 -*-
"""
from concurrent import futures
import numpy as np
import pandas as pd
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
import scipy.stats as sct
import matplotlib.pyplot as plt
import ray
from mcf import mcf_ate_functions as mcf_ate
from mcf import general_purpose as gp
from mcf import general_purpose_estimation as gp_est
from mcf import general_purpose_mcf as gp_mcf
def iate_est_mp(weights, data_file, y_dat, cl_dat, w_dat, v_dict, c_dict,
w_ate=None, balancing_test=False, save_predictions=True,
lc_forest=None):
"""
Estimate IATE and their standard errors, plot & save them, MP version.
Parameters
----------
weights : List of lists. For every obs, positive weights are saved.
Alternative: Sparse csr-Matrix.
data_file : String. csv-file with data to make predictions for.
y_dat : Numpy array. All outcome variables.
cl_dat : Numpy array. Cluster variable.
w_dat : Numpy array. Sampling weights.
v_dict : Dict. Variables.
c_dict : Dict. Parameters.
w_ate: Numpy array. Weights of ATE estimation. Default = None.
balancing_test : Bool. Balancing test. Default = False.
save_predictions : Bool. Save predictions to file. Default = True.
Returns
-------
post_estimation_file : String. Name of file with predictions.
pot_y : Numpy array. Potential outcomes.
pot_y_var: Numpy array. Standard errors of potential outcomes.
iate: Numpy array. IATEs.
iate_se: Numpy array. Standard errors of IATEs.
(names_pot_iate, names_pot_iate0): Tuple of list of strings.
names_pot_iate: List of strings: All names of IATEs in file.
names_pot_iate0: Only those names related to first category.
"""
def warn_text(c_dict):
if c_dict['with_output'] and c_dict['verbose']:
print('If the prediction file is large, this step may take long. If',
'nothing seems to happen, it may be worth trying to do the',
'estimation without a sparse weight matrix. This needs more',
'memory, but could be substantially faster',
'(weight_as_sparse = False).')
if c_dict['with_output'] and c_dict['verbose'] and save_predictions:
print('\nComputing IATEs 1/2 (potential outcomes)')
if c_dict['weight_as_sparse']:
n_x = weights[0].shape[0]
else:
n_x = len(weights)
n_y = len(y_dat)
no_of_out = len(v_dict['y_name'])
larger_0 = np.zeros(c_dict['no_of_treat'])
equal_0 = np.zeros_like(larger_0)
mean_pos = np.zeros_like(larger_0)
std_pos = np.zeros_like(larger_0)
gini_all = np.zeros_like(larger_0)
gini_pos = np.zeros_like(larger_0)
share_censored = np.zeros_like(larger_0)
share_largest_q = np.zeros((c_dict['no_of_treat'], 3))
sum_larger = np.zeros((c_dict['no_of_treat'], len(c_dict['q_w'])))
obs_larger = np.zeros_like(sum_larger)
pot_y = np.empty((n_x, c_dict['no_of_treat'], no_of_out))
if c_dict['iate_se_flag']:
pot_y_var = np.empty_like(pot_y)
pot_y_m_ate = np.empty_like(pot_y)
pot_y_m_ate_var = np.empty_like(pot_y)
else:
pot_y_var = pot_y_m_ate = pot_y_m_ate_var = w_ate = None
if w_ate is not None:
w_ate = w_ate[0, :, :]
if not c_dict['w_yes']:
w_dat = None
if c_dict['iate_se_flag']:
no_of_cluster = len(np.unique(cl_dat)) if c_dict['cluster_std'
] else None
else:
no_of_cluster = None
l1_to_9 = [None] * n_x
if c_dict['no_parallel'] < 1.5:
maxworkers = 1
else:
if c_dict['mp_automatic']:
maxworkers = gp_mcf.find_no_of_workers(c_dict['no_parallel'],
c_dict['sys_share'])
else:
maxworkers = c_dict['no_parallel']
if c_dict['with_output'] and c_dict['verbose']:
print('Number of parallel processes: ', maxworkers)
if c_dict['weight_as_sparse']:
iterator = len(weights)
if maxworkers == 1:
for idx in range(n_x):
if c_dict['weight_as_sparse']:
weights_idx = [weights[t_idx].getrow(idx) for
t_idx in range(iterator)]
else:
weights_idx = weights[idx]
ret_all_i = iate_func1_for_mp(
idx, weights_idx, cl_dat, no_of_cluster, w_dat, w_ate, y_dat,
no_of_out, n_y, c_dict)
(pot_y, pot_y_var, pot_y_m_ate, pot_y_m_ate_var, l1_to_9,
share_censored) = assign_ret_all_i(
pot_y, pot_y_var, pot_y_m_ate, pot_y_m_ate_var, l1_to_9,
share_censored, ret_all_i, n_x, idx)
if c_dict['with_output'] and c_dict['verbose']:
gp.share_completed(idx+1, n_x)
else:
if c_dict['obs_by_obs']: # this is currently not used, too slow
if c_dict['mp_with_ray']:
if c_dict['mem_object_store_3'] is None:
if not ray.is_initialized():
ray.init(num_cpus=maxworkers, include_dashboard=False)
else:
if not ray.is_initialized():
ray.init(
num_cpus=maxworkers, include_dashboard=False,
object_store_memory=c_dict['mem_object_store_3'])
if c_dict['with_output'] and c_dict['verbose']:
print("Size of Ray Object Store: ", round(
c_dict['mem_object_store_3']/(1024*1024)), " MB")
if c_dict['weight_as_sparse']:
still_running = [ray_iate_func1_for_mp.remote(
idx, [weights[t_idx].getrow(idx) for t_idx in
range(iterator)], cl_dat, no_of_cluster, w_dat,
w_ate, y_dat, no_of_out, n_y, c_dict)
for idx in range(n_x)]
warn_text(c_dict)
else:
still_running = [ray_iate_func1_for_mp.remote(
idx, weights[idx], cl_dat, no_of_cluster, w_dat,
w_ate, y_dat, no_of_out, n_y, c_dict)
for idx in range(n_x)]
jdx = 0
while len(still_running) > 0:
finished, still_running = ray.wait(still_running)
finished_res = ray.get(finished)
for ret_all_i in finished_res:
(pot_y, pot_y_var, pot_y_m_ate, pot_y_m_ate_var,
l1_to_9, share_censored) = assign_ret_all_i(
pot_y, pot_y_var, pot_y_m_ate, pot_y_m_ate_var,
l1_to_9, share_censored, ret_all_i, n_x)
if c_dict['with_output'] and c_dict['verbose']:
gp.share_completed(jdx+1, n_x)
jdx += 1
if 'rest' in c_dict['_mp_ray_del']:
del finished_res, finished
if c_dict['_mp_ray_shutdown']:
ray.shutdown()
else:
with futures.ProcessPoolExecutor(max_workers=maxworkers
) as fpp:
if c_dict['weight_as_sparse']:
ret_fut = {fpp.submit(
iate_func1_for_mp, idx,
[weights[t_idx].getrow(idx) for t_idx in
range(iterator)], cl_dat, no_of_cluster, w_dat,
w_ate, y_dat, no_of_out, n_y, c_dict):
idx for idx in range(n_x)}
warn_text(c_dict)
else:
ret_fut = {fpp.submit(
iate_func1_for_mp, idx, weights[idx], cl_dat,
no_of_cluster, w_dat, w_ate, y_dat, no_of_out, n_y,
c_dict): idx for idx in range(n_x)}
for jdx, frv in enumerate(futures.as_completed(ret_fut)):
ret_all_i = frv.result()
del ret_fut[frv]
del frv
(pot_y, pot_y_var, pot_y_m_ate, pot_y_m_ate_var,
l1_to_9, share_censored) = assign_ret_all_i(
pot_y, pot_y_var, pot_y_m_ate, pot_y_m_ate_var,
l1_to_9, share_censored, ret_all_i, n_x)
if c_dict['with_output'] and c_dict['verbose']:
gp.share_completed(jdx+1, n_x)
else:
rows_per_split = c_dict['max_elements_per_split'] / n_y
no_of_splits = round(n_x / rows_per_split)
no_of_splits = max(no_of_splits, maxworkers)
no_of_splits = min(no_of_splits, n_x)
if c_dict['with_output'] and c_dict['verbose']:
print('IATE-1: Avg. number of obs per split: {:5.2f}.'.format(
n_x / no_of_splits), ' Number of splits: ', no_of_splits)
obs_idx_list = np.array_split(np.arange(n_x), no_of_splits)
if c_dict['mp_with_ray']:
if c_dict['mem_object_store_3'] is None:
if not ray.is_initialized():
ray.init(num_cpus=maxworkers, include_dashboard=False)
else:
if not ray.is_initialized():
ray.init(
num_cpus=maxworkers, include_dashboard=False,
object_store_memory=c_dict['mem_object_store_3'])
if c_dict['with_output'] and c_dict['verbose']:
print("Size of Ray Object Store: ", round(
c_dict['mem_object_store_3']/(1024*1024)), " MB")
if c_dict['weight_as_sparse']:
still_running = [ray_iate_func1_for_mp_many_obs.remote(
idx, [weights[t_idx][idx, :] for t_idx in
range(iterator)], cl_dat, no_of_cluster,
w_dat, w_ate, y_dat, no_of_out, n_y, c_dict)
for idx in obs_idx_list]
warn_text(c_dict)
else:
still_running = [ray_iate_func1_for_mp_many_obs.remote(
idx, [weights[idxx] for idxx in idx], cl_dat,
no_of_cluster, w_dat, w_ate, y_dat, no_of_out, n_y,
c_dict) for idx in obs_idx_list]
jdx = 0
while len(still_running) > 0:
finished, still_running = ray.wait(still_running)
finished_res = ray.get(finished)
for ret_all_i_list in finished_res:
for ret_all_i in ret_all_i_list:
(pot_y, pot_y_var, pot_y_m_ate, pot_y_m_ate_var,
l1_to_9, share_censored) = assign_ret_all_i(
pot_y, pot_y_var, pot_y_m_ate, pot_y_m_ate_var,
l1_to_9, share_censored, ret_all_i, n_x)
if c_dict['with_output'] and c_dict['verbose']:
gp.share_completed(jdx+1, no_of_splits)
jdx += 1
if 'rest' in c_dict['_mp_ray_del']:
del finished_res, finished
if c_dict['_mp_ray_shutdown']:
ray.shutdown()
else:
with futures.ProcessPoolExecutor(max_workers=maxworkers
) as fpp:
if c_dict['weight_as_sparse']:
ret_fut = {fpp.submit(
iate_func1_for_mp_many_obs, idx,
[weights[t_idx][idx, :] for t_idx in
range(iterator)], cl_dat, no_of_cluster, w_dat,
w_ate, y_dat, no_of_out, n_y, c_dict):
idx for idx in obs_idx_list}
else:
ret_fut = {fpp.submit(
iate_func1_for_mp_many_obs, idx,
[weights[idxx] for idxx in idx], cl_dat,
no_of_cluster, w_dat, w_ate, y_dat, no_of_out,
n_y, c_dict): idx for idx in obs_idx_list}
for jdx, frv in enumerate(futures.as_completed(ret_fut)):
ret_all_i_list = frv.result()
del ret_fut[frv]
del frv
for ret_all_i in ret_all_i_list:
(pot_y, pot_y_var, pot_y_m_ate, pot_y_m_ate_var,
l1_to_9, share_censored) = assign_ret_all_i(
pot_y, pot_y_var, pot_y_m_ate, pot_y_m_ate_var,
l1_to_9, share_censored, ret_all_i, n_x)
if c_dict['with_output'] and c_dict['verbose']:
gp.share_completed(jdx+1, no_of_splits)
for idx in range(n_x):
larger_0 += l1_to_9[idx][0]
equal_0 += l1_to_9[idx][1]
mean_pos += l1_to_9[idx][2]
std_pos += l1_to_9[idx][3]
gini_all += l1_to_9[idx][4]
gini_pos += l1_to_9[idx][5]
share_largest_q += l1_to_9[idx][6]
sum_larger += l1_to_9[idx][7]
obs_larger += l1_to_9[idx][8]
if c_dict['with_output'] and (not balancing_test) and save_predictions:
print('\n')
print('=' * 80)
print('Analysis of weights (normalised to add to 1): ', 'IATE',
'(stats are averaged over all effects)')
mcf_ate.print_weight_stat(
larger_0 / n_x, equal_0 / n_x, mean_pos / n_x, std_pos / n_x,
gini_all / n_x, gini_pos / n_x, share_largest_q / n_x,
sum_larger / n_x, obs_larger / n_x, c_dict, share_censored)
if c_dict['with_output'] and c_dict['verbose'] and save_predictions:
print('\nComputing IATEs 2/2 (effects)')
dim_3 = round(c_dict['no_of_treat'] * (c_dict['no_of_treat'] - 1) / 2)
iate = np.empty((n_x, no_of_out, dim_3, 2))
if c_dict['iate_se_flag']:
iate_se = np.empty_like(iate)
iate_p = np.empty_like(iate)
else:
iate_se = iate_p = None
# obs x outcome x effects x type_of_effect
if c_dict['no_parallel'] < 1.5:
maxworkers = 1
else:
if c_dict['mp_automatic']:
maxworkers = gp_mcf.find_no_of_workers(c_dict['no_parallel'],
c_dict['sys_share'])
else:
maxworkers = c_dict['no_parallel']
if c_dict['with_output'] and c_dict['verbose']:
print('Number of parallel processes: ', maxworkers)
if maxworkers == 1:
for idx in range(n_x):
if c_dict['iate_se_flag']:
ret_all_idx = iate_func2_for_mp(
idx, no_of_out, pot_y[idx], pot_y_var[idx],
pot_y_m_ate[idx], pot_y_m_ate_var[idx], c_dict)
else:
ret_all_idx = iate_func2_for_mp(
idx, no_of_out, pot_y[idx], None, None, None, c_dict)
if c_dict['with_output'] and c_dict['verbose']:
gp.share_completed(idx+1, n_x)
iate[idx, :, :, :] = ret_all_idx[1]
if c_dict['iate_se_flag']:
iate_se[idx, :, :, :] = ret_all_idx[2]
iate_p[idx, :, :, :] = ret_all_idx[3]
if idx == n_x-1:
effect_list = ret_all_idx[4]
else:
if c_dict['mp_with_ray']:
if c_dict['mem_object_store_3'] is None:
if not ray.is_initialized():
ray.init(num_cpus=maxworkers, include_dashboard=False)
else:
if not ray.is_initialized():
ray.init(
num_cpus=maxworkers, include_dashboard=False,
object_store_memory=c_dict['mem_object_store_3'])
if c_dict['with_output'] and c_dict['verbose']:
print("Size of Ray Object Store: ", round(
c_dict['mem_object_store_3']/(1024*1024)), " MB")
if c_dict['iate_se_flag']:
still_running = [ray_iate_func2_for_mp.remote(
idx, no_of_out, pot_y[idx], pot_y_var[idx],
pot_y_m_ate[idx], pot_y_m_ate_var[idx], c_dict)
for idx in range(n_x)]
else:
still_running = [ray_iate_func2_for_mp.remote(
idx, no_of_out, pot_y[idx], None, None, None, c_dict)
for idx in range(n_x)]
jdx = 0
while len(still_running) > 0:
finished, still_running = ray.wait(still_running)
finished_res = ray.get(finished)
for ret_all_i2 in finished_res:
iix = ret_all_i2[0]
iate[iix, :, :, :] = ret_all_i2[1]
if c_dict['iate_se_flag']:
iate_se[iix, :, :, :] = ret_all_i2[2]
iate_p[iix, :, :, :] = ret_all_i2[3]
if jdx == n_x-1:
effect_list = ret_all_i2[4]
if c_dict['with_output'] and c_dict['verbose']:
gp.share_completed(jdx+1, n_x)
jdx += 1
if 'rest' in c_dict['_mp_ray_del']:
del finished_res, finished
if c_dict['_mp_ray_shutdown']:
ray.shutdown()
else:
with futures.ProcessPoolExecutor(max_workers=maxworkers) as fpp:
ret_fut = {fpp.submit(
iate_func2_for_mp, idx, no_of_out, pot_y[idx],
pot_y_var[idx], pot_y_m_ate[idx], pot_y_m_ate_var[idx],
c_dict): idx for idx in range(n_x)}
for jdx, frv in enumerate(futures.as_completed(ret_fut)):
ret_all_i2 = frv.result()
del ret_fut[frv]
del frv
iix = ret_all_i2[0]
iate[iix, :, :, :] = ret_all_i2[1]
if c_dict['iate_se_flag']:
iate_se[iix, :, :, :] = ret_all_i2[2]
iate_p[iix, :, :, :] = ret_all_i2[3]
if jdx == n_x-1:
effect_list = ret_all_i2[4]
if c_dict['with_output'] and c_dict['verbose']:
gp.share_completed(jdx+1, n_x)
if c_dict['with_output'] and save_predictions:
print_iate(iate, iate_se, iate_p, effect_list, v_dict, c_dict)
# Add results to data file
pot_y_np = np.empty((n_x, no_of_out * c_dict['no_of_treat']))
if c_dict['iate_se_flag']:
pot_y_se_np = np.empty_like(pot_y_np)
dim = round(no_of_out * c_dict['no_of_treat'] * (
c_dict['no_of_treat'] - 1) / 2)
iate_np = np.empty((n_x, dim))
if c_dict['iate_se_flag']:
iate_se_np = np.empty_like(iate_np)
iate_mate_np = np.empty_like(iate_np)
iate_mate_se_np = np.empty_like(iate_np)
jdx = j2dx = 0
name_pot = []
name_eff = []
name_eff0 = []
for o_idx, o_name in enumerate(v_dict['y_name']):
for t_idx, t_name in enumerate(c_dict['d_values']):
name_pot += [o_name + str(t_name)]
pot_y_np[:, jdx] = pot_y[:, t_idx, o_idx]
if c_dict['iate_se_flag']:
pot_y_se_np[:, jdx] = np.sqrt(pot_y_var[:, t_idx, o_idx])
jdx += 1
for t2_idx, t2_name in enumerate(effect_list):
name_eff += [o_name + str(t2_name[0]) + 'vs' + str(t2_name[1])]
if t2_name[1] == c_dict['d_values'][0]: # Usually, control
name_eff0 += [
o_name + str(t2_name[0]) + 'vs' + str(t2_name[1])]
iate_np[:, j2dx] = iate[:, o_idx, t2_idx, 0]
if c_dict['iate_se_flag']:
iate_se_np[:, j2dx] = iate_se[:, o_idx, t2_idx, 0]
iate_mate_np[:, j2dx] = iate[:, o_idx, t2_idx, 1]
iate_mate_se_np[:, j2dx] = iate_se[:, o_idx, t2_idx, 1]
j2dx += 1
name_pot_y = [s + '_pot' for s in name_pot]
name_iate = [s + '_iate' for s in name_eff]
name_iate0 = [s + '_iate' for s in name_eff0]
if c_dict['iate_se_flag']:
name_pot_y_se = [s + '_pot_se' for s in name_pot]
name_iate_se = [s + '_iate_se' for s in name_eff]
name_iate_mate = [s + '_iatemate' for s in name_eff]
name_iate_mate_se = [s + '_iatemate_se' for s in name_eff]
name_iate_se0 = [s + '_iate_se' for s in name_eff0]
name_iate_mate0 = [s + '_iatemate' for s in name_eff0]
name_iate_mate_se0 = [s + '_iatemate_se' for s in name_eff0]
else:
name_pot_y_se = name_iate_se = name_iate_mate = None
name_iate_mate_se = name_iate_se0 = name_iate_mate0 = None
name_iate_mate_se0 = None
if c_dict['with_output'] and save_predictions:
pot_y_df = pd.DataFrame(data=pot_y_np, columns=name_pot_y)
iate_df = pd.DataFrame(data=iate_np, columns=name_iate)
if c_dict['iate_se_flag']:
pot_y_se_df = pd.DataFrame(data=pot_y_se_np, columns=name_pot_y_se)
iate_se_df = pd.DataFrame(data=iate_se_np, columns=name_iate_se)
iate_mate_df = pd.DataFrame(data=iate_mate_np,
columns=name_iate_mate)
iate_mate_se_df = pd.DataFrame(data=iate_mate_se_np,
columns=name_iate_mate_se)
data_df = pd.read_csv(data_file)
if c_dict['iate_se_flag']:
df_list = [data_df, pot_y_df, pot_y_se_df, iate_df, iate_se_df,
iate_mate_df, iate_mate_se_df]
else:
df_list = [data_df, pot_y_df, iate_df]
data_file_new = pd.concat(df_list, axis=1)
gp.delete_file_if_exists(c_dict['pred_sample_with_pred'])
data_file_new.to_csv(c_dict['pred_sample_with_pred'], index=False)
if c_dict['with_output']:
gp.print_descriptive_stats_file(
c_dict['pred_sample_with_pred'], 'all',
c_dict['print_to_file'])
names_pot_iate = {'names_pot_y': name_pot_y,
'names_pot_y_se': name_pot_y_se,
'names_iate': name_iate,
'names_iate_se': name_iate_se,
'names_iate_mate': name_iate_mate,
'names_iate_mate_se': name_iate_mate_se}
names_pot_iate0 = {'names_pot_y': name_pot_y,
'names_pot_y_se': name_pot_y_se,
'names_iate': name_iate0,
'names_iate_se': name_iate_se0,
'names_iate_mate': name_iate_mate0,
'names_iate_mate_se': name_iate_mate_se0}
return (c_dict['pred_sample_with_pred'], pot_y, pot_y_var, iate, iate_se,
(names_pot_iate, names_pot_iate0))
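# Naming convention produced by the loops above when results are written to the
# prediction file: for outcome 'y' and treatment values 0/1 the potential-outcome
# columns are 'y0_pot' / 'y1_pot' (plus '_pot_se'), and the effect columns are of
# the form 'y1vs0_iate' (plus '_iate_se', '_iatemate' and '_iatemate_se').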
def assign_ret_all_i(pot_y, pot_y_var, pot_y_m_ate, pot_y_m_ate_var, l1_to_9,
share_censored, ret_all_i, n_x, idx=None):
"""Use to avoid duplicate code."""
if idx is None:
idx = ret_all_i[0]
pot_y[idx, :, :] = ret_all_i[1]
if pot_y_var is not None:
pot_y_var[idx, :, :] = ret_all_i[2]
pot_y_m_ate[idx, :, :] = ret_all_i[3]
pot_y_m_ate_var[idx, :, :] = ret_all_i[4]
l1_to_9[idx] = ret_all_i[5]
share_censored += ret_all_i[6] / n_x
return (pot_y, pot_y_var, pot_y_m_ate, pot_y_m_ate_var, l1_to_9,
share_censored)
@ray.remote
def ray_iate_func2_for_mp(idx, no_of_out, pot_y_i, pot_y_var_i, pot_y_m_ate_i,
pot_y_m_ate_var_i, c_dict):
"""Make function compatible with Ray."""
return iate_func2_for_mp(idx, no_of_out, pot_y_i, pot_y_var_i,
pot_y_m_ate_i, pot_y_m_ate_var_i, c_dict)
def iate_func2_for_mp(idx, no_of_out, pot_y_i, pot_y_var_i, pot_y_m_ate_i,
pot_y_m_ate_var_i, c_dict):
"""
Do computations for IATE with MP. Second chunk.
Parameters
----------
idx : Int. Counter.
no_of_out : Int. Number of outcomes.
pot_y_i : Numpy array.
pot_y_var_i : Numpy array.
pot_y_m_ate_i : Numpy array.
pot_y_m_ate_var_i : Numpy array.
c_dict : Dict. Parameters.
Returns
-------
idx : Int. Counter.
iate_i : Numpy array.
iate_se_i : Numpy array.
iate_p_i : Numpy array.
effect_list : List.
"""
# obs x outcome x effects x type_of_effect
dim = (no_of_out, round(c_dict['no_of_treat'] * (
c_dict['no_of_treat'] - 1) / 2), 2)
iate_i = np.empty(dim)
if c_dict['iate_se_flag']:
iate_se_i = np.empty(dim) # obs x outcome x effects x type_of_effect
iate_p_i = np.empty(dim)
iterator = 2
else:
iate_se_i = iate_p_i = None
iterator = 1
for o_i in range(no_of_out):
for jdx in range(iterator):
if jdx == 0:
pot_y_ao = pot_y_i[:, o_i]
if c_dict['iate_se_flag']:
pot_y_var_ao = pot_y_var_i[:, o_i]
else:
pot_y_var_ao = None
else:
pot_y_ao = pot_y_m_ate_i[:, o_i]
pot_y_var_ao = pot_y_m_ate_var_i[:, o_i]
ret = gp_mcf.effect_from_potential(
pot_y_ao, pot_y_var_ao, c_dict['d_values'],
se_yes=c_dict['iate_se_flag'])
if c_dict['iate_se_flag']:
(iate_i[o_i, :, jdx], iate_se_i[o_i, :, jdx], _,
iate_p_i[o_i, :, jdx], effect_list) = ret
else:
(iate_i[o_i, :, jdx], _, _, _, effect_list) = ret
return idx, iate_i, iate_se_i, iate_p_i, effect_list
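# The heavy lifting above is done by gp_mcf.effect_from_potential. A minimal,
# self-contained sketch of the idea (an assumption about its core logic, not the
# actual mcf implementation): every treatment is compared with every lower-valued
# treatment, the IATE is the difference of the two potential outcomes, and the
# variance of the difference is the sum of the two variances (independence assumed).
def _sketch_effect_from_potential(pot_y, pot_y_var, d_values):
    """Return pairwise effects, their variances and the compared treatment pairs."""
    effects, variances, pairs = [], [], []
    for i, d1 in enumerate(d_values):
        for j, d0 in enumerate(d_values):
            if j < i:
                effects.append(pot_y[i] - pot_y[j])
                variances.append(pot_y_var[i] + pot_y_var[j])
                pairs.append((d1, d0))
    return np.array(effects), np.array(variances), pairs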
@ray.remote
def ray_iate_func1_for_mp_many_obs(
idx_list, weights_list, cl_dat, no_of_cluster, w_dat, w_ate, y_dat,
no_of_out, n_y, c_dict):
"""Compute IATE for several obs in one loop (MP)."""
return iate_func1_for_mp_many_obs(
idx_list, weights_list, cl_dat, no_of_cluster, w_dat, w_ate, y_dat,
no_of_out, n_y, c_dict)
def iate_func1_for_mp_many_obs(idx_list, weights_list, cl_dat, no_of_cluster,
w_dat, w_ate, y_dat, no_of_out, n_y, c_dict):
"""Compute IATE for several obs in one loop (MP)."""
ret_all = []
if c_dict['weight_as_sparse']:
iterator = len(weights_list)
for i, idx_org in enumerate(idx_list):
if c_dict['weight_as_sparse']:
weights_i = [weights_list[t_idx].getrow(i)
for t_idx in range(iterator)]
else:
weights_i = weights_list[i]
ret = iate_func1_for_mp(idx_org, weights_i, cl_dat, no_of_cluster,
w_dat, w_ate, y_dat, no_of_out, n_y, c_dict)
ret_all.append(ret)
return ret_all
@ray.remote
def ray_iate_func1_for_mp(idx, weights_i, cl_dat, no_of_cluster, w_dat, w_ate,
y_dat, no_of_out, n_y, c_dict):
"""Make function useful for Ray."""
return iate_func1_for_mp(idx, weights_i, cl_dat, no_of_cluster, w_dat,
w_ate, y_dat, no_of_out, n_y, c_dict)
def iate_func1_for_mp(idx, weights_i, cl_dat, no_of_cluster, w_dat, w_ate,
y_dat, no_of_out, n_y, c_dict):
"""
Compute function to be looped over observations for Multiprocessing.
Parameters
----------
idx : Int. Counter.
weights_i : List of int. Indices of non-zero weights.
Alternative: Sparse csr matrix
cl_dat : Numpy vector. Cluster variable.
no_of_cluster : Int. Number of clusters.
w_dat : Numpy vector. Sampling weights.
w_ate : Numpy array. Weights for ATE.
y_dat : Numpy array. Outcome variable.
no_of_out : Int. Number of outcomes.
n_y : Int. Length of outcome data.
c_dict : Dict. Parameters.
Returns
-------
idx: Int. Counter.
pot_y_i: Numpy array.
pot_y_var_i: Numpy array.
pot_y_m_ate_i: Numpy array.
pot_y_m_ate_var_i: Numpy array.
l1_to_9: Tuple of lists.
share_i: Numpy array. Share of weights censored per treatment.
"""
if c_dict['with_output'] and (idx == 0) and not c_dict[
'mp_with_ray'] and c_dict['verbose']:
print('Starting to compute IATE - procedure 1', flush=True)
pot_y_i = np.empty((c_dict['no_of_treat'], no_of_out))
share_i = np.zeros(c_dict['no_of_treat'])
if c_dict['iate_se_flag']:
pot_y_var_i = np.empty_like(pot_y_i)
pot_y_m_ate_i = np.empty_like(pot_y_i)
pot_y_m_ate_var_i = np.empty_like(pot_y_i)
cluster_std = c_dict['cluster_std']
else:
pot_y_var_i = pot_y_m_ate_i = pot_y_m_ate_var_i = w_ate = None
cluster_std = False
if cluster_std:
w_add = np.zeros((c_dict['no_of_treat'], no_of_cluster))
else:
w_add = np.zeros((c_dict['no_of_treat'], n_y))
if c_dict['iate_se_flag']:
w_add_unc = np.zeros((c_dict['no_of_treat'], n_y))
for t_idx in range(c_dict['no_of_treat']):
if c_dict['weight_as_sparse']:
w_index = weights_i[t_idx].indices
w_i = weights_i[t_idx].data
else:
w_index = weights_i[t_idx][0] # Indices of non-zero weights
w_i = weights_i[t_idx][1].copy()
if c_dict['w_yes']:
w_t = w_dat[w_index].reshape(-1)
w_i = w_i * w_t
else:
w_t = None
w_i_sum = np.sum(w_i)
if not (1-1e-10) < w_i_sum < (1+1e-10):
w_i = w_i / w_i_sum
w_i_unc = np.copy(w_i)
if c_dict['max_weight_share'] < 1:
w_i, _, share_i[t_idx] = gp_mcf.bound_norm_weights(
w_i, c_dict['max_weight_share'])
if cluster_std:
cl_i = cl_dat[w_index]
w_all_i = np.zeros(n_y)
w_all_i[w_index] = w_i
w_all_i_unc = np.zeros_like(w_all_i)
w_all_i_unc[w_index] = w_i_unc
else:
cl_i = None
for o_idx in range(no_of_out):
ret = gp_est.weight_var(w_i, y_dat[w_index, o_idx], cl_i, c_dict,
weights=w_t, se_yes=c_dict['iate_se_flag'],
bootstrap=c_dict['se_boot_iate'])
pot_y_i[t_idx, o_idx] = ret[0]
if c_dict['iate_se_flag']:
pot_y_var_i[t_idx, o_idx] = ret[1]
if cluster_std:
ret2 = gp_est.aggregate_cluster_pos_w(
cl_dat, w_all_i, y_dat[:, o_idx], sweights=w_dat)
if o_idx == 0:
w_add[t_idx, :] = np.copy(ret2[0])
if c_dict['iate_se_flag']:
if w_ate is None:
w_diff = w_all_i_unc # Dummy if no w_ate
else:
w_diff = w_all_i_unc - w_ate[t_idx, :]
ret = gp_est.weight_var(
w_diff, y_dat[:, o_idx], cl_dat, c_dict, norm=False,
weights=w_dat, bootstrap=c_dict['se_boot_iate'],
se_yes=c_dict['iate_se_flag'])
else:
if o_idx == 0:
w_add[t_idx, w_index] = ret[2]
if c_dict['iate_se_flag']:
w_i_unc_sum = np.sum(w_i_unc)
if not (1-1e-10) < w_i_unc_sum < (1+1e-10):
w_add_unc[t_idx, w_index] = w_i_unc / w_i_unc_sum
else:
w_add_unc[t_idx, w_index] = w_i_unc
if w_ate is None:
w_diff = w_add_unc[t_idx, :]
else:
w_diff = w_add_unc[t_idx, :] - w_ate[t_idx, :]
if c_dict['iate_se_flag']:
ret = gp_est.weight_var(
w_diff, y_dat[:, o_idx], None, c_dict, norm=False,
weights=w_dat, bootstrap=c_dict['se_boot_iate'],
se_yes=c_dict['iate_se_flag'])
if c_dict['iate_se_flag']:
pot_y_m_ate_i[t_idx, o_idx] = ret[0]
pot_y_m_ate_var_i[t_idx, o_idx] = ret[1]
l1_to_9 = mcf_ate.analyse_weights_ate(w_add, None, c_dict, False)
return (idx, pot_y_i, pot_y_var_i, pot_y_m_ate_i, pot_y_m_ate_var_i,
l1_to_9, share_i)
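# gp_mcf.bound_norm_weights is used above to censor extreme weights. A rough sketch
# of what such a censoring step does (an assumption, not the mcf implementation):
# weights above the admissible maximum share are truncated and the whole vector is
# renormalised so that it sums to one again; the share of truncated weights is
# reported back to the caller.
def _sketch_bound_norm_weights(w_i, max_weight_share):
    """Truncate weights at max_weight_share and renormalise them to sum to one."""
    share_censored = np.mean(w_i > max_weight_share)
    w_bounded = np.minimum(w_i, max_weight_share)
    w_bounded = w_bounded / np.sum(w_bounded)
    return w_bounded, share_censored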
def print_iate(iate, iate_se, iate_p, effect_list, v_dict, c_dict):
"""Print statistics for the two types of IATEs.
Parameters
----------
iate : 4D Numpy array. Effects. (obs x outcome x effects x type_of_effect)
iate_se : 4D Numpy array. Standard errors.
iate_p : 4D Numpy array. p-values.
effect_list : List. Names of effects.
v_dict : Dict. Variables.
c_dict : Dict. Parameters.
Returns
-------
None.
"""
no_outcomes = np.size(iate, axis=1)
n_obs = len(iate)
str_f = '=' * 80
str_m = '-' * 80
str_l = '- ' * 40
print('\n')
print(str_f, '\nDescriptives for IATE estimation', '\n' + str_m)
for types in range(2):
if types == 0:
print('IATE with corresponding statistics', '\n' + str_l)
else:
print('IATE minus ATE with corresponding statistics ',
'(weights not censored)', '\n' + str_l)
for o_idx in range(no_outcomes):
print('\nOutcome variable: ', v_dict['y_name'][o_idx])
print(str_l)
if c_dict['iate_se_flag']:
print('Comparison Mean Median Std Effect > 0',
'mean(SE) sig 10% sig 5% sig 1%')
else:
print('Comparison Mean Median Std Effect > 0')
for jdx, effects in enumerate(effect_list):
print('{:<3} vs {:>3}'.format(effects[0],
effects[1]), end=' ')
est = iate[:, o_idx, jdx, types].reshape(-1)
if c_dict['iate_se_flag']:
stderr = iate_se[:, o_idx, jdx, types].reshape(-1)
p_val = iate_p[:, o_idx, jdx, types].reshape(-1)
print('{:10.5f} {:10.5f} {:10.5f}'.format(
np.mean(est), np.median(est), np.std(est)), end=' ')
if c_dict['iate_se_flag']:
print('{:6.2f}% {:10.5f} {:6.2f}% {:6.2f}% {:6.2f}%'
.format(np.count_nonzero(est > 1e-15) / n_obs * 100,
np.mean(stderr),
np.count_nonzero(p_val < 0.1) / n_obs * 100,
np.count_nonzero(p_val < 0.05) / n_obs * 100,
np.count_nonzero(p_val < 0.01) / n_obs * 100)
)
else:
print()
print(str_m, '\n')
print('-' * 80)
if c_dict['iate_se_flag']:
gp_est.print_se_info(c_dict['cluster_std'], c_dict['se_boot_iate'])
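# post_estimation_iate below plots "sorted effects": the IATEs are ordered by size
# and smoothed with a k-nearest-neighbour style moving average whose window grows
# with the square root of the sample size. A minimal sketch of that smoothing step
# (an assumption, not identical to gp_est.moving_avg_mean_var):
def _sketch_sorted_smoothed_iate(iate_vec, knn_const=1.0):
    """Sort IATEs and smooth the sorted values with a simple moving average."""
    sorted_iate = np.sort(np.asarray(iate_vec, dtype=float))
    k = max(1, int(round(knn_const * np.sqrt(len(sorted_iate)) * 2)))
    kernel = np.ones(k) / k
    return np.convolve(sorted_iate, kernel, mode='same')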
def post_estimation_iate(file_name, iate_pot_all_name, ate_all, ate_all_se,
effect_list, v_dict, c_dict, v_x_type):
"""Do post-estimation analysis: correlations, k-means, sorted effects.
Parameters
----------
file_name : String. Name of file with potential outcomes and effects.
iate_pot_all_name : Dict. Name of potential outcomes and effects.
ate_all : 3D Numpy array. ATEs.
ate_all_se : 3D Numpy array. Std.errors of ATEs.
effect_list : List of list. Explanation of effects related to ATEs.
v_dict : Dict. Variables.
c_dict : Dict. Parameters.
v_x_type : Dict. Covariate names (keys) and their types.
Returns
-------
None.
"""
if c_dict['with_output'] and c_dict['verbose']:
print('\nPost estimation analysis')
if c_dict['relative_to_first_group_only']:
iate_pot_name = iate_pot_all_name[1]
dim_all = (len(ate_all), c_dict['no_of_treat']-1)
ate = np.empty(dim_all)
ate_se = np.empty(dim_all)
jdx = 0
for idx, i_lab in enumerate(effect_list):
if i_lab[1] == c_dict['d_values'][0]: # compare to 1st treat only
ate[:, jdx] = ate_all[:, 0, idx]
ate_se[:, jdx] = ate_all_se[:, 0, idx]
jdx += 1
else:
iate_pot_name = iate_pot_all_name[0]
dim_all = (np.size(ate_all, axis=0), np.size(ate_all, axis=2))
ate = np.empty(dim_all)
ate_se = np.empty_like(ate)
ate = ate_all[:, 0, :]
ate_se = ate_all_se[:, 0, :]
ate = ate.reshape(-1)
ate_se = ate_se.reshape(-1)
data = pd.read_csv(file_name)
pot_y = data[iate_pot_name['names_pot_y']] # deep copies
iate = data[iate_pot_name['names_iate']]
x_name = delete_x_with_catv(v_x_type.keys())
x_dat = data[x_name]
cint = sct.norm.ppf(c_dict['fig_ci_level'] +
0.5 * (1 - c_dict['fig_ci_level']))
if c_dict['bin_corr_yes']:
print('\n' + ('=' * 80), '\nCorrelations of effects with ... in %')
print('-' * 80)
label_ci = str(c_dict['fig_ci_level'] * 100) + '%-CI'
iterator = range(2) if c_dict['iate_se_flag'] else range(1)
for idx in range(len(iate_pot_name['names_iate'])):
for imate in iterator:
if imate == 0:
name_eff = 'names_iate'
ate_t = ate[idx].copy()
ate_se_t = ate_se[idx].copy()
else:
name_eff = 'names_iate_mate'
ate_t = 0
name_iate_t = iate_pot_name[name_eff][idx]
if c_dict['iate_se_flag']:
name_se = name_eff + '_se'
name_iate_se_t = iate_pot_name[name_se][idx]
else:
name_se = name_iate_se_t = None
titel = 'Sorted' + name_iate_t
# Add correlation analysis of IATEs
if c_dict['bin_corr_yes'] and (imate == 0):
print('Effect:', name_iate_t, '\n' + ('-' * 80))
corr = iate.corrwith(data[name_iate_t])
for jdx in corr.keys():
print('{:<20} {:>8.2f}'.format(jdx, corr[jdx] * 100))
print('-' * 80)
corr = pot_y.corrwith(data[name_iate_t])
for jdx in corr.keys():
print('{:<20} {:>8.2f}'.format(jdx, corr[jdx] * 100))
print('-' * 80)
corr = x_dat.corrwith(data[name_iate_t])
corr = corr.sort_values()
for jdx in corr.keys():
if np.abs(corr[jdx].item()) > c_dict['bin_corr_thresh']:
print('{:<20} {:>8.2f}'.format(jdx, corr[jdx] * 100))
print('-' * 80)
iate_temp = data[name_iate_t].to_numpy()
if c_dict['iate_se_flag']:
iate_se_temp = data[name_iate_se_t].to_numpy()
else:
iate_se_temp = None
sorted_ind = np.argsort(iate_temp)
iate_temp = iate_temp[sorted_ind]
if c_dict['iate_se_flag']:
iate_se_temp = iate_se_temp[sorted_ind]
x_values = np.arange(len(iate_temp)) + 1
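            # window size for the moving-average smoothing of the sorted IATEs below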
k = np.round(c_dict['knn_const'] * np.sqrt(len(iate_temp)) * 2)
iate_temp = gp_est.moving_avg_mean_var(iate_temp, k, False)[0]
if c_dict['iate_se_flag']:
iate_se_temp = gp_est.moving_avg_mean_var(
iate_se_temp, k, False)[0]
file_name_jpeg = c_dict['fig_pfad_jpeg'] + '/' + titel + '.jpeg'
file_name_pdf = c_dict['fig_pfad_pdf'] + '/' + titel + '.pdf'
file_name_csv = c_dict['fig_pfad_csv'] + '/' + titel + '.csv'
if c_dict['iate_se_flag']:
upper = iate_temp + iate_se_temp * cint
lower = iate_temp - iate_se_temp * cint
ate_t = ate_t * np.ones(len(iate_temp))
if imate == 0:
ate_upper = ate_t + (ate_se_t * cint * np.ones(len(iate_temp)))
ate_lower = ate_t - (ate_se_t * cint * np.ones(len(iate_temp)))
line_ate = '_-r'
line_iate = '-b'
fig, axe = plt.subplots()
if imate == 0:
label_t = 'IATE'
label_r = 'ATE'
else:
label_t = 'IATE-ATE'
label_r = '_nolegend_'
axe.plot(x_values, iate_temp, line_iate, label=label_t)
axe.set_ylabel(label_t)
axe.plot(x_values, ate_t, line_ate, label=label_r)
if imate == 0:
axe.fill_between(x_values, ate_upper, ate_lower,
alpha=0.3, color='r', label=label_ci)
axe.set_title(titel)
axe.set_xlabel('Ordered observations')
if c_dict['iate_se_flag']:
axe.fill_between(x_values, upper, lower, alpha=0.3, color='b',
label=label_ci)
axe.legend(loc='lower right', shadow=True,
fontsize=c_dict['fig_fontsize'])
if c_dict['post_plots']:
gp.delete_file_if_exists(file_name_jpeg)
gp.delete_file_if_exists(file_name_pdf)
fig.savefig(file_name_jpeg, dpi=c_dict['fig_dpi'])
fig.savefig(file_name_pdf, dpi=c_dict['fig_dpi'])
if c_dict['show_plots']:
plt.show()
else:
plt.close()
iate_temp = iate_temp.reshape(-1, 1)
if c_dict['iate_se_flag']:
upper = upper.reshape(-1, 1)
lower = lower.reshape(-1, 1)
            ate_t = ate_t.reshape(-1, 1)
if imate == 0:
ate_upper = ate_upper.reshape(-1, 1)
ate_lower = ate_lower.reshape(-1, 1)
if c_dict['iate_se_flag']:
effects_et_al = np.concatenate((upper, iate_temp, lower,
ate_t, ate_upper,
ate_lower), axis=1)
                        cols = ['upper', 'effects', 'lower', 'ate', 'ate_u',
                                'ate_l']
else:
effects_et_al = np.concatenate((iate_temp, ate_t,
ate_upper, ate_lower),
axis=1)
                        cols = ['effects', 'ate', 'ate_u', 'ate_l']
else:
effects_et_al = np.concatenate((upper, iate_temp, lower,
ate_t), axis=1)
cols = ['upper', 'effects', 'lower', 'ate']
datasave = pd.DataFrame(data=effects_et_al, columns=cols)
gp.delete_file_if_exists(file_name_csv)
datasave.to_csv(file_name_csv, index=False)
# density plots
if imate == 0:
titel = 'Density' + iate_pot_name['names_iate'][idx]
file_name_jpeg = (c_dict['fig_pfad_jpeg'] + '/' + titel +
'.jpeg')
file_name_pdf = c_dict['fig_pfad_pdf'] + '/' + titel + '.pdf'
file_name_csv = c_dict['fig_pfad_csv'] + '/' + titel + '.csv'
iate_temp = data[name_iate_t].to_numpy()
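                # rule-of-thumb (Silverman) bandwidth for the kernel density estimate below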
bandwidth = gp_est.bandwidth_silverman(iate_temp, 1)
dist = np.abs(iate_temp.max() - iate_temp.min())
low_b = iate_temp.min() - 0.1 * dist
up_b = iate_temp.max() + 0.1 * dist
grid = np.linspace(low_b, up_b, 1000)
density = gp_est.kernel_density(iate_temp, grid, 1, bandwidth)
fig, axe = plt.subplots()
axe.set_title(titel)
axe.set_ylabel('Estimated density')
axe.plot(grid, density, '-b')
axe.fill_between(grid, density, alpha=0.3, color='b')
if c_dict['post_plots']:
gp.delete_file_if_exists(file_name_jpeg)
gp.delete_file_if_exists(file_name_pdf)
fig.savefig(file_name_jpeg, dpi=c_dict['fig_dpi'])
fig.savefig(file_name_pdf, dpi=c_dict['fig_dpi'])
if c_dict['show_plots']:
plt.show()
else:
plt.close()
density = density.reshape(-1, 1)
cols = ['grid', 'density']
                grid = grid.reshape(-1, 1)
effects_et_al = np.concatenate((grid, density), axis=1)
datasave = pd.DataFrame(data=effects_et_al, columns=cols)
gp.delete_file_if_exists(file_name_csv)
datasave.to_csv(file_name_csv, index=False)
# k-means clustering
if c_dict['post_km']:
pd.set_option('display.max_rows', 1000, 'display.max_columns', 100)
iate_np = iate.to_numpy()
silhouette_avg_prev = -1
print('\n' + ('=' * 80), '\nK-Means++ clustering', '\n' + ('-' * 80))
print('-' * 80)
for cluster_no in c_dict['post_km_no_of_groups']:
cluster_lab_tmp = KMeans(
n_clusters=cluster_no,
n_init=c_dict['post_km_replications'], init='k-means++',
max_iter=c_dict['post_kmeans_max_tries'], algorithm='full',
random_state=42, tol=1e-5, verbose=0, copy_x=True
).fit_predict(iate_np)
silhouette_avg = silhouette_score(iate_np, cluster_lab_tmp)
print('Number of clusters: ', cluster_no,
'Average silhouette score:', silhouette_avg)
if silhouette_avg > silhouette_avg_prev:
cluster_lab_np = np.copy(cluster_lab_tmp)
silhouette_avg_prev = np.copy(silhouette_avg)
print('Best value of average silhouette score:', silhouette_avg_prev)
print('-' * 80)
del iate_np
# Reorder labels for better visible inspection of results
iate_name = iate_pot_name['names_iate']
namesfirsty = iate_name[0:round(len(iate_name)/len(v_dict['y_name']))]
cl_means = iate[namesfirsty].groupby(by=cluster_lab_np).mean()
cl_means_np = cl_means.to_numpy()
cl_means_np = np.mean(cl_means_np, axis=1)
sort_ind = np.argsort(cl_means_np)
cl_group = cluster_lab_np.copy()
for cl_j, cl_old in enumerate(sort_ind):
cl_group[cluster_lab_np == cl_old] = cl_j
        print('Effects are ordered w.r.t. the size of the effects for the',
              'first outcome.')
print('Effects', '\n' + ('-' * 80))
daten_neu = data.copy()
daten_neu['IATE_Cluster'] = cl_group
gp.delete_file_if_exists(file_name)
daten_neu.to_csv(file_name)
del daten_neu
cl_means = iate.groupby(by=cl_group).mean()
print(cl_means.transpose())
print('-' * 80, '\nPotential outcomes', '\n' + ('-' * 80))
cl_means = pot_y.groupby(by=cl_group).mean()
print(cl_means.transpose())
print('-' * 80, '\nCovariates', '\n' + ('-' * 80))
names_unordered = []
for x_name in v_x_type.keys():
if v_x_type[x_name] > 0:
names_unordered.append(x_name)
if names_unordered: # List is not empty
x_dummies = pd.get_dummies(x_dat, columns=names_unordered)
            x_km = pd.concat([x_dat, x_dummies], axis=1)  # api: pandas.concat
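# Illustrative sketch, not part of the original mcf code: the k-means step
# above picks the number of clusters by the average silhouette score and keeps
# the best labelling. A minimal self-contained version of that selection loop
# on synthetic effects; every name below is hypothetical and only illustrates
# the technique.
import numpy as np
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
rng = np.random.default_rng(0)
synthetic_iates = np.vstack([rng.normal(0.0, 1.0, (100, 3)),
                             rng.normal(4.0, 1.0, (100, 3))])
best_labels, best_score = None, -1.0
for n_clusters in (2, 3, 4, 5):
    labels = KMeans(n_clusters=n_clusters, n_init=10,
                    random_state=42).fit_predict(synthetic_iates)
    score = silhouette_score(synthetic_iates, labels)
    if score > best_score:  # keep the labelling with the best silhouette
        best_labels, best_score = labels, score
print('Best silhouette score:', best_score)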
import os
import pickle
import re
from pathlib import Path
from typing import Tuple, Dict
import pandas as pd
import requests
from bs4 import BeautifulSoup
from selenium import webdriver
from brFinance.scraper.cvm.financial_report import FinancialReport
from brFinance.scraper.cvm.search import SearchDFP, SearchITR
from brFinance.utils.browser import Browser
class Company:
"""
An instance of a Company can fetch useful information about Financial Reports, Social Capital and Registration Data
"""
def __init__(self, cvm_code: int):
"""
Parameters
----------
cvm_code : int
CVM company code
"""
self.cvm_code = cvm_code
def _save_files(self, driver, report_info) -> Dict:
        document_number = re.search(r"(?<=Documento=)(.*?)(?=&)", report_info['linkView']).group()
# Create folder and save reports locally
path_save_reports = f'{os.getcwd()}/reports'
report_file = f'{path_save_reports}/{document_number}.plk'
Path(path_save_reports).mkdir(exist_ok=True)
# Check if report is available locally, otherwise scrape it.
if Path(report_file).exists():
with open(report_file, 'rb') as load_report:
report_obj = pickle.load(load_report)
            print("Loaded locally!")
else:
report_obj = FinancialReport(link=report_info["linkView"], driver=driver).get_report()
with open(report_file, 'wb') as save_report:
pickle.dump(report_obj, save_report)
return report_obj
def get_social_capital_data(self) -> pd.DataFrame:
"""
        Returns a dataframe with the number of preferred and ordinary (common) shares for the company
Returns
-------
pandas.Dataframe
Dataframe with Social Capital Data
"""
url = f"http://bvmf.bmfbovespa.com.br/pt-br/mercados/acoes/empresas/ExecutaAcaoConsultaInfoEmp.asp?CodCVM={self.cvm_code}"
        df = pd.DataFrame()  # api: pandas.DataFrame
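# Illustrative sketch, not part of the original brFinance code: _save_files
# above caches each scraped report as a pickle keyed by its CVM document
# number. The same check-then-scrape pattern in isolation; `fetch_report` is a
# hypothetical stand-in for the real scraping call.
import pickle
from pathlib import Path
def load_or_build(cache_dir: str, key: str, fetch_report):
    cache_file = Path(cache_dir) / f"{key}.plk"
    cache_file.parent.mkdir(parents=True, exist_ok=True)
    if cache_file.exists():  # already scraped once: load the local copy
        with open(cache_file, 'rb') as handle:
            return pickle.load(handle)
    report = fetch_report()  # not cached yet: scrape and store for next time
    with open(cache_file, 'wb') as handle:
        pickle.dump(report, handle)
    return report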
__author__ = 'Yan'
import pandas
import sklearn.metrics
import statistics
from sklearn import tree
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from io import StringIO
from IPython.display import Image
import pydotplus
# bug fix for display formats to avoid run time errors
pandas.set_option('display.float_format', lambda x: '%.2f' % x)  # api: pandas.set_option
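# Illustrative sketch, not part of the original script: the imports above set
# up a decision-tree classification workflow (split, fit, predict, evaluate,
# export the tree). A minimal self-contained version on scikit-learn's bundled
# iris data; the original script's dataset and column names are not shown in
# this excerpt.
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier, export_graphviz
from sklearn.metrics import confusion_matrix, accuracy_score
iris = load_iris()
pred_train, pred_test, tar_train, tar_test = train_test_split(
    iris.data, iris.target, test_size=0.4, random_state=0)
classifier = DecisionTreeClassifier(random_state=0).fit(pred_train, tar_train)
predictions = classifier.predict(pred_test)
print(confusion_matrix(tar_test, predictions))
print(accuracy_score(tar_test, predictions))
# export_graphviz(classifier, out_file='tree.dot')  # render with pydotplus if desired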
import json
import numpy as np
import pytest
from pandas import DataFrame, Index, json_normalize
import pandas._testing as tm
from pandas.io.json._normalize import nested_to_record
@pytest.fixture
def deep_nested():
# deeply nested data
return [
{
"country": "USA",
"states": [
{
"name": "California",
"cities": [
{"name": "San Francisco", "pop": 12345},
{"name": "Los Angeles", "pop": 12346},
],
},
{
"name": "Ohio",
"cities": [
{"name": "Columbus", "pop": 1234},
{"name": "Cleveland", "pop": 1236},
],
},
],
},
{
"country": "Germany",
"states": [
{"name": "Bayern", "cities": [{"name": "Munich", "pop": 12347}]},
{
"name": "Nordrhein-Westfalen",
"cities": [
{"name": "Duesseldorf", "pop": 1238},
{"name": "Koeln", "pop": 1239},
],
},
],
},
]
@pytest.fixture
def state_data():
return [
{
"counties": [
{"name": "Dade", "population": 12345},
{"name": "Broward", "population": 40000},
{"name": "<NAME>", "population": 60000},
],
"info": {"governor": "<NAME>"},
"shortname": "FL",
"state": "Florida",
},
{
"counties": [
{"name": "Summit", "population": 1234},
{"name": "Cuyahoga", "population": 1337},
],
"info": {"governor": "<NAME>"},
"shortname": "OH",
"state": "Ohio",
},
]
@pytest.fixture
def author_missing_data():
return [
{"info": None},
{
"info": {"created_at": "11/08/1993", "last_updated": "26/05/2012"},
"author_name": {"first": "Jane", "last_name": "Doe"},
},
]
@pytest.fixture
def missing_metadata():
return [
{
"name": "Alice",
"addresses": [
{
"number": 9562,
"street": "Morris St.",
"city": "Massillon",
"state": "OH",
"zip": 44646,
}
],
},
{
"addresses": [
{
"number": 8449,
"street": "Spring St.",
"city": "Elizabethton",
"state": "TN",
"zip": 37643,
}
]
},
]
@pytest.fixture
def max_level_test_input_data():
"""
input data to test json_normalize with max_level param
"""
return [
{
"CreatedBy": {"Name": "User001"},
"Lookup": {
"TextField": "Some text",
"UserField": {"Id": "ID001", "Name": "Name001"},
},
"Image": {"a": "b"},
}
]
class TestJSONNormalize:
def test_simple_records(self):
recs = [
{"a": 1, "b": 2, "c": 3},
{"a": 4, "b": 5, "c": 6},
{"a": 7, "b": 8, "c": 9},
{"a": 10, "b": 11, "c": 12},
]
        result = json_normalize(recs)  # api: pandas.io.json.json_normalize
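        # A plausible completion, assumed rather than taken from the original
        # test body: flat records should normalize to an equivalent DataFrame.
        expected = DataFrame(recs)
        tm.assert_frame_equal(result, expected)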
#!/usr/bin/env python
import argparse
import glob
import os
from abc import abstractmethod, ABC
from collections import defaultdict
import logging
import numpy as np
import pandas as pd
from sklearn.model_selection import RepeatedKFold
from qpputils import dataparser as dp
# TODO: change the functions to work with pandas methods such as idxmax
# TODO: Consider change to the folds file to be more convenient for pandas DF
parser = argparse.ArgumentParser(description='Cross Validation script',
usage='Use CV to optimize correlation',
epilog='Prints the average correlation')
parser.add_argument('-p', '--predictions', metavar='predictions_dir', default='predictions',
help='path to prediction results files directory')
parser.add_argument('--labeled', default='baseline/QLmap1000', help='path to the labeled results (AP) file')
parser.add_argument('-r', '--repeats', default=30, help='number of repeats')
parser.add_argument('-k', '--splits', default=2, help='number of k-fold')
parser.add_argument('-m', '--measure', default='pearson', type=str,
help='default correlation measure type is pearson', choices=['pearson', 'spearman', 'kendall'], )
parser.add_argument("-g", "--generate", help="generate new CrossValidation sets", action="store_true")
parser.add_argument('-f', "--folds_file", metavar='CV_FILE_PATH', help="load an existing CrossValidation JSON folds file",
default='2_folds_30_repetitions.json')
logging.basicConfig(format='%(asctime)s - %(message)s', datefmt='%d-%b-%y %H:%M:%S', level=logging.INFO)
class CrossValidation:
def __init__(self, folds_map_file=None, k=2, rep=30, predictions_dir=None, test='pearson', ap_file=None,
generate_folds=False, **kwargs):
logging.debug("testing logger")
self.k = k
self.rep = rep
self.test = test
assert predictions_dir, 'Specify predictions dir'
assert folds_map_file, 'Specify path for CV folds file'
predictions_dir = os.path.abspath(os.path.normpath(os.path.expanduser(predictions_dir)))
assert os.listdir(predictions_dir), f'{predictions_dir} is empty'
self.output_dir = dp.ensure_dir(predictions_dir.replace('predictions', 'evaluation'))
if ap_file:
self.full_set = self._build_full_set(predictions_dir, ap_file)
if '-' in ap_file:
self.ap_func = ap_file.split('-')[-1]
else:
self.ap_func = 'basic'
else:
self.full_set = self._build_full_set(predictions_dir)
if generate_folds:
self.index = self.full_set.index
self.folds_file = self._generate_k_folds()
self.__load_k_folds()
else:
try:
self.folds_file = dp.ensure_file(folds_map_file)
except FileExistsError:
print("The folds file specified doesn't exist, going to generate the file and save")
self.__load_k_folds()
# self.corr_df = NotImplemented
@abstractmethod
def calc_function(self, df: pd.DataFrame):
raise NotImplementedError
@staticmethod
def _build_full_set(predictions_dir, ap_file=None):
"""Assuming the predictions files are named : predictions-[*]"""
all_files = glob.glob(predictions_dir + "/*predictions*")
if 'uef' in predictions_dir:
# Excluding all the 5 and 10 docs predictions
if 'qf' in predictions_dir:
all_files = [fn for fn in all_files if
not os.path.basename(fn).endswith('-5+', 11, 14) and not os.path.basename(fn).endswith(
'-10+', 11, 15)]
else:
all_files = [fn for fn in all_files if
not os.path.basename(fn).endswith('-5') and not os.path.basename(fn).endswith('-10')]
list_ = []
for file_ in all_files:
fname = file_.split('-')[-1]
df = dp.ResultsReader(file_, 'predictions').data_df
df = df.rename(columns={"score": f'score_{fname}'})
list_.append(df)
if ap_file:
ap_df = dp.ResultsReader(ap_file, 'ap').data_df
list_.append(ap_df)
full_set = pd.concat(list_, axis=1, sort=True)
assert not full_set.empty, f'The Full set DF is empty, make sure that {predictions_dir} is not empty'
return full_set
def _generate_k_folds(self):
        # FIXME: Need to fix it to generate a DF with folds, without redundancy
        """ Generates a k-folds mapping and saves it as a JSON file
:rtype: str (returns the saved JSON filename)
"""
rkf = RepeatedKFold(n_splits=self.k, n_repeats=self.rep)
count = 1
# {'set_id': {'train': [], 'test': []}}
results = defaultdict(dict)
for train, test in rkf.split(self.index):
train_index, test_index = self.index[train], self.index[test]
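            # count advances by 0.5 per split: whole values mark fold 'a' and
            # half values mark fold 'b' of repetition number int(count)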
if count % 1 == 0:
results[int(count)]['a'] = {'train': train_index, 'test': test_index}
else:
results[int(count)]['b'] = {'train': train_index, 'test': test_index}
count += 0.5
temp = pd.DataFrame(results)
temp.to_json(f'{self.k}_folds_{self.rep}_repetitions.json')
return f'{self.k}_folds_{self.rep}_repetitions.json'
def __load_k_folds(self):
# self.data_sets_map = pd.read_json(self.file_name).T['a'].apply(pd.Series).rename(
# mapper={'train': 'fold-1', 'test': 'fold-2'}, axis='columns')
self.data_sets_map = pd.read_json(self.folds_file)
def _calc_eval_metric_df(self):
sets = self.data_sets_map.index
folds = self.data_sets_map.columns
corr_results = defaultdict(dict)
for set_id in sets:
for fold in folds:
train_queries = set()
# a hack to create a set out of train queries, from multiple lists
_ = {train_queries.update(i) for i in self.data_sets_map.loc[set_id, folds != fold].values}
test_queries = set(self.data_sets_map.loc[set_id, fold])
train_set = self.full_set.loc[map(str, train_queries)]
test_set = self.full_set.loc[map(str, test_queries)]
corr_results[set_id][fold] = pd.DataFrame(
{'train': self.calc_function(train_set), 'test': self.calc_function(test_set)})
corr_df = pd.DataFrame.from_dict(corr_results, orient='index')
try:
corr_df.to_pickle(
f'{self.output_dir}/correlations_for_{self.k}_folds_{self.rep}_repetitions_{self.ap_func}.pkl')
except AttributeError:
corr_df.to_pickle(f'{self.output_dir}/correlations_for_{self.k}_folds_{self.rep}_repetitions_pageRank.pkl')
return corr_df
def calc_test_results(self):
if not hasattr(self, 'corr_df'):
self.corr_df = self._calc_eval_metric_df()
sets = self.data_sets_map.index
full_results = defaultdict(dict)
simple_results = defaultdict()
for set_id in sets:
_res_per_set = []
for fold in self.corr_df.loc[set_id].index:
max_train_param = self.corr_df.loc[set_id, fold].idxmax()['train']
train_result, test_result = self.corr_df.loc[set_id, fold].loc[max_train_param]
_res_per_set.append(test_result)
full_results[set_id, fold] = {'best_train_param': max_train_param.split('_')[1],
'best_train_val': train_result, 'test_val': test_result}
simple_results[f'set_{set_id}'] = np.mean(_res_per_set)
full_results_df = pd.DataFrame.from_dict(full_results, orient='index')
try:
full_results_df.to_json(
f'{self.output_dir}/'
f'full_results_vector_for_{self.k}_folds_{self.rep}_repetitions_{self.ap_func}_{self.test}.json')
except AttributeError:
full_results_df.to_json(
f'{self.output_dir}/'
f'full_results_vector_for_{self.k}_folds_{self.rep}_repetitions_pageRank_{self.test}.json')
simple_results_df = pd.Series(simple_results)
try:
simple_results_df.to_json(
f'{self.output_dir}/'
f'simple_results_vector_for_{self.k}_folds_{self.rep}_repetitions_{self.ap_func}.json')
except AttributeError:
simple_results_df.to_json(
f'{self.output_dir}/'
f'simple_results_vector_for_{self.k}_folds_{self.rep}_repetitions_pageRank.json')
mean = simple_results_df.mean()
return f'{mean:.3f}'
@staticmethod
def read_eval_results(results_file):
# FIXME: need to fix it after changing the format of the eval files
temp_df = pd.read_json(results_file, orient='index')
# Split column of lists into several columns
res_df = pd.DataFrame(temp_df['best train a'].values.tolist(), index=temp_df.index.str.split().str[1],
columns=['a', 'train_correlation_a'])
res_df.rename_axis('set', inplace=True)
res_df[['b', 'train_correlation_b']] = pd.DataFrame(temp_df['best train b'].values.tolist(),
index=temp_df.index.str.split().str[1])
return res_df
class InterTopicCrossValidation(CrossValidation, ABC):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.calc_function = self.calc_inter_topic_corr if kwargs.get('ap_file') else self.calc_inter_topic_scores
# self.corr_df = self._calc_eval_metric_df()
def calc_inter_topic_corr(self, df):
dict_ = {}
for col in df.columns:
if col != 'ap':
dict_[col] = df[col].corr(df['ap'], method=self.test)
else:
continue
return pd.Series(dict_)
def calc_inter_topic_scores(self, df):
return df.mean().to_dict()
class IntraTopicCrossValidation(CrossValidation, ABC):
"""
    Class for intra-topic evaluation, i.e. evaluation is done per topic across its query variants.

    Parameters
    ----------
    :param bool save_calculations: set to True to save the intermediate results.
        To load intermediate results, call load_per_topic_df() explicitly; they are not
        loaded automatically during calculation, in order to avoid bugs.
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.sir = kwargs.get('save_calculations', False)
self.full_set = dp.add_topic_to_qdf(self.full_set).set_index('topic')
if kwargs.get('ap_file'):
self.calc_function = self.calc_intra_topic_corr
# self.corr_df = self._calc_eval_metric_df()
# else:
# self.calc_function = self.calc_intra_topic_corr
self.test_per_topic = pd.DataFrame(index=self.full_set.index.unique())
def _calc_eval_metric_df(self):
sets = self.data_sets_map.index
folds = self.data_sets_map.columns
corr_results = defaultdict(dict)
for set_id in sets:
_test = []
for fold in folds:
train_queries = set()
# a hack to create a set out of train queries, from multiple lists
_ = {train_queries.update(i) for i in self.data_sets_map.loc[set_id, folds != fold].values}
test_queries = set(self.data_sets_map.loc[set_id, fold])
train_set = self.full_set.loc[map(str, train_queries)]
test_set = self.full_set.loc[map(str, test_queries)]
_ts_df = self.calc_function(test_set)
_tr_df = self.calc_function(train_set)
_test_df = _ts_df.loc[:, _ts_df.columns != 'weight'].apply(np.average, axis='index',
weights=_ts_df['weight'])
_train_df = _tr_df.loc[:, _tr_df.columns != 'weight'].apply(np.average, axis='index',
weights=_tr_df['weight'])
_sr = _ts_df.loc[:, _train_df.idxmax()]
_sr.name = set_id
self.test_per_topic = self.test_per_topic.join(_sr, rsuffix=f'-{set_id}')
corr_results[set_id][fold] = pd.DataFrame({'train': _train_df, 'test': _test_df})
self.test_per_topic['weight'] = self.full_set.groupby('topic')['qid'].count()
corr_df = pd.DataFrame.from_dict(corr_results, orient='index')
try:
corr_df.to_pickle(
f'{self.output_dir}/correlations_for_{self.k}_folds_{self.rep}_repetitions_{self.ap_func}.pkl')
except AttributeError:
corr_df.to_pickle(f'{self.output_dir}/correlations_for_{self.k}_folds_{self.rep}_repetitions_pageRank.pkl')
if self.sir:
self.test_per_topic.to_pickle(
f'{self.output_dir}/per_topic_correlations_for_{self.k}_folds_{self.rep}_repetitions_pageRank.pkl')
return corr_df
def calc_intra_topic_corr(self, df: pd.DataFrame):
"""
        Calculate, per topic, the correlation (using the self.test method, e.g. Kendall's tau)
        of each predictor with AP over the topic's query variants.
        :param df: pd.DataFrame with the predictors' scores and an 'ap' column for the topic's variants
        :return: pd.DataFrame indexed by topic; columns are the predictors plus a 'weight' column
            (number of variants per topic). The weighted average over topics is computed by the caller.
"""
dict_ = {}
df = df.reset_index().set_index(['topic', 'qid'])
for topic, _df in df.groupby('topic'):
dict_[topic] = _df.loc[:, _df.columns != 'ap'].corrwith(_df['ap'], method=self.test).append(
pd.Series({'weight': len(_df)}))
# dict_[topic] = _df.loc[:, _df.columns != 'ap'].corrwith(_df['ap'], method='pearson')
_df = pd.DataFrame.from_dict(dict_, orient='index')
# self.test_per_topic = _df
return _df
def load_per_topic_df(self):
try:
inter_res_file = dp.ensure_file(
f'{self.output_dir}/per_topic_correlations_for_{self.k}_folds_{self.rep}_repetitions_pageRank.pkl')
except AssertionError:
            logging.warning(
                f"File {self.output_dir}/per_topic_correlations_for_{self.k}_folds_{self.rep}_repetitions_pageRank.pkl doesn't exist")
return None
        df = pd.read_pickle(inter_res_file)  # api: pandas.read_pickle
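        # Assumed completion (not from the original source): return the loaded
        # per-topic correlation frame to the caller.
        return df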