| prompt (string, length 19 to 1.03M) | completion (string, length 4 to 2.12k) | api (string, length 8 to 90) |
|---|---|---|
"""
Provide classes to perform the groupby aggregate operations.
These are not exposed to the user and provide implementations of the grouping
operations, primarily in cython. These classes (BaseGrouper and BinGrouper)
are contained *in* the SeriesGroupBy and DataFrameGroupBy objects.
"""
from __future__ import annotations
import collections
import functools
from typing import (
Generic,
Hashable,
Iterator,
Sequence,
)
import numpy as np
from pandas._libs import (
NaT,
lib,
)
import pandas._libs.groupby as libgroupby
import pandas._libs.reduction as libreduction
from pandas._typing import (
ArrayLike,
DtypeObj,
F,
FrameOrSeries,
Shape,
final,
)
from pandas.errors import AbstractMethodError
from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.cast import (
maybe_cast_pointwise_result,
maybe_cast_result_dtype,
maybe_downcast_to_dtype,
)
from pandas.core.dtypes.common import (
ensure_float64,
ensure_int64,
ensure_platform_int,
is_bool_dtype,
is_categorical_dtype,
is_complex_dtype,
is_datetime64_any_dtype,
is_datetime64tz_dtype,
is_extension_array_dtype,
is_float_dtype,
is_integer_dtype,
is_numeric_dtype,
is_period_dtype,
is_sparse,
is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.dtypes import ExtensionDtype
from pandas.core.dtypes.generic import ABCCategoricalIndex
from pandas.core.dtypes.missing import (
isna,
maybe_fill,
)
from pandas.core.arrays import ExtensionArray
import pandas.core.common as com
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame
from pandas.core.groupby import (
base,
grouper,
)
from pandas.core.indexes.api import (
Index,
MultiIndex,
ensure_index,
)
from pandas.core.internals import ArrayManager
from pandas.core.series import Series
from pandas.core.sorting import (
compress_group_index,
decons_obs_group_ids,
get_flattened_list,
get_group_index,
get_group_index_sorter,
get_indexer_dict,
)
class WrappedCythonOp:
"""
Dispatch logic for functions defined in _libs.groupby
"""
def __init__(self, kind: str, how: str):
self.kind = kind
self.how = how
_CYTHON_FUNCTIONS = {
"aggregate": {
"add": "group_add",
"prod": "group_prod",
"min": "group_min",
"max": "group_max",
"mean": "group_mean",
"median": "group_median",
"var": "group_var",
"first": "group_nth",
"last": "group_last",
"ohlc": "group_ohlc",
},
"transform": {
"cumprod": "group_cumprod",
"cumsum": "group_cumsum",
"cummin": "group_cummin",
"cummax": "group_cummax",
"rank": "group_rank",
},
}
_cython_arity = {"ohlc": 4} # OHLC
# Note: we make this a classmethod and pass kind+how so that caching
# works at the class level and not the instance level
@classmethod
@functools.lru_cache(maxsize=None)
def _get_cython_function(
cls, kind: str, how: str, dtype: np.dtype, is_numeric: bool
):
dtype_str = dtype.name
ftype = cls._CYTHON_FUNCTIONS[kind][how]
# see if there is a fused-type version of function
# only valid for numeric
f = getattr(libgroupby, ftype)
if is_numeric:
return f
elif dtype == object:
if "object" not in f.__signatures__:
# raise NotImplementedError here rather than TypeError later
raise NotImplementedError(
f"function is not implemented for this dtype: "
f"[how->{how},dtype->{dtype_str}]"
)
return f
def get_cython_func_and_vals(self, values: np.ndarray, is_numeric: bool):
"""
Find the appropriate cython function, casting if necessary.
Parameters
----------
values : np.ndarray
is_numeric : bool
Returns
-------
func : callable
values : np.ndarray
"""
how = self.how
kind = self.kind
if how in ["median", "cumprod"]:
# these two only have float64 implementations
if is_numeric:
values = ensure_float64(values)
else:
raise NotImplementedError(
f"function is not implemented for this dtype: "
f"[how->{how},dtype->{values.dtype.name}]"
)
func = getattr(libgroupby, f"group_{how}_float64")
return func, values
func = self._get_cython_function(kind, how, values.dtype, is_numeric)
if values.dtype.kind in ["i", "u"]:
if how in ["add", "var", "prod", "mean", "ohlc"]:
# result may still include NaN, so we have to cast
values = ensure_float64(values)
return func, values
def disallow_invalid_ops(self, dtype: DtypeObj, is_numeric: bool = False):
"""
Check if we can do this operation with our cython functions.
Raises
------
NotImplementedError
This is either not a valid function for this dtype, or
valid but not implemented in cython.
"""
how = self.how
if is_numeric:
# never an invalid op for those dtypes, so return early as fastpath
return
if is_categorical_dtype(dtype):
# NotImplementedError for methods that can fall back to a
# non-cython implementation.
if how in ["add", "prod", "cumsum", "cumprod"]:
raise TypeError(f"{dtype} type does not support {how} operations")
raise NotImplementedError(f"{dtype} dtype not supported")
elif is_sparse(dtype):
# categoricals are only 1d, so we
# are not setup for dim transforming
raise NotImplementedError(f"{dtype} dtype not supported")
elif is_datetime64_any_dtype(dtype):
# we raise NotImplemented if this is an invalid operation
# entirely, e.g. adding datetimes
if how in ["add", "prod", "cumsum", "cumprod"]:
raise TypeError(f"datetime64 type does not support {how} operations")
elif is_timedelta64_dtype(dtype):
if how in ["prod", "cumprod"]:
raise TypeError(f"timedelta64 type does not support {how} operations")
def get_output_shape(self, ngroups: int, values: np.ndarray) -> Shape:
how = self.how
kind = self.kind
arity = self._cython_arity.get(how, 1)
out_shape: Shape
if how == "ohlc":
out_shape = (ngroups, 4)
elif arity > 1:
raise NotImplementedError(
"arity of more than 1 is not supported for the 'how' argument"
)
elif kind == "transform":
out_shape = values.shape
else:
out_shape = (ngroups,) + values.shape[1:]
return out_shape
def get_out_dtype(self, dtype: np.dtype) -> np.dtype:
how = self.how
if how == "rank":
out_dtype = "float64"
else:
if is_numeric_dtype(dtype):
out_dtype = f"{dtype.kind}{dtype.itemsize}"
else:
out_dtype = "object"
return np.dtype(out_dtype)
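# Illustrative sketch, not part of pandas: the dispatch rules encoded above surface
# through the public groupby API. The frame below is a made-up example; call the
# helper explicitly to run it.
def _example_cython_dispatch():
    import pandas as pd

    df = pd.DataFrame({"key": ["a", "a", "b"], "val": [1, 2, 3]})
    gb = df.groupby("key")["val"]

    ohlc = gb.ohlc()   # "ohlc" has arity 4: open/high/low/close columns
    ranks = gb.rank()  # "rank" results always come back as float64 (see get_out_dtype)
    assert list(ohlc.columns) == ["open", "high", "low", "close"]
    assert ranks.dtype == "float64"

    ts = pd.DataFrame(
        {"key": ["a", "a"], "when": pd.to_datetime(["2021-01-01", "2021-01-02"])}
    )
    try:
        # expected to be rejected by disallow_invalid_ops
        ts.groupby("key")["when"].cumsum()
    except TypeError:
        pass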
class BaseGrouper:
"""
This is an internal Grouper class, which actually holds
the generated groups
Parameters
----------
axis : Index
groupings : Sequence[Grouping]
all the grouping instances to handle in this grouper,
for example, when grouping by a list of keys, the whole list of Grouping instances is passed
sort : bool, default True
whether this grouper will give sorted result or not
group_keys : bool, default True
mutated : bool, default False
indexer : intp array, optional
the indexer created by Grouper
some groupers (e.g. TimeGrouper) sort their axis and their
group_info is also sorted, so the indexer is needed to reorder
"""
def __init__(
self,
axis: Index,
groupings: Sequence[grouper.Grouping],
sort: bool = True,
group_keys: bool = True,
mutated: bool = False,
indexer: np.ndarray | None = None,
dropna: bool = True,
):
assert isinstance(axis, Index), axis
self._filter_empty_groups = self.compressed = len(groupings) != 1
self.axis = axis
self._groupings: list[grouper.Grouping] = list(groupings)
self.sort = sort
self.group_keys = group_keys
self.mutated = mutated
self.indexer = indexer
self.dropna = dropna
@property
def groupings(self) -> list[grouper.Grouping]:
return self._groupings
@property
def shape(self) -> Shape:
return tuple(ping.ngroups for ping in self.groupings)
def __iter__(self):
return iter(self.indices)
@property
def nkeys(self) -> int:
return len(self.groupings)
def get_iterator(
self, data: FrameOrSeries, axis: int = 0
) -> Iterator[tuple[Hashable, FrameOrSeries]]:
"""
Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
splitter = self._get_splitter(data, axis=axis)
keys = self._get_group_keys()
for key, group in zip(keys, splitter):
yield key, group.__finalize__(data, method="groupby")
@final
def _get_splitter(self, data: FrameOrSeries, axis: int = 0) -> DataSplitter:
"""
Returns
-------
Generator yielding subsetted objects
__finalize__ has not been called for the subsetted objects returned.
"""
comp_ids, _, ngroups = self.group_info
return get_splitter(data, comp_ids, ngroups, axis=axis)
def _get_grouper(self):
"""
We are a grouper as part of another's groupings.
We have a specific method of grouping, so we cannot
convert to an Index for our grouper.
"""
return self.groupings[0].grouper
@final
def _get_group_keys(self):
if len(self.groupings) == 1:
return self.levels[0]
else:
comp_ids, _, ngroups = self.group_info
# provide "flattened" iterator for multi-group setting
return get_flattened_list(comp_ids, ngroups, self.levels, self.codes)
@final
def apply(self, f: F, data: FrameOrSeries, axis: int = 0):
mutated = self.mutated
splitter = self._get_splitter(data, axis=axis)
group_keys = self._get_group_keys()
result_values = None
if data.ndim == 2 and any(
isinstance(x, ExtensionArray) for x in data._iter_column_arrays()
):
# calling splitter.fast_apply will raise TypeError via apply_frame_axis0
# if we pass EA instead of ndarray
# TODO: can we have a workaround for EAs backed by ndarray?
pass
elif isinstance(data._mgr, ArrayManager):
# TODO(ArrayManager) don't use fast_apply / libreduction.apply_frame_axis0
# for now -> relies on BlockManager internals
pass
elif (
com.get_callable_name(f) not in base.plotting_methods
and isinstance(splitter, FrameSplitter)
and axis == 0
# fast_apply/libreduction doesn't allow non-numpy backed indexes
and not data.index._has_complex_internals
):
try:
sdata = splitter.sorted_data
result_values, mutated = splitter.fast_apply(f, sdata, group_keys)
except IndexError:
# This is a rare case in which re-running in python-space may
# make a difference, see test_apply_mutate.test_mutate_groups
pass
else:
# If the fast apply path could be used we can return here.
# Otherwise we need to fall back to the slow implementation.
if len(result_values) == len(group_keys):
return group_keys, result_values, mutated
if result_values is None:
# result_values is None if fast apply path wasn't taken
# or fast apply aborted with an unexpected exception.
# In either case, initialize the result list and perform
# the slow iteration.
result_values = []
skip_first = False
else:
# If result_values is not None we're in the case that the
# fast apply loop was broken prematurely but we have
# already the result for the first group which we can reuse.
skip_first = True
# This calls DataSplitter.__iter__
zipped = zip(group_keys, splitter)
if skip_first:
# pop the first item from the front of the iterator
next(zipped)
for key, group in zipped:
object.__setattr__(group, "name", key)
# group might be modified
group_axes = group.axes
res = f(group)
if not _is_indexed_like(res, group_axes, axis):
mutated = True
result_values.append(res)
return group_keys, result_values, mutated
@cache_readonly
def indices(self):
""" dict {group name -> group indices} """
if len(self.groupings) == 1 and isinstance(
self.result_index, ABCCategoricalIndex
):
# This shows unused categories in indices GH#38642
return self.groupings[0].indices
codes_list = [ping.codes for ping in self.groupings]
keys = [ping.group_index for ping in self.groupings]
return get_indexer_dict(codes_list, keys)
@property
def codes(self) -> list[np.ndarray]:
return [ping.codes for ping in self.groupings]
@property
def levels(self) -> list[Index]:
return [ping.group_index for ping in self.groupings]
@property
def names(self) -> list[Hashable]:
return [ping.name for ping in self.groupings]
@final
def size(self) -> Series:
"""
Compute group sizes.
"""
ids, _, ngroup = self.group_info
if ngroup:
out = np.bincount(ids[ids != -1], minlength=ngroup)
else:
out = []
return
|
Series(out, index=self.result_index, dtype="int64")
|
pandas.core.series.Series
|
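# Illustrative usage sketch (not part of the excerpt above): the completed line builds
# the result of GroupBy.size(), which the public API returns as an int64 Series indexed
# by the group keys. The frame below is a made-up example.
import pandas as pd

sizes = pd.DataFrame({"key": ["a", "a", "b"], "val": [1, 2, 3]}).groupby("key").size()
assert sizes.dtype == "int64"
assert sizes.loc["a"] == 2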
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import seaborn as sns
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import MultiLabelBinarizer
def prep_fbi_dataset():
df=
|
pd.read_csv("https://raw.githubusercontent.com/CMU-IDS-2022/final-project-crime-scene/main/data/hate_crime.csv")
|
pandas.read_csv
|
# SPDX-FileCopyrightText: : 2020 @JanFrederickUnnewehr, The PyPSA-Eur Authors
#
# SPDX-License-Identifier: GPL-3.0-or-later
"""
This rule downloads the load data from `Open Power System Data Time series <https://data.open-power-system-data.org/time_series/>`_. For all countries in the network, the per country load timeseries with suffix ``_load_actual_entsoe_transparency`` are extracted from the dataset. After filling small gaps linearly and large gaps by copying time-slice of a given period, the load data is exported to a ``.csv`` file.
Relevant Settings
-----------------
.. code:: yaml
snapshots:
load:
url:
interpolate_limit:
time_shift_for_large_gaps:
manual_adjustments:
.. seealso::
Documentation of the configuration file ``config.yaml`` at
:ref:`load_cf`
Inputs
------
Outputs
-------
- ``resource/time_series_60min_singleindex_filtered.csv``:
"""
import logging
logger = logging.getLogger(__name__)
#from _helpers import configure_logging
import re
import pandas as pd
import numpy as np
import json
from shapely.geometry import LineString,Point
from sklearn.linear_model import Lasso
from sklearn.metrics import mean_absolute_error
#-----------------#
# utils functions #
#-----------------#
def unpack_param(df):
if 'param' in df.columns:
new = df.param.apply(string2list)
df.lat=df.lat.apply(string2list)
df.long=df.long.apply(string2list)
return df.merge(pd.DataFrame(list(new)), left_index=True, right_index=True)
else:
return 0
def string2list(string, with_None=True):
p = re.compile('(?<!\\\\)\'')
string = p.sub('\"', string)
if with_None:
p2 = re.compile('None')
string = p2.sub('\"None\"', string)
return json.loads(string)
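# Quick illustration (not part of the original script): string2list rewrites
# single-quoted, Python-style list strings so that json.loads can parse them.
assert string2list("['DE', 'FR', None]") == ["DE", "FR", "None"]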
#use to create geo object
def change2linestring(df):
# row-level helper: applied to each row via DataFrame.apply(axis=1)
df['linestring']=[]
for index in range(len(df['lat'])):
df['linestring'].append((df['long'][index],df['lat'][index]))
df['linestring']=LineString(df['linestring'])
return df
def addLinestring(df):
# dataframe-level process
df=df.reset_index(drop=True)
df['linestring']='L'
df=df.apply(change2linestring,axis=1)
return df
def recalculate_pipe_capacity(pipe_diameter_mm):
"""Calculate pipe capacity based on diameter.
20 inch (500 mm) 50 bar -> 1.5 GW CH4 pipe capacity (LHV)
24 inch (600 mm) 50 bar -> 5 GW CH4 pipe capacity (LHV)
36 inch (900 mm) 50 bar -> 11.25 GW CH4 pipe capacity (LHV)
48 inch (1200 mm) 80 bar -> 21.7 GW CH4 pipe capacity (LHV)
Based on p.15 of (https://gasforclimate2050.eu/wp-content/uploads/2020/07/2020_European-Hydrogen-Backbone_Report.pdf)"""
# slope
m0 = (5-1.5) / (600-500)
m1 = (11.25-5)/(900-600)
m2 = (21.7-11.25)/(1200-900)
if np.isnan(pipe_diameter_mm):
return np.nan
if pipe_diameter_mm<500:
return np.nan
if pipe_diameter_mm<600 and pipe_diameter_mm>=500:
return -16 + m0 * pipe_diameter_mm
if pipe_diameter_mm<900 and pipe_diameter_mm>=600:
return -7.5 + m1 * pipe_diameter_mm
else:
return -20.1 + m2 * pipe_diameter_mm
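# Worked check (illustrative, not in the original script): the piecewise-linear fit
# above reproduces the anchor points quoted in the docstring.
for _diameter, _capacity in [(500, 1.5), (600, 5.0), (900, 11.25), (1200, 21.7)]:
    assert abs(recalculate_pipe_capacity(_diameter) - _capacity) < 1e-6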
def convert_gas_to_hydrogen_capacity(gas_capacity):
return gas_capacity/3
def convert_gasVolume2hydrogenCapacity(df):
R_s = 518.4  # specific gas constant of methane [J/(kg K)]
# temperature [Kelvin] (assuming 10°Celsius)
T = 10 + 273.15
# density [kg/m^3]= pressure [kg/ms^2] / (T * R_s), 1 bar = 1e5 kg/(ms^2)
pressure = df.max_pressure_bar.fillna(45)
density = pressure * 1e5 / (T * R_s)
# mass flow [kg/ h], Mega = 1e6,
#mass_flow = df.max_cap_M_m3_per_d * 1e6 / 8760 * density
mass_flow = df.max_cap_M_m3_per_d * 1e6 / 24 * density
# gross calorific value (GCV in ENTSOT table) [kWh/kg]
gcv_lgas = 38.3 / 3.6
gcv_hgas = 47.3 / 3.6
# energy cap [MW] = mass_flow [kg/h] * gcv [kWh/kg] * 1e-3
energy_cap = mass_flow * 1e-3
energy_cap.loc[df.is_H_gas==1] *= gcv_hgas
energy_cap.loc[df.is_H_gas!=1] *= gcv_lgas
return energy_cap*1e-3 # to gw
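# Unit chain of the conversion above, read off the code (illustrative summary only):
#   max_cap [M m^3/day] * 1e6 / 24    -> volume flow [m^3/h]
#   * density [kg/m^3]                -> mass flow   [kg/h]
#   * 1e-3 * GCV [kWh/kg]             -> energy cap  [MW]
#   * 1e-3                            -> energy cap  [GW]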
#-----------------#
# main functions #
#-----------------#
def load_preprocessing_dataset(IGGINL_df_path, entsog_df_path, EMAP_df_path):
# --------------------------------------------
# load & preprocess IGGINL df
#--------------------------------------------
IGGINL = pd.read_csv(IGGINL_df_path, sep=';')
# unpack param column and concat the result to the dataframe
IGGINL = pd.concat([IGGINL,pd.DataFrame(IGGINL.param.apply(eval).to_list())],axis=1)
# convert capacity
IGGINL['max_capacity'] = convert_gas_to_hydrogen_capacity(IGGINL['max_cap_M_m3_per_d']) # gw
# unpack uncertainty column, add useful information to the dataframe
uncertainty = pd.DataFrame(IGGINL.uncertainty.apply(eval).to_list())
IGGINL['cap_uncertain'] = uncertainty['max_cap_M_m3_per_d']
IGGINL['diameter_uncertain'] = uncertainty['diameter_mm']
IGGINL['pressure_uncertain'] = uncertainty['max_pressure_bar']
# create columns to store our new estimation data
IGGINL['capacity_nan'] = IGGINL['max_capacity']
IGGINL['diameter_nan'] = IGGINL['diameter_mm']
IGGINL['pressure_nan'] = IGGINL['max_pressure_bar']
# remove the estimation value of sci grid gas project, only keep values with uncertainty 0
IGGINL.loc[IGGINL[IGGINL['cap_uncertain'] > 0].index, 'capacity_nan'] = np.nan
IGGINL.loc[IGGINL[IGGINL['diameter_uncertain'] > 0].index, 'diameter_nan'] = np.nan
IGGINL.loc[IGGINL[IGGINL['pressure_uncertain'] > 0].index, 'pressure_nan'] = np.nan
# add from to
IGGINL.country_code = IGGINL.country_code.apply(string2list)
IGGINL['from'] = IGGINL.country_code.str[0]
IGGINL['to'] = IGGINL.country_code.str[1]
# deal with whitespace
IGGINL['from'] = IGGINL['from'].str.strip()
IGGINL['to'] = IGGINL['to'].str.strip()
# add line string object
IGGINL['long'] = IGGINL['long'].apply(eval)
IGGINL['lat'] = IGGINL['lat'].apply(eval)
IGGINL = addLinestring(IGGINL)
# create new attributes
IGGINL['From'] = IGGINL.country_code.str[0]
IGGINL['To'] = IGGINL.country_code.str[1]
# convert node id to list object, if it is still string
try:
IGGINL.node_id = IGGINL.node_id.apply(string2list)
except:
pass
IGGINL['Node_0'] = IGGINL.node_id.str[0]
IGGINL['Node_1'] = IGGINL.node_id.str[1]
# --------------------------------------------
#load&preprocess entsog_df
# --------------------------------------------
entsog_dataset =
|
pd.read_csv(entsog_df_path)
|
pandas.read_csv
|
import pandas as pd
import numpy as np
import plotly.express as px
def calc(path, wave1, wave2):
data =
|
pd.read_csv(path)
|
pandas.read_csv
|
import os
import re
import nltk
from nltk import word_tokenize
from nltk.corpus import stopwords
from nltk.sentiment import SentimentIntensityAnalyzer
from wordcloud import WordCloud, STOPWORDS
import numpy as np
import pandas as pd
from pandas import DataFrame
import seaborn as sns
import matplotlib.pyplot as plt
from util.args import Args
def __download_stopwords():
# Collect all the related stopwords.
all_stopwords = list(stopwords.words('english'))
all_stopwords.extend(list(stopwords.words('german')))
all_stopwords.extend(list(stopwords.words('french')))
all_stopwords.extend(list(stopwords.words('russian')))
all_stopwords.extend(['https', 'youtube', 'VIDEO', 'youtu', 'CHANNEL', 'WATCH'])
return all_stopwords
USER_STOPWORDS = set(__download_stopwords())
def views_likes_dislikes_comments_normal_distribution(data: DataFrame, output_dir=Args.analysis_res_dir()):
data['likes_log'] = np.log(data['likes'] + 1)
data['view_count_log'] = np.log(data['view_count'] + 1)
data['dislikes_log'] = np.log(data['dislikes'] + 1)
data['comment_log'] = np.log(data['comment_count'] + 1)
plt.figure(figsize=(12, 6))
plt.subplot(221)
g1 = sns.distplot(data['view_count_log'])
g1.set_title("VIEWS LOG DISTRIBUTION", fontsize=16)
plt.subplot(224)
g2 = sns.distplot(data['likes_log'], color='green')
g2.set_title('LIKES LOG DISTRIBUTION', fontsize=16)
plt.subplot(223)
g3 = sns.distplot(data['dislikes_log'], color='r')
g3.set_title("DISLIKES LOG DISTRIBUTION", fontsize=16)
plt.subplot(222)
g4 = sns.distplot(data['comment_log'])
g4.set_title("COMMENTS LOG DISTRIBUTION", fontsize=16)
plt.subplots_adjust(wspace=0.2, hspace=0.4, top=0.9)
__save_figure(plt, output_dir, 'normal_distribution.png')
plt.close()
def correlation(data: DataFrame, output_dir=Args.analysis_res_dir()):
corr = data[['view_count', 'likes', 'dislikes', 'comment_count']].corr()
plot = sns.heatmap(corr, cmap='Blues', annot=True)
__save_figure(plot.get_figure(), output_dir, 'correlation.png')
plt.close()
def category_rating(data: DataFrame, output_dir=Args.analysis_res_dir()):
plt.figure(figsize=(30, 9))
plot = sns.countplot(data['category'], order=data['category'].value_counts().index)
plot.set_title("Counting the Video Category's ", fontsize=20)
plot.set_xlabel("", fontsize=20)
plot.set_ylabel("Count", fontsize=20)
__save_figure(plot.get_figure(), output_dir, 'category_rating.png')
plt.close()
# Plot the distribution of 'view_count','likes','dislikes','comment_count'
def distribution_boxplot(data: DataFrame, output_dir=Args.analysis_res_dir()):
view_count = np.log(data['view_count'] + 1)
likes = np.log(data['likes'] + 1)
dislikes = np.log(data['dislikes'] + 1)
comment = np.log(data['comment_count'] + 1)
data_count = pd.concat([view_count, likes, dislikes, comment], axis=1)
data_count.index = data['category']
data_count = data_count[(data_count != 0)]
plt.figure(figsize=(32, 20))
plt.subplot(2, 2, 1)
sns.boxplot(data_count.index, 'view_count', data=data_count, order=data['category'].value_counts().index)
plt.xticks(rotation=30, fontsize=12)
plt.subplot(2, 2, 2)
sns.boxplot(data_count.index, 'likes', data=data_count, order=data['category'].value_counts().index)
plt.xticks(rotation=30, fontsize=12)
plt.subplot(2, 2, 3)
sns.boxplot(data_count.index, 'dislikes', data=data_count, order=data['category'].value_counts().index)
plt.xticks(rotation=30, fontsize=12)
plt.subplot(2, 2, 4)
sns.boxplot(data_count.index, 'comment_count', data=data_count, order=data['category'].value_counts().index)
plt.xticks(rotation=30, fontsize=12)
__save_figure(plt, output_dir, 'distribution_boxplot.png')
plt.close()
# Plot the distribution of 'view_count','likes','dislikes','comment_count'
def distribution_plot(data: DataFrame, output_dir=Args.analysis_res_dir()):
general_view = pd.DataFrame(
data[['view_count', 'likes', 'dislikes', 'comment_count']].groupby(data['category']).mean())
plt.figure(figsize=(32, 20))
plt.subplot(2, 2, 1)
plt.plot(general_view.index, 'view_count', data=general_view, color='blue', linewidth=2, linestyle='solid')
plt.title('View_count vs Category')
plt.xticks(rotation=30)
plt.subplot(2, 2, 2)
plt.plot(general_view.index, 'likes', data=general_view, color='green', linewidth=2, linestyle='dotted')
plt.title('Likes vs Category')
plt.xticks(rotation=30)
plt.subplot(2, 2, 3)
plt.plot(general_view.index, 'dislikes', data=general_view, color='black', linewidth=2, linestyle='dashed')
plt.title('Dislikes vs Category')
plt.xticks(rotation=30)
plt.subplot(2, 2, 4)
plt.plot(general_view.index, 'comment_count', data=general_view, color='red', linewidth=2, linestyle='dashdot')
plt.title('Comment_count vs Category')
plt.xticks(rotation=30)
__save_figure(plt, output_dir, 'distribution_plot.png')
plt.close()
# The distribution of days that videos take to become popular
def distribution_of_days_preprocessing(data: DataFrame):
data['published_at'] = pd.to_datetime(data['published_at'], errors='coerce', format='%Y-%m-%dT%H:%M:%S.%fZ')
data['trending_date'] =
|
pd.to_datetime(data['trending_date'], errors='coerce', format='%y.%d.%m')
|
pandas.to_datetime
|
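# Quick illustration (made-up value, not part of the excerpt above): the '%y.%d.%m'
# format used in the completion puts the day before the month.
import pandas as pd

assert pd.to_datetime("20.15.03", format="%y.%d.%m") == pd.Timestamp("2020-03-15")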
# some calculations about corona values
# start date sometime in 3/2020
# added multiprocessing in 6/2020
import pandas as pd
import matplotlib.pyplot as plt
from multiprocessing import Process
# process files with these suffixes
localSuffixes = ["BW", "BY", "GER", "WORLD", "US"]
# flexinterval
flexInt = 7
def processWorker(localSuffix):
print (f"------ processing {localSuffix} -----\n")
#set the filename to process
dataFile = "coronaData_" + localSuffix + ".csv"
# read the data
total =
|
pd.read_csv(dataFile)
|
pandas.read_csv
|
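# Illustrative driver sketch (the original driver code is not shown in this excerpt):
# given the multiprocessing note in the header comment, processWorker is presumably
# launched once per suffix, e.g.:
def _run_all_suffixes():
    procs = [Process(target=processWorker, args=(suffix,)) for suffix in localSuffixes]
    for p in procs:
        p.start()
    for p in procs:
        p.join()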
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
import nose
import numpy as np
from numpy import nan
import pandas as pd
from distutils.version import LooseVersion
from pandas import (Index, Series, DataFrame, Panel, isnull,
date_range, period_range)
from pandas.core.index import MultiIndex
import pandas.core.common as com
from pandas.compat import range, zip
from pandas import compat
from pandas.util.testing import (assert_series_equal,
assert_frame_equal,
assert_panel_equal,
assert_equal)
import pandas.util.testing as tm
def _skip_if_no_pchip():
try:
from scipy.interpolate import pchip_interpolate # noqa
except ImportError:
raise nose.SkipTest('scipy.interpolate.pchip missing')
# ----------------------------------------------------------------------
# Generic types test cases
class Generic(object):
_multiprocess_can_split_ = True
def setUp(self):
pass
@property
def _ndim(self):
return self._typ._AXIS_LEN
def _axes(self):
""" return the axes for my object typ """
return self._typ._AXIS_ORDERS
def _construct(self, shape, value=None, dtype=None, **kwargs):
""" construct an object for the given shape
if value is specified use that if its a scalar
if value is an array, repeat it as needed """
if isinstance(shape, int):
shape = tuple([shape] * self._ndim)
if value is not None:
if np.isscalar(value):
if value == 'empty':
arr = None
# remove the info axis
kwargs.pop(self._typ._info_axis_name, None)
else:
arr = np.empty(shape, dtype=dtype)
arr.fill(value)
else:
fshape = np.prod(shape)
arr = value.ravel()
new_shape = fshape / arr.shape[0]
if fshape % arr.shape[0] != 0:
raise Exception("invalid value passed in _construct")
arr = np.repeat(arr, new_shape).reshape(shape)
else:
arr = np.random.randn(*shape)
return self._typ(arr, dtype=dtype, **kwargs)
def _compare(self, result, expected):
self._comparator(result, expected)
def test_rename(self):
# single axis
for axis in self._axes():
kwargs = {axis: list('ABCD')}
obj = self._construct(4, **kwargs)
# no values passed
# self.assertRaises(Exception, o.rename(str.lower))
# rename a single axis
result = obj.rename(**{axis: str.lower})
expected = obj.copy()
setattr(expected, axis, list('abcd'))
self._compare(result, expected)
# multiple axes at once
def test_get_numeric_data(self):
n = 4
kwargs = {}
for i in range(self._ndim):
kwargs[self._typ._AXIS_NAMES[i]] = list(range(n))
# get the numeric data
o = self._construct(n, **kwargs)
result = o._get_numeric_data()
self._compare(result, o)
# non-inclusion
result = o._get_bool_data()
expected = self._construct(n, value='empty', **kwargs)
self._compare(result, expected)
# get the bool data
arr = np.array([True, True, False, True])
o = self._construct(n, value=arr, **kwargs)
result = o._get_numeric_data()
self._compare(result, o)
# _get_numeric_data is includes _get_bool_data, so can't test for
# non-inclusion
def test_get_default(self):
# GH 7725
d0 = "a", "b", "c", "d"
d1 = np.arange(4, dtype='int64')
others = "e", 10
for data, index in ((d0, d1), (d1, d0)):
s = Series(data, index=index)
for i, d in zip(index, data):
self.assertEqual(s.get(i), d)
self.assertEqual(s.get(i, d), d)
self.assertEqual(s.get(i, "z"), d)
for other in others:
self.assertEqual(s.get(other, "z"), "z")
self.assertEqual(s.get(other, other), other)
def test_nonzero(self):
# GH 4633
# look at the boolean/nonzero behavior for objects
obj = self._construct(shape=4)
self.assertRaises(ValueError, lambda: bool(obj == 0))
self.assertRaises(ValueError, lambda: bool(obj == 1))
self.assertRaises(ValueError, lambda: bool(obj))
obj = self._construct(shape=4, value=1)
self.assertRaises(ValueError, lambda: bool(obj == 0))
self.assertRaises(ValueError, lambda: bool(obj == 1))
self.assertRaises(ValueError, lambda: bool(obj))
obj = self._construct(shape=4, value=np.nan)
self.assertRaises(ValueError, lambda: bool(obj == 0))
self.assertRaises(ValueError, lambda: bool(obj == 1))
self.assertRaises(ValueError, lambda: bool(obj))
# empty
obj = self._construct(shape=0)
self.assertRaises(ValueError, lambda: bool(obj))
# invalid behaviors
obj1 = self._construct(shape=4, value=1)
obj2 = self._construct(shape=4, value=1)
def f():
if obj1:
com.pprint_thing("this works and shouldn't")
self.assertRaises(ValueError, f)
self.assertRaises(ValueError, lambda: obj1 and obj2)
self.assertRaises(ValueError, lambda: obj1 or obj2)
self.assertRaises(ValueError, lambda: not obj1)
def test_numpy_1_7_compat_numeric_methods(self):
# GH 4435
# numpy in 1.7 tries to pass additional arguments to pandas functions
o = self._construct(shape=4)
for op in ['min', 'max', 'max', 'var', 'std', 'prod', 'sum', 'cumsum',
'cumprod', 'median', 'skew', 'kurt', 'compound', 'cummax',
'cummin', 'all', 'any']:
f = getattr(np, op, None)
if f is not None:
f(o)
def test_downcast(self):
# test close downcasting
o = self._construct(shape=4, value=9, dtype=np.int64)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
self._compare(result, o)
o = self._construct(shape=4, value=9.)
expected = o.astype(np.int64)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
self._compare(result, expected)
o = self._construct(shape=4, value=9.5)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
self._compare(result, o)
# are close
o = self._construct(shape=4, value=9.000000000005)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
expected = o.astype(np.int64)
self._compare(result, expected)
def test_constructor_compound_dtypes(self):
# GH 5191
# compound dtypes should raise not-implementederror
def f(dtype):
return self._construct(shape=3, dtype=dtype)
self.assertRaises(NotImplementedError, f, [("A", "datetime64[h]"),
("B", "str"),
("C", "int32")])
# these work (though results may be unexpected)
f('int64')
f('float64')
f('M8[ns]')
def check_metadata(self, x, y=None):
for m in x._metadata:
v = getattr(x, m, None)
if y is None:
self.assertIsNone(v)
else:
self.assertEqual(v, getattr(y, m, None))
def test_metadata_propagation(self):
# check that the metadata matches up on the resulting ops
o = self._construct(shape=3)
o.name = 'foo'
o2 = self._construct(shape=3)
o2.name = 'bar'
# TODO
# Once panel can do non-trivial combine operations
# (currently there is an a raise in the Panel arith_ops to prevent
# this, though it actually does work)
# can remove all of these try: except: blocks on the actual operations
# ----------
# preserving
# ----------
# simple ops with scalars
for op in ['__add__', '__sub__', '__truediv__', '__mul__']:
result = getattr(o, op)(1)
self.check_metadata(o, result)
# ops with like
for op in ['__add__', '__sub__', '__truediv__', '__mul__']:
try:
result = getattr(o, op)(o)
self.check_metadata(o, result)
except (ValueError, AttributeError):
pass
# simple boolean
for op in ['__eq__', '__le__', '__ge__']:
v1 = getattr(o, op)(o)
self.check_metadata(o, v1)
try:
self.check_metadata(o, v1 & v1)
except (ValueError):
pass
try:
self.check_metadata(o, v1 | v1)
except (ValueError):
pass
# combine_first
try:
result = o.combine_first(o2)
self.check_metadata(o, result)
except (AttributeError):
pass
# ---------------------------
# non-preserving (by default)
# ---------------------------
# add non-like
try:
result = o + o2
self.check_metadata(result)
except (ValueError, AttributeError):
pass
# simple boolean
for op in ['__eq__', '__le__', '__ge__']:
# this is a name matching op
v1 = getattr(o, op)(o)
v2 = getattr(o, op)(o2)
self.check_metadata(v2)
try:
self.check_metadata(v1 & v2)
except (ValueError):
pass
try:
self.check_metadata(v1 | v2)
except (ValueError):
pass
def test_head_tail(self):
# GH5370
o = self._construct(shape=10)
# check all index types
for index in [tm.makeFloatIndex, tm.makeIntIndex, tm.makeStringIndex,
tm.makeUnicodeIndex, tm.makeDateIndex,
tm.makePeriodIndex]:
axis = o._get_axis_name(0)
setattr(o, axis, index(len(getattr(o, axis))))
# Panel + dims
try:
o.head()
except (NotImplementedError):
raise nose.SkipTest('not implemented on {0}'.format(
o.__class__.__name__))
self._compare(o.head(), o.iloc[:5])
self._compare(o.tail(), o.iloc[-5:])
# 0-len
self._compare(o.head(0), o.iloc[0:0])
self._compare(o.tail(0), o.iloc[0:0])
# bounded
self._compare(o.head(len(o) + 1), o)
self._compare(o.tail(len(o) + 1), o)
# neg index
self._compare(o.head(-3), o.head(7))
self._compare(o.tail(-3), o.tail(7))
def test_sample(self):
# Fixes issue: 2419
o = self._construct(shape=10)
###
# Check behavior of random_state argument
###
# Check for stability when receives seed or random state -- run 10
# times.
for test in range(10):
seed = np.random.randint(0, 100)
self._compare(
o.sample(n=4, random_state=seed), o.sample(n=4,
random_state=seed))
self._compare(
o.sample(frac=0.7, random_state=seed), o.sample(
frac=0.7, random_state=seed))
self._compare(
o.sample(n=4, random_state=np.random.RandomState(test)),
o.sample(n=4, random_state=np.random.RandomState(test)))
self._compare(
o.sample(frac=0.7, random_state=np.random.RandomState(test)),
o.sample(frac=0.7, random_state=np.random.RandomState(test)))
# Check for error when random_state argument invalid.
with tm.assertRaises(ValueError):
o.sample(random_state='astring!')
###
# Check behavior of `frac` and `N`
###
# Giving both frac and N throws error
with tm.assertRaises(ValueError):
o.sample(n=3, frac=0.3)
# Check that raises right error for negative lengths
with tm.assertRaises(ValueError):
o.sample(n=-3)
with tm.assertRaises(ValueError):
o.sample(frac=-0.3)
# Make sure float values of `n` give error
with tm.assertRaises(ValueError):
o.sample(n=3.2)
# Check lengths are right
self.assertTrue(len(o.sample(n=4) == 4))
self.assertTrue(len(o.sample(frac=0.34) == 3))
self.assertTrue(len(o.sample(frac=0.36) == 4))
###
# Check weights
###
# Weight length must be right
with tm.assertRaises(ValueError):
o.sample(n=3, weights=[0, 1])
with tm.assertRaises(ValueError):
bad_weights = [0.5] * 11
o.sample(n=3, weights=bad_weights)
with tm.assertRaises(ValueError):
bad_weight_series = Series([0, 0, 0.2])
o.sample(n=4, weights=bad_weight_series)
# Check won't accept negative weights
with tm.assertRaises(ValueError):
bad_weights = [-0.1] * 10
o.sample(n=3, weights=bad_weights)
# Check inf and -inf throw errors:
with tm.assertRaises(ValueError):
weights_with_inf = [0.1] * 10
weights_with_inf[0] = np.inf
o.sample(n=3, weights=weights_with_inf)
with tm.assertRaises(ValueError):
weights_with_ninf = [0.1] * 10
weights_with_ninf[0] = -np.inf
o.sample(n=3, weights=weights_with_ninf)
# All zeros raises errors
zero_weights = [0] * 10
with tm.assertRaises(ValueError):
o.sample(n=3, weights=zero_weights)
# All missing weights
nan_weights = [np.nan] * 10
with tm.assertRaises(ValueError):
o.sample(n=3, weights=nan_weights)
# A few dataframe test with degenerate weights.
easy_weight_list = [0] * 10
easy_weight_list[5] = 1
df = pd.DataFrame({'col1': range(10, 20),
'col2': range(20, 30),
'colString': ['a'] * 10,
'easyweights': easy_weight_list})
sample1 = df.sample(n=1, weights='easyweights')
assert_frame_equal(sample1, df.iloc[5:6])
# Ensure proper error if string given as weight for Series, panel, or
# DataFrame with axis = 1.
s = Series(range(10))
with tm.assertRaises(ValueError):
s.sample(n=3, weights='weight_column')
panel = pd.Panel(items=[0, 1, 2], major_axis=[2, 3, 4],
minor_axis=[3, 4, 5])
with tm.assertRaises(ValueError):
panel.sample(n=1, weights='weight_column')
with tm.assertRaises(ValueError):
df.sample(n=1, weights='weight_column', axis=1)
# Check weighting key error
with tm.assertRaises(KeyError):
df.sample(n=3, weights='not_a_real_column_name')
# Check np.nan are replaced by zeros.
weights_with_nan = [np.nan] * 10
weights_with_nan[5] = 0.5
self._compare(
o.sample(n=1, axis=0, weights=weights_with_nan), o.iloc[5:6])
# Check None are also replaced by zeros.
weights_with_None = [None] * 10
weights_with_None[5] = 0.5
self._compare(
o.sample(n=1, axis=0, weights=weights_with_None), o.iloc[5:6])
# Check that re-normalizes weights that don't sum to one.
weights_less_than_1 = [0] * 10
weights_less_than_1[0] = 0.5
tm.assert_frame_equal(
df.sample(n=1, weights=weights_less_than_1), df.iloc[:1])
###
# Test axis argument
###
# Test axis argument
df = pd.DataFrame({'col1': range(10), 'col2': ['a'] * 10})
second_column_weight = [0, 1]
assert_frame_equal(
df.sample(n=1, axis=1, weights=second_column_weight), df[['col2']])
# Different axis arg types
assert_frame_equal(df.sample(n=1, axis='columns',
weights=second_column_weight),
df[['col2']])
weight = [0] * 10
weight[5] = 0.5
assert_frame_equal(df.sample(n=1, axis='rows', weights=weight),
df.iloc[5:6])
assert_frame_equal(df.sample(n=1, axis='index', weights=weight),
df.iloc[5:6])
# Check out of range axis values
with tm.assertRaises(ValueError):
df.sample(n=1, axis=2)
with tm.assertRaises(ValueError):
df.sample(n=1, axis='not_a_name')
with tm.assertRaises(ValueError):
s = pd.Series(range(10))
s.sample(n=1, axis=1)
# Test weight length compared to correct axis
with tm.assertRaises(ValueError):
df.sample(n=1, axis=1, weights=[0.5] * 10)
# Check weights with axis = 1
easy_weight_list = [0] * 3
easy_weight_list[2] = 1
df = pd.DataFrame({'col1': range(10, 20),
'col2': range(20, 30),
'colString': ['a'] * 10})
sample1 = df.sample(n=1, axis=1, weights=easy_weight_list)
assert_frame_equal(sample1, df[['colString']])
# Test default axes
p = pd.Panel(items=['a', 'b', 'c'], major_axis=[2, 4, 6],
minor_axis=[1, 3, 5])
assert_panel_equal(
p.sample(n=3, random_state=42), p.sample(n=3, axis=1,
random_state=42))
assert_frame_equal(
df.sample(n=3, random_state=42), df.sample(n=3, axis=0,
random_state=42))
# Test that function aligns weights with frame
df = DataFrame(
{'col1': [5, 6, 7],
'col2': ['a', 'b', 'c'], }, index=[9, 5, 3])
s = Series([1, 0, 0], index=[3, 5, 9])
assert_frame_equal(df.loc[[3]], df.sample(1, weights=s))
# Weights have index values to be dropped because not in
# sampled DataFrame
s2 = Series([0.001, 0, 10000], index=[3, 5, 10])
assert_frame_equal(df.loc[[3]], df.sample(1, weights=s2))
# Weights have empty values to be filed with zeros
s3 = Series([0.01, 0], index=[3, 5])
assert_frame_equal(df.loc[[3]], df.sample(1, weights=s3))
# No overlap in weight and sampled DataFrame indices
s4 = Series([1, 0], index=[1, 2])
with tm.assertRaises(ValueError):
df.sample(1, weights=s4)
def test_size_compat(self):
# GH8846
# size property should be defined
o = self._construct(shape=10)
self.assertTrue(o.size == np.prod(o.shape))
self.assertTrue(o.size == 10 ** len(o.axes))
def test_split_compat(self):
# xref GH8846
o = self._construct(shape=10)
self.assertTrue(len(np.array_split(o, 5)) == 5)
self.assertTrue(len(np.array_split(o, 2)) == 2)
def test_unexpected_keyword(self): # GH8597
from pandas.util.testing import assertRaisesRegexp
df = DataFrame(np.random.randn(5, 2), columns=['jim', 'joe'])
ca = pd.Categorical([0, 0, 2, 2, 3, np.nan])
ts = df['joe'].copy()
ts[2] = np.nan
with assertRaisesRegexp(TypeError, 'unexpected keyword'):
df.drop('joe', axis=1, in_place=True)
with assertRaisesRegexp(TypeError, 'unexpected keyword'):
df.reindex([1, 0], inplace=True)
with assertRaisesRegexp(TypeError, 'unexpected keyword'):
ca.fillna(0, inplace=True)
with assertRaisesRegexp(TypeError, 'unexpected keyword'):
ts.fillna(0, in_place=True)
class TestSeries(tm.TestCase, Generic):
_typ = Series
_comparator = lambda self, x, y: assert_series_equal(x, y)
def setUp(self):
self.ts = tm.makeTimeSeries() # Was at top level in test_series
self.ts.name = 'ts'
self.series = tm.makeStringSeries()
self.series.name = 'series'
def test_rename_mi(self):
s = Series([11, 21, 31],
index=MultiIndex.from_tuples(
[("A", x) for x in ["a", "B", "c"]]))
s.rename(str.lower)
def test_get_numeric_data_preserve_dtype(self):
# get the numeric data
o = Series([1, 2, 3])
result = o._get_numeric_data()
self._compare(result, o)
o = Series([1, '2', 3.])
result = o._get_numeric_data()
expected = Series([], dtype=object, index=pd.Index([], dtype=object))
self._compare(result, expected)
o = Series([True, False, True])
result = o._get_numeric_data()
self._compare(result, o)
o = Series([True, False, True])
result = o._get_bool_data()
self._compare(result, o)
o = Series(date_range('20130101', periods=3))
result = o._get_numeric_data()
expected = Series([], dtype='M8[ns]', index=pd.Index([], dtype=object))
self._compare(result, expected)
def test_nonzero_single_element(self):
# allow single item via bool method
s = Series([True])
self.assertTrue(s.bool())
s = Series([False])
self.assertFalse(s.bool())
# single item nan to raise
for s in [Series([np.nan]), Series([pd.NaT]), Series([True]),
Series([False])]:
self.assertRaises(ValueError, lambda: bool(s))
for s in [Series([np.nan]), Series([pd.NaT])]:
self.assertRaises(ValueError, lambda: s.bool())
# multiple bool are still an error
for s in [Series([True, True]), Series([False, False])]:
self.assertRaises(ValueError, lambda: bool(s))
self.assertRaises(ValueError, lambda: s.bool())
# single non-bool are an error
for s in [Series([1]), Series([0]), Series(['a']), Series([0.0])]:
self.assertRaises(ValueError, lambda: bool(s))
self.assertRaises(ValueError, lambda: s.bool())
def test_metadata_propagation_indiv(self):
# check that the metadata matches up on the resulting ops
o = Series(range(3), range(3))
o.name = 'foo'
o2 = Series(range(3), range(3))
o2.name = 'bar'
result = o.T
self.check_metadata(o, result)
# resample
ts = Series(np.random.rand(1000),
index=date_range('20130101', periods=1000, freq='s'),
name='foo')
result = ts.resample('1T').mean()
self.check_metadata(ts, result)
result = ts.resample('1T').min()
self.check_metadata(ts, result)
result = ts.resample('1T').apply(lambda x: x.sum())
self.check_metadata(ts, result)
_metadata = Series._metadata
_finalize = Series.__finalize__
Series._metadata = ['name', 'filename']
o.filename = 'foo'
o2.filename = 'bar'
def finalize(self, other, method=None, **kwargs):
for name in self._metadata:
if method == 'concat' and name == 'filename':
value = '+'.join([getattr(
o, name) for o in other.objs if getattr(o, name, None)
])
object.__setattr__(self, name, value)
else:
object.__setattr__(self, name, getattr(other, name, None))
return self
Series.__finalize__ = finalize
result = pd.concat([o, o2])
self.assertEqual(result.filename, 'foo+bar')
self.assertIsNone(result.name)
# reset
Series._metadata = _metadata
Series.__finalize__ = _finalize
def test_interpolate(self):
ts = Series(np.arange(len(self.ts), dtype=float), self.ts.index)
ts_copy = ts.copy()
ts_copy[5:10] = np.NaN
linear_interp = ts_copy.interpolate(method='linear')
self.assert_numpy_array_equal(linear_interp, ts)
ord_ts = Series([d.toordinal() for d in self.ts.index],
index=self.ts.index).astype(float)
ord_ts_copy = ord_ts.copy()
ord_ts_copy[5:10] = np.NaN
time_interp = ord_ts_copy.interpolate(method='time')
self.assert_numpy_array_equal(time_interp, ord_ts)
# try time interpolation on a non-TimeSeries
# Only raises ValueError if there are NaNs.
non_ts = self.series.copy()
non_ts[0] = np.NaN
self.assertRaises(ValueError, non_ts.interpolate, method='time')
def test_interp_regression(self):
tm._skip_if_no_scipy()
_skip_if_no_pchip()
ser = Series(np.sort(np.random.uniform(size=100)))
# interpolate at new_index
new_index = ser.index.union(Index([49.25, 49.5, 49.75, 50.25, 50.5,
50.75]))
interp_s = ser.reindex(new_index).interpolate(method='pchip')
# does not blow up, GH5977
interp_s[49:51]
def test_interpolate_corners(self):
s = Series([np.nan, np.nan])
assert_series_equal(s.interpolate(), s)
s = Series([]).interpolate()
assert_series_equal(s.interpolate(), s)
tm._skip_if_no_scipy()
s = Series([np.nan, np.nan])
assert_series_equal(s.interpolate(method='polynomial', order=1), s)
s = Series([]).interpolate()
assert_series_equal(s.interpolate(method='polynomial', order=1), s)
def test_interpolate_index_values(self):
s = Series(np.nan, index=np.sort(np.random.rand(30)))
s[::3] = np.random.randn(10)
vals = s.index.values.astype(float)
result = s.interpolate(method='index')
expected = s.copy()
bad = isnull(expected.values)
good = ~bad
expected = Series(np.interp(vals[bad], vals[good],
s.values[good]),
index=s.index[bad])
assert_series_equal(result[bad], expected)
# 'values' is synonymous with 'index' for the method kwarg
other_result = s.interpolate(method='values')
assert_series_equal(other_result, result)
assert_series_equal(other_result[bad], expected)
def test_interpolate_non_ts(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
with tm.assertRaises(ValueError):
s.interpolate(method='time')
# New interpolation tests
def test_nan_interpolate(self):
s = Series([0, 1, np.nan, 3])
result = s.interpolate()
expected = Series([0., 1., 2., 3.])
assert_series_equal(result, expected)
tm._skip_if_no_scipy()
result = s.interpolate(method='polynomial', order=1)
assert_series_equal(result, expected)
def test_nan_irregular_index(self):
s = Series([1, 2, np.nan, 4], index=[1, 3, 5, 9])
result = s.interpolate()
expected = Series([1., 2., 3., 4.], index=[1, 3, 5, 9])
assert_series_equal(result, expected)
def test_nan_str_index(self):
s = Series([0, 1, 2, np.nan], index=list('abcd'))
result = s.interpolate()
expected = Series([0., 1., 2., 2.], index=list('abcd'))
assert_series_equal(result, expected)
def test_interp_quad(self):
tm._skip_if_no_scipy()
sq = Series([1, 4, np.nan, 16], index=[1, 2, 3, 4])
result = sq.interpolate(method='quadratic')
expected = Series([1., 4., 9., 16.], index=[1, 2, 3, 4])
assert_series_equal(result, expected)
def test_interp_scipy_basic(self):
tm._skip_if_no_scipy()
s = Series([1, 3, np.nan, 12, np.nan, 25])
# slinear
expected = Series([1., 3., 7.5, 12., 18.5, 25.])
result = s.interpolate(method='slinear')
assert_series_equal(result, expected)
result = s.interpolate(method='slinear', downcast='infer')
assert_series_equal(result, expected)
# nearest
expected = Series([1, 3, 3, 12, 12, 25])
result = s.interpolate(method='nearest')
assert_series_equal(result, expected.astype('float'))
result = s.interpolate(method='nearest', downcast='infer')
assert_series_equal(result, expected)
# zero
expected = Series([1, 3, 3, 12, 12, 25])
result = s.interpolate(method='zero')
assert_series_equal(result, expected.astype('float'))
result = s.interpolate(method='zero', downcast='infer')
assert_series_equal(result, expected)
# quadratic
expected = Series([1, 3., 6.769231, 12., 18.230769, 25.])
result = s.interpolate(method='quadratic')
assert_series_equal(result, expected)
result = s.interpolate(method='quadratic', downcast='infer')
assert_series_equal(result, expected)
# cubic
expected = Series([1., 3., 6.8, 12., 18.2, 25.])
result = s.interpolate(method='cubic')
assert_series_equal(result, expected)
def test_interp_limit(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
expected = Series([1., 3., 5., 7., np.nan, 11.])
result = s.interpolate(method='linear', limit=2)
assert_series_equal(result, expected)
def test_interp_limit_forward(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
# Provide 'forward' (the default) explicitly here.
expected = Series([1., 3., 5., 7., np.nan, 11.])
result = s.interpolate(method='linear', limit=2,
limit_direction='forward')
assert_series_equal(result, expected)
result = s.interpolate(method='linear', limit=2,
limit_direction='FORWARD')
|
assert_series_equal(result, expected)
|
pandas.util.testing.assert_series_equal
|
import math
import warnings
import numpy as np
import pandas as pd
import scipy.signal
import matplotlib.pyplot as plt
from typing import Optional, Union, List
from tqdm import tqdm
from signalanalysis.signalanalysis import general
from signalanalysis import signalplot
from signalanalysis import tools
class Egm(general.Signal):
"""Base class for EGM data, inheriting from :class:`signalanalysis.signalanalysis.general.Signal`
See Also
--------
:py:class:`signalanalysis.signalanalysis.general.Signal`
Methods
-------
read(folder)
Extract data from unipolar and bipolar DxL files
get_n_beats()
Supersedes generalised method to calculate n_beats
get_at
Calculates the activation time of the EGM
"""
def __init__(self,
data_location_uni: str,
data_location_bi: str = None,
**kwargs):
"""Sub-method for __init___
Will initialise a EGM signal class
TODO: Fix the self.data reference problem (see
https://stackoverflow.com/questions/6057130/python-deleting-a-class-attribute-in-a-subclass)
See Also
--------
:py:meth:`signalanalysis.signalanalysis.general.Signal.__init__` : Base __init__ method
:py:meth:`signalanalysis.signalanalysis.general.Signal.apply_filter` : Filtering method
:py:meth:`signalanalysis.signalanalysis.general.Signal.get_n_beats` : Beat calculation method
Notes
-----
This used to break the `Liskov substitution principle
<https://en.wikipedia.org/wiki/Liskov_substitution_principle>`_ by removing the single `data` attribute and
replacing it with `data_uni` and `data_bi`; now it (aims to) simply point the `.data` attribute at the
`.data_uni` attribute
"""
super(Egm, self).__init__(**kwargs)
self.t_peaks = pd.DataFrame(dtype=float)
self.n_beats = pd.Series(dtype=int)
# delattr(self, 'data')
self.data_uni = pd.DataFrame(dtype=float)
self.data_bi = pd.DataFrame(dtype=float)
self.beats_uni = dict()
self.beats = self.beats_uni
self.beats_bi = dict()
self.at = pd.DataFrame(dtype=float)
self.rt = pd.DataFrame(dtype=float)
self.ari = pd.DataFrame(dtype=float)
self.dvdt = pd.DataFrame(dtype=float)
self.qrs_start = pd.DataFrame(dtype=float)
self.qrs_end = pd.DataFrame(dtype=float)
self.qrs_duration = pd.DataFrame(dtype=float)
self.read(data_location_uni, data_location_bi, **kwargs)
if self.filter is not None:
self.apply_filter(**kwargs)
self.data = self.data_uni
# self.get_beats(**kwargs)
def read(self,
data_location_uni: str,
data_location_bi: Optional[str] = None,
drop_empty_rows: bool = True,
**kwargs):
""" Read the DxL data for unipolar and bipolar data for EGMs
TODO: Add functionality to read directly from folders, rather than .csv from Matlab
Parameters
----------
data_location_uni : str
Location of unipolar data. Currently only coded to deal with a saved .csv file
data_location_bi : str, optional
Location of bipolar data. Currently only coded to deal with a saved .csv file. Doesn't need to be passed,
default=None
drop_empty_rows : bool, optional
Whether to drop empty data rows from the data, default=True
See Also
--------
:py:meth:`signalanalysis.signalanalysis.egm.Egm.read_from_csv` : Method to read data from Matlab csv
"""
if data_location_uni.endswith('.csv'):
if data_location_bi is not None:
assert data_location_bi.endswith('.csv')
self.read_from_csv(data_location_uni, data_location_bi, **kwargs)
else:
raise IOError("Not coded for this type of input")
if drop_empty_rows:
# PyCharm flags a type error below (bool has no .all() method), but a clean fix is elusive.
# The commented-out alternative replaces *all* 0.00 values, which would put NaN into an
# otherwise normal trace wherever it happens to reach 0.00, which is not what we want.
# self.data_uni = (self.data_uni.where(self.data_uni != 0, axis=0)).dropna(axis=1, how='all')
self.data_uni = self.data_uni.loc[:, ~(self.data_uni == 0).all(axis=0)]
if not self.data_bi.empty:
self.data_bi = self.data_bi.loc[:, ~(self.data_bi == 0).all(axis=0)]
assert self.data_uni.shape == self.data_bi.shape, "Error in dropping rows"
return None
def read_from_csv(self,
data_location_uni: str,
data_location_bi: Optional[str],
frequency: float):
""" Read EGM data that has been saved from Matlab
Parameters
----------
data_location_uni : str
Name of the .csv file containing the unipolar data
data_location_bi : str, optional
Name of the .csv file containing the bipolar data
frequency : float
The frequency of the data recording in Hz
Notes
-----
It is not technically required to pass the bipolar data, but it is presented here as a required keyword to
preserve the usage of calling as `read_from_csv(unipolar, bipolar, frequency)`, rather than breaking the data
files arguments up or requiring keywords.
The .csv file should be saved with column representing an individual EGM trace, and each row representing a
single instance in time, i.e.
.. code-block::
egm1(t1), egm2(t1), egm3(t1), ...
egm1(t2), egm2(t2), egm3(t2), ...
...
egm1(tn), egm2(tn), egm3(tn)
Historically, `frequency` has been set to 2034.5 Hz for the importprecision data, an example of which
can be accessed via ``signalanalysis.data.datafiles.EGM_UNIPOLAR`` and ``signalanalysis.data.datafiles.EGM_BIPOLAR``.
"""
self.data_uni =
|
pd.read_csv(data_location_uni, header=None)
|
pandas.read_csv
|
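# Illustrative usage sketch (assumptions: the signalanalysis package and the bundled
# example files referenced in the docstring are installed; keyword handling beyond
# `frequency` depends on the base Signal class, which is not shown in this excerpt).
from signalanalysis.data import datafiles
from signalanalysis.signalanalysis.egm import Egm

egm = Egm(datafiles.EGM_UNIPOLAR, datafiles.EGM_BIPOLAR, frequency=2034.5)
print(egm.data_uni.shape, egm.data_bi.shape)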
# coding: utf-8
# # Bike Sharing Dataset Linear Modeling
#
# + Based on Bike Sharing dataset from [UCI Machine Learning Repository](https://archive.ics.uci.edu/ml/datasets/Bike+Sharing+Dataset)
# + This notebook is based upon the hourly data file, i.e. hour.csv
# + This notebook showcases linear modeling using linear regression
# ### Problem Statement
# Given the Bike Sharing dataset with hourly level information of bikes along with weather and other attributes, model a system which can predict the bike count.
# ## Import required packages
# In[1]:
get_ipython().magic('matplotlib inline')
# data manipulation
import numpy as np
import pandas as pd
# modeling utilities
import scipy.stats as stats
from sklearn import metrics
from sklearn import preprocessing
from sklearn import linear_model
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_predict
# plotting libraries
import matplotlib.pyplot as plt
import seaborn as sn
sn.set_style('whitegrid')
sn.set_context('talk')
params = {'legend.fontsize': 'x-large',
'figure.figsize': (30, 10),
'axes.labelsize': 'x-large',
'axes.titlesize':'x-large',
'xtick.labelsize':'x-large',
'ytick.labelsize':'x-large'}
plt.rcParams.update(params)
# ## Load Dataset
# In[2]:
hour_df = pd.read_csv('hour.csv')
print("Shape of dataset::{}".format(hour_df.shape))
# ## Preprocessing
# + Standardize column names
# + Typecast attributes
# + Encode Categoricals using One Hot Encoding
# ### Standardize Column Names
# In[3]:
hour_df.rename(columns={'instant':'rec_id',
'dteday':'datetime',
'holiday':'is_holiday',
'workingday':'is_workingday',
'weathersit':'weather_condition',
'hum':'humidity',
'mnth':'month',
'cnt':'total_count',
'hr':'hour',
'yr':'year'},inplace=True)
# ### Typecast Attributes
# In[4]:
# date time conversion
hour_df['datetime'] = pd.to_datetime(hour_df.datetime)
# categorical variables
hour_df['season'] = hour_df.season.astype('category')
hour_df['is_holiday'] = hour_df.is_holiday.astype('category')
hour_df['weekday'] = hour_df.weekday.astype('category')
hour_df['weather_condition'] = hour_df.weather_condition.astype('category')
hour_df['is_workingday'] = hour_df.is_workingday.astype('category')
hour_df['month'] = hour_df.month.astype('category')
hour_df['year'] = hour_df.year.astype('category')
hour_df['hour'] = hour_df.hour.astype('category')
#
# ### Encode Categoricals (One Hot Encoding)
# In[5]:
def fit_transform_ohe(df,col_name):
"""This function performs one hot encoding for the specified
column.
Args:
df(pandas.DataFrame): the data frame containing the mentioned column name
col_name: the column to be one hot encoded
Returns:
tuple: label_encoder, one_hot_encoder, transformed column as pandas Series
"""
# label encode the column
le = preprocessing.LabelEncoder()
le_labels = le.fit_transform(df[col_name])
df[col_name+'_label'] = le_labels
# one hot encoding
ohe = preprocessing.OneHotEncoder()
feature_arr = ohe.fit_transform(df[[col_name+'_label']]).toarray()
feature_labels = [col_name+'_'+str(cls_label) for cls_label in le.classes_]
features_df =
|
pd.DataFrame(feature_arr, columns=feature_labels)
|
pandas.DataFrame
|
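# Illustrative usage (the 'season' column name comes from the notebook above; the
# return unpacking follows the docstring, whose tail is not shown in this excerpt).
season_le, season_ohe, season_df = fit_transform_ohe(hour_df, 'season')
print(season_df.columns.tolist())  # e.g. ['season_1', 'season_2', 'season_3', 'season_4']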
import time
import numpy as np
import pandas as pd
from sklearn.metrics import log_loss
from sklearn.preprocessing import scale
from sklearn.decomposition import pca
import fancyimpute
from sklearn.preprocessing import StandardScaler
import xgbfir
from matplotlib.pylab import rcParams
rcParams['figure.figsize'] = 12, 4
import config
import work_data
import models
import os
def main():
np.random.seed(42)
logger = config.config_logger(__name__, 10)
t0 = time.time()
train_client_path = './data/raw/csv/train_clientes.csv'
train_reque_path = './data/raw/csv/train_requerimientos.csv'
test_client_path = './data/raw/csv/test_clientes.csv'
test_reque_path = './data/raw/csv/test_requerimientos.csv'
output_path = './output/'
do_merge = False
write_impute_test = False
write_output = False
add_variables = False
version = 6
logger.info('Beginning execution')
logger.info('Load dataframes')
test_client = pd.read_csv(test_client_path, header=0)
test_reque = pd.read_csv(test_reque_path, header=0)
main_client = pd.read_csv(train_client_path, header=0)
main_reque =
|
pd.read_csv(train_reque_path, header=0)
|
pandas.read_csv
|
import pandas as pd
import numpy as np
from datetime import datetime
from datetime import timedelta
import json
import os
import os.path
import pytz
import sys
from helpers import *
# global_dir = "/Volumes/dav/MD2K Processed Data/smoking-lvm-cleaned-data/"
global_dir = "../cleaned-data/"
python_version = int(sys.version[0])
def smoking_episode(participant_zip, participant_id):
# Inputs: zipfile, participant_id
# Output: add to csv (prints when done)
zip_namelist = participant_zip.namelist()
csv_marker = 'PUFFMARKER_SMOKING_EPISODE'
csv_matching = [s for s in zip_namelist if csv_marker in s]
csv_matching = [s for s in csv_matching if '.csv' in s]
if csv_matching == []:
print("No PUFFMARKER_SMOKING_EPISODE data for participant " + str(participant_id))
return
csv_file = participant_zip.open(csv_matching[0])
temp = csv_file.read()
if not temp or temp == 'BZh9\x17rE8P\x90\x00\x00\x00\x00':
print ('Empty file for smoking episode')
else:
csv_file = participant_zip.open(csv_matching[0])
newfile =
|
pd.read_csv(csv_file, header=None)
|
pandas.read_csv
|
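# A minimal driver sketch (an assumption for illustration; the archive name and participant id
# are hypothetical): smoking_episode() expects an open zipfile.ZipFile and a participant id,
# and reads the PUFFMARKER_SMOKING_EPISODE csv found inside the archive with pandas.read_csv.
import zipfile
with zipfile.ZipFile(global_dir + 'participant_9001.zip') as participant_zip:
    smoking_episode(participant_zip, 9001)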
##############################################################################
# PyLipID: A python module for analysing protein-lipid interactions
#
# Author: <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
##############################################################################
from collections import defaultdict
from itertools import product
from functools import partial
import pickle
import os
import re
import warnings
import mdtraj as md
import numpy as np
np.seterr(all='ignore')
from scipy.sparse import coo_matrix
import pandas as pd
from tqdm import trange, tqdm
from p_tqdm import p_map
from ..func import cal_contact_residues
from ..func import Duration
from ..func import cal_lipidcount, cal_occupancy
from ..func import get_node_list
from ..func import collect_bound_poses
from ..func import analyze_pose_wrapper, calculate_koff_wrapper, calculate_surface_area_wrapper
from ..plot import plot_surface_area, plot_binding_site_data
from ..plot import plot_residue_data, plot_corrcoef, plot_residue_data_logo
from ..util import check_dir, write_PDB, write_pymol_script, sparse_corrcoef, get_traj_info
class LipidInteraction:
def __init__(self, trajfile_list, cutoffs=[0.475, 0.7], lipid="CHOL", topfile_list=None, lipid_atoms=None,
nprot=1, resi_offset=0, save_dir=None, timeunit="us", stride=1, dt_traj=None):
"""The main class that handles calculation and controls workflow.
``LipidInteraction`` reads trajectory information via `mdtraj.load()`, so it supports most of the trajectory
formats. ``LipidInteraction`` calculates lipid interactions with both protein residues and the calculated
binding sites, and provides a couple of assisting functions to plot data and present data in various forms.
The methods of ``LipidInteraction`` can be divided into three groups based on their roles: one for the calculation
of interactions with protein residues, one for binding sites, and the last containing assisting functions for
plotting and generating data. Each of the first two groups has a core function to collect/calculate the required
data for the rest of the functions in that group, i.e. ``collect_residue_contacts`` that builds lipid index for
residues as a function of time for residue analysis; and ``compute_binding_sites`` that calculates the binding
sites using the interaction network of the residues. The rest of the methods in each group are independent of
each other.
``LipidInteraction`` also has an attribute, named ``dataset``, which stores the calculated interaction data in
a `pandas.DataFrame <https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html>`_ object
and updates automatically after calculation. It records interaction data for protein residues by
rows, including, for each residue, the interaction residence times, averaged durations, occupancy and lipid
count etc., and binding site IDs and the various interaction data of the belonging binding site.
For the computationally demanding functions, i.e. ``compute_residue_koff``, ``compute_site_koff``,
``analyze_bound_poses``, and ``compute_surface_area``, PyLipID uses the python multiprocessing library
to speed up the calculation. Users can specify the number of CPUs these functions can use, otherwise all the
CPUs in the system will be used by default.
Parameters
----------
trajfile_list : str or a list of str
Trajectory filename(s). Read by mdtraj.load() to obtain trajectory information.
cutoffs : list of two scalars or a scalar, default=[0.475, 0.7]
Cutoff value(s) for defining contacts. When a list of two scalars is provided, the dual-cutoff scheme
will be used. A contact in the dual-cutoff scheme starts when a lipid gets closer than the lower cutoff,
and ends when this lipid moves farther than the upper cutoff. The duration between the two time points is
the duration of this contact.
lipid : str, default="CHOL"
Lipid name in topology.
topfile_list : str or a list of str, default=None
Topology filename(s). Most trajectory formats do not contain topology information. Provide either
the path to a RCSB PDB file, a trajectory, or a topology for each trajectory in `trajfile_list`
for the topology information. See `mdtraj.load() <https://mdtraj.org>`_. for more information.
lipid_atoms : list of str, default=None
Lipid atom names. Only interactions of the provided atoms will be considered for the calculation of contacts.
If None, all atoms of the lipid molecule will be used.
nprot : int, default=1
Number of protein copies in the system. If the system has N copies of the protein, 'nprot=N' will report
averaged values from the N copies, but 'nprot=1' will report interaction values for each copy.
resi_offset : int, default=0
Shift residue index in the reported results from what is shown in the topology. Can be useful for
MARTINI force field.
save_dir : str, default=None
The root directory to store the generated data. By default, a directory Interaction_{lipid} will be created
in the current working directory, under which all the generated data are stored.
timeunit : {"us", "ns"}, default="us"
The time unit used for reporting results. "us" is micro-second and "ns" is nanosecond.
stride : int, default=1
Only read every stride-th frame. The same stride in mdtraj.load().
dt_traj : float, default=None
Timestep of trajectories. It is required when trajectories do not have timestep information. Not needed for
trajectory formats of e.g. xtc, trr etc. If None, timestep information will be taken from the trajectories.
"""
self._trajfile_list = np.atleast_1d(trajfile_list)
if len(np.atleast_1d(topfile_list)) == len(self._trajfile_list):
self._topfile_list = np.atleast_1d(topfile_list)
elif len(self._trajfile_list) > 1 and len(np.atleast_1d(topfile_list)) == 1:
self._topfile_list = [topfile_list for dummy in self._trajfile_list]
else:
raise ValueError(
"topfile_list should either have the same length as trajfile_list or have one valid file name.")
if len(np.atleast_1d(cutoffs)) == 1:
self._cutoffs = np.array([np.atleast_1d(cutoffs)[0] for dummy in range(2)])
elif len(np.atleast_1d(cutoffs)) == 2:
self._cutoffs = np.sort(np.array(cutoffs, dtype=float))
else:
raise ValueError("cutoffs should be either a scalar or a list of two scalars.")
self._dt_traj = dt_traj
self._lipid = lipid
self._lipid_atoms = lipid_atoms
self._nprot = int(nprot)
self._timeunit = timeunit
self._stride = int(stride)
self._resi_offset = resi_offset
self.dataset = pd.DataFrame()
self._save_dir = check_dir(os.getcwd(), "Interaction_{}".format(self._lipid)) if save_dir is None \
else check_dir(save_dir, "Interaction_{}".format(self._lipid))
return
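# Illustrative workflow sketch (an assumption added for clarity, not part of the PyLipID API):
# the core calculation sequence described in the docstring above, using hypothetical file names.
def _example_lipid_workflow():
    li = LipidInteraction(["run1.xtc"], topfile_list=["run1.gro"], lipid="CHOL",
                          nprot=1, timeunit="us")
    li.collect_residue_contacts()               # build lipid indices and the correlation matrix
    li.compute_residue_duration()               # residue contact durations (dual-cutoff scheme)
    li.compute_residue_koff(plot_data=False)    # residue koff and residence time
    li.compute_binding_nodes(print_data=False)  # binding sites from the residue network
    li.compute_site_duration()
    li.compute_site_koff(plot_data=False)
    return li.dataset                           # summary DataFrame updated by the calls above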
#############################################
# attributes
#############################################
def dataset(self):
"""Summary of lipid interaction stored in a pandas.DataFrame() object."""
return self.dataset
@property
def residue_list(self):
"""A list of Residue names."""
return self._residue_list
@property
def node_list(self):
"""A list of binding site residue indices. """
return self._node_list
@property
def lipid(self):
"""Lipid residue name."""
return self._lipid
@property
def lipid_atoms(self):
"""Lipid atom names"""
return self._lipid_atoms
@property
def cutoffs(self):
"""Cutoffs used for calculating contacts. """
return self._cutoffs
@property
def nprot(self):
"""Number of protein copies in system. """
return self._nprot
@property
def stride(self):
"""Stride"""
return self._stride
@property
def trajfile_list(self):
"""Trajectory filenames """
return self._trajfile_list
@property
def topfile_list(self):
"""Topology filenames"""
return self._topfile_list
@property
def dt_traj(self):
"""Trajectory timestep"""
return self._dt_traj
@property
def resi_offset(self):
"""Residue index offset"""
return self._resi_offset
@property
def save_dir(self):
"""Root directory for the generated data."""
return self._save_dir
@property
def timeunit(self):
"""Time unit used for reporting results. """
return self._timeunit
def koff(self, residue_id=None, residue_name=None):
"""Residue koff"""
if residue_id is not None and residue_name is not None:
assert self.dataset[self.dataset["Residue ID"] == residue_id]["Residue"] == residue_name, \
"residue_id and residue_name are pointing to different residues!"
return self._koff[residue_id]
elif residue_id is not None:
return self._koff[residue_id]
elif residue_name is not None:
return self._koff[self._residue_map[residue_name]]
def res_time(self, residue_id=None, residue_name=None):
"""Residue residence time"""
if residue_id is not None and residue_name is not None:
assert self.dataset[self.dataset["Residue ID"] == residue_id]["Residue"] == residue_name, \
"residue_id and residue_name are pointing to different residues!"
return self._res_time[residue_id]
elif residue_id is not None:
return self._res_time[residue_id]
elif residue_name is not None:
return self._res_time[self._residue_map[residue_name]]
def koff_bs(self, bs_id):
"""Binding site koff"""
return self._koff_BS[bs_id]
def res_time_bs(self, bs_id):
"""Binding site residence time"""
return self._res_time_BS[bs_id]
def residue(self, residue_id=None, residue_name=None, print_data=True):
"""Obtain the lipid interaction information for a residue
Use either residue_id or residue_name to indicate the residue identity.
Return the interaction information in a pandas.DataFrame object.
Parameters
----------
residue_id : int or list of int, default=None
The residue ID that is used by PyLipID for identifying residues. The ID starts from 0, i.e. the ID
of N-th residue is (N-1). If None, all residues are selected.
residue_name : str or list of str, default=None
The residue name as stored in PyLipID dataset. The residue name is in the format of resi+resn
Returns
-------
df : pandas.DataFrame
A pandas.DataFrame of interaction information of the residue.
"""
if residue_id is not None and residue_name is not None:
assert self.dataset[self.dataset["Residue ID"] == residue_id]["Residue"] == residue_name, \
"residue_id and residue_name are pointing to different residues!"
df = self.dataset[self.dataset["Residue ID"] == residue_id]
elif residue_id is not None:
df = self.dataset[self.dataset["Residue ID"] == residue_id]
elif residue_name is not None:
df = self.dataset[self.dataset["Residue"] == residue_name]
if print_data:
print(df)
return df
def binding_site(self, binding_site_id, print_data=True, sort_residue="Residence Time"):
"""Obtain the lipid interaction information for a binding site.
Use binding site ID to access the information. Return the lipid interaction information of the
binding site in a pandas.DataFrame object. If print_data is True, the binding site info will be
formatted and print out.
"""
df = self.dataset[self.dataset["Binding Site ID"] == binding_site_id].sort_values(by="Residence Time")
if print_data:
text = self._format_BS_print_info(binding_site_id, self._node_list[binding_site_id], sort_residue)
print(text)
return df
########################################
# interaction calculation
########################################
def collect_residue_contacts(self):
r"""Create contacting lipid index for residues.
This function creates contacting lipid index for residues that are used for the rest of calculation in PyLipID.
The design of contacting lipid index is to assist the calculation of contacts using a dual-cutoff scheme, which
considers a lipid as being in contact when the lipid moves closer than the lower cutoff and as being dissociated
when the lipid moves farther than the upper cutoff.
The lipid indices created by this method are stored in the private class variables of
``_contact_residues_high`` and ``_contact_residues_low`` for each of the cutoffs. These indices are Python
dictionary objects with residue indices as their keys. For each residue, the lipid index stores the residue index
of contacting lipid molecules from each trajectory frame in a list.
The lipid index of the lower cutoff, i.e. ``_contact_residues_low``, is used to calculate lipid occupancy and lipid
count.
The Pearson correlation matrix of lipid interactions for protein residues is also calculated in this function and
stored in the class variable of ``interaction_corrcoef``.
The class attribute :meth:`~LipidInteraction.dataset` which stores the summary of lipid interaction as a
pandas.DataFrame object, is initialized in this method.
"""
self._protein_ref = None
self._lipid_ref = None
self._T_total = []
self._timesteps = []
self._protein_residue_id = []
# initialise data for interaction matrix
col = []
row = []
data = []
ncol_start = 0
# calculate interactions from trajectories
for traj_idx in trange(len(self._trajfile_list), desc="COLLECT INTERACTIONS FROM TRAJECTORIES",
total=len(self._trajfile_list)):
traj = md.load(self._trajfile_list[traj_idx], top=self._topfile_list[traj_idx], stride=self._stride)
traj_info, self._protein_ref, self._lipid_ref = get_traj_info(traj, lipid=self._lipid,
lipid_atoms=self._lipid_atoms,
resi_offset=self._resi_offset,
nprot=self._nprot,
protein_ref=self._protein_ref,
lipid_ref=self._lipid_ref)
if self._dt_traj is None:
timestep = traj.timestep / 1000000.0 if self._timeunit == "us" else traj.timestep / 1000.0
else:
timestep = float(self._dt_traj * self._stride)
self._T_total.append((traj.n_frames - 1) * timestep)
self._timesteps.append(timestep)
if len(self._protein_residue_id) == 0:
self._protein_residue_id = traj_info["protein_residue_id"]
self._residue_list = traj_info["residue_list"]
self._nresi_per_protein = len(self._residue_list)
self._duration = dict()
self._occupancy = dict()
self._lipid_count = dict()
self._contact_residues_high = {residue_id: [] for residue_id in self._protein_residue_id}
self._contact_residues_low = {residue_id: [] for residue_id in self._protein_residue_id}
self._koff = np.zeros(self._nresi_per_protein)
self._koff_boot = np.zeros(self._nresi_per_protein)
self._r_squared = np.zeros(self._nresi_per_protein)
self._r_squared_boot = np.zeros(self._nresi_per_protein)
self._res_time = np.zeros(self._nresi_per_protein)
self._residue_map = {residue_name: residue_id
for residue_id, residue_name in zip(self._protein_residue_id, self._residue_list)}
else:
assert len(self._protein_residue_id) == len(traj_info["protein_residue_id"]), \
"Trajectory {} contains {} residues whereas trajectory {} contains {} residues".format(
traj_idx, len(traj_info["protein_residue_id"]), traj_idx - 1, len(self._protein_residue_id))
ncol_per_protein = len(traj_info["lipid_residue_atomid_list"]) * traj.n_frames
for protein_idx in np.arange(self._nprot, dtype=int):
for residue_id, residue_atom_indices in enumerate(
traj_info["protein_residue_atomid_list"][protein_idx]):
# calculate interaction per residue
dist_matrix = np.array([np.min(
md.compute_distances(traj, np.array(list(product(residue_atom_indices, lipid_atom_indices))),
periodic=True, opt=True),
axis=1) for lipid_atom_indices in traj_info["lipid_residue_atomid_list"]])
contact_low, frame_id_set_low, lipid_id_set_low = cal_contact_residues(dist_matrix, self._cutoffs[0])
contact_high, _, _ = cal_contact_residues(dist_matrix, self._cutoffs[1])
self._contact_residues_high[residue_id].append(contact_high)
self._contact_residues_low[residue_id].append(contact_low)
# update coordinates for coo_matrix
col.append([ncol_start + ncol_per_protein * protein_idx + lipid_id * traj.n_frames +
frame_id for frame_id, lipid_id in zip(frame_id_set_low, lipid_id_set_low)])
row.append([residue_id for dummy in np.arange(len(frame_id_set_low), dtype=int)])
data.append(dist_matrix[lipid_id_set_low, frame_id_set_low])
ncol_start += ncol_per_protein * self._nprot
# calculate correlation coefficient matrix
row = np.concatenate(row)
col = np.concatenate(col)
data = np.concatenate(data)
contact_info = coo_matrix((data, (row, col)), shape=(self._nresi_per_protein, ncol_start))
self.interaction_corrcoef = sparse_corrcoef(contact_info)
self.dataset = pd.DataFrame({"Residue": [residue for residue in self._residue_list],
"Residue ID": self._protein_residue_id})
return
def compute_residue_duration(self, residue_id=None):
r"""Calculate lipid contact durations for residues
PyLipID calculates lipid contacts using a dual-cutoff scheme. In this scheme, a continuous contact starts when
a molecule moves closer than the lower distance cutoff and ends when the molecule moves out of the upper cutoff.
The duration between these two time points is the duration of the contact.
PyLipID implements this dual-cutoff tactic by creating a lipid index for the lower and upper
cutoff respectively, which records the lipid molecules within that distance cutoff at each trajectory frame
for residues. Such lipid indices are created by the method :meth:`~LipidInteraction.collect_residue_contacts`,
and are stored in the private class variables ``_contact_residues_high`` and ``_contact_residues_low`` for
each of the cutoffs.
For calculation of contact durations, a lipid molecule that appears in the lipid index of the lower cutoff is
searched in the subsequent frames of the upper lipid index for that residue and the search then stops if this
molecule disappears from the upper cutoff index. This lipid molecule is labeled as 'checked' in the searched
frames in both lipid indices, and the duration of this contact is calculated from the number of frames in which
this lipid molecule appears in the lipid indices. This calculation iterates until all lipid molecules in the
lower lipid index are labeled as 'checked'.
This function returns a list of contact durations or lists of contact durations if multiple residue IDs are
provided.
Parameters
----------
residue_id : int or list of int, default=None
The residue ID, or residue index, that is used by PyLipID for identifying residues. The ID starts from 0,
i.e. the ID of N-th residue is (N-1). If None, all residues are selected.
Returns
-------
durations : list
A list of contact durations or lists of contact durations if multiple residue IDs are provided.
See Also
--------
pylipid.api.LipidInteraction.collect_residue_contacts
Create the lipid index.
pylipid.api.LipidInteraction.compute_site_duration
Calculate durations of contacts with binding sites.
pylipid.func.Duration
Calculate contact durations from lipid index.
"""
self._check_calculation("Residue", self.collect_residue_contacts)
if residue_id is None:
selected_residue_id = self._protein_residue_id
else:
selected_residue_id = np.atleast_1d(residue_id)
for residue_id in tqdm(selected_residue_id, desc="CALCULATE DURATION PER RESIDUE"):
self._duration[residue_id] = [
Duration(self._contact_residues_low[residue_id][(traj_idx*self._nprot)+protein_idx],
self._contact_residues_high[residue_id][(traj_idx*self._nprot)+protein_idx],
self._timesteps[traj_idx]).cal_durations()
for traj_idx in np.arange(len(self.trajfile_list))
for protein_idx in np.arange(self._nprot, dtype=int)]
self.dataset["Duration"] = [np.mean(np.concatenate(self._duration[residue_id]))
if len(self._duration[residue_id]) > 0 else 0
for residue_id in self._protein_residue_id]
self.dataset["Duration std"] = [np.std(np.concatenate(self._duration[residue_id]))
if len(self._duration[residue_id]) > 0 else 0
for residue_id in self._protein_residue_id]
if len(selected_residue_id) == 1:
return self._duration[residue_id]
else:
return [self._duration[residue_id] for residue_id in selected_residue_id]
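# A toy sketch of the dual-cutoff bookkeeping described above (an illustrative assumption,
# not the implementation in pylipid.func.Duration): a contact that starts while a lipid is
# inside the lower cutoff lasts for as long as that lipid stays inside the upper cutoff in
# the subsequent frames.
def _dual_cutoff_durations_sketch(lipid_id, contact_low, contact_high, timestep):
    durations = []
    frame = 0
    while frame < len(contact_low):
        if lipid_id in contact_low[frame]:
            start = frame
            while frame < len(contact_high) and lipid_id in contact_high[frame]:
                frame += 1
            durations.append((frame - start) * timestep)
        else:
            frame += 1
    return durations
# e.g. _dual_cutoff_durations_sketch(12, [[12], [12], [], []], [[12], [12], [12], []], 0.5) -> [1.5]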
def compute_residue_occupancy(self, residue_id=None):
"""Calculate the percentage of frames in which the specified residue formed lipid contacts for residues.
The lipid occupancy is calculated using the lower cutoff, and calculated as the percentage of frames in which
the specified lipid species formed contact with residues within the lower distance cutoff.
The returned occupancy list contains data from all protein copies and all trajectories.
Parameters
----------
residue_id : int or list of int, default=None
The residue ID, or residue index, that is used by PyLipID for identifying residues. The ID starts from 0,
i.e. the ID of N-th residue is (N-1). If None, all residues are selected.
Returns
-------
occupancies : list
A list of lipid occupancies, of length of n_trajs x n_proteins, or lists of lipid occupancies if multiple
residue IDs are provided.
See Also
--------
pylipid.api.LipidInteraction.collect_residue_contacts
Create the lipid index.
pylipid.api.LipidInteraction.compute_site_occupancy
Calculate binding site occupancy
pylipid.func.cal_occupancy
Calculate the percentage of frames in which a contact is formed.
"""
self._check_calculation("Residue", self.collect_residue_contacts)
if residue_id is None:
selected_residue_id = self._protein_residue_id
else:
selected_residue_id = np.atleast_1d(residue_id)
for residue_id in tqdm(selected_residue_id, desc="CALCULATE OCCUPANCY"):
self._occupancy[residue_id] = [cal_occupancy(self._contact_residues_low[residue_id][(traj_idx*self._nprot)+protein_idx])
for traj_idx in np.arange(len(self.trajfile_list))
for protein_idx in np.arange(self._nprot, dtype=int)]
self.dataset["Occupancy"] = [np.mean(self._occupancy[residue_id])
if len(self._occupancy[residue_id]) > 0 else 0
for residue_id in self._protein_residue_id]
self.dataset["Occupancy std"] = [np.std(self._occupancy[residue_id])
if len(self._occupancy[residue_id]) > 0 else 0
for residue_id in self._protein_residue_id]
if len(selected_residue_id) == 1:
return self._occupancy[residue_id]
else:
return [self._occupancy[residue_id] for residue_id in selected_residue_id]
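# A one-line sketch (an illustrative assumption, not pylipid.func.cal_occupancy itself) of the
# occupancy definition above: the percentage of frames in which at least one lipid molecule
# sits within the lower cutoff of the residue.
def _occupancy_sketch(contact_low):
    return 100.0 * sum(1 for frame in contact_low if len(frame) > 0) / len(contact_low)
# e.g. _occupancy_sketch([[12], [], [7, 12], []]) -> 50.0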
def compute_residue_lipidcount(self, residue_id=None):
"""Calculate the average number of contacting lipids for residues.
This method calculates the number of specified lipid within the lower distance cutoff to a residue. The
reported value is averaged from the trajectory frames in which interaction between the specified lipid and the
residue is formed. Thus the returned values report the average number of surrounding lipid molecules when
the lipids are bound.
The returned lipid count list contains data from each of the protein copies and each of the trajectories.
Parameters
----------
residue_id : int or list of int, default=None
The residue ID, or residue index, that is used by PyLipID for identifying residues. The ID starts from 0,
i.e. the ID of N-th residue is (N-1). If None, all residues are selected.
Returns
-------
lipidcounts : list
A list of lipid counts, of length of n_trajs x n_proteins, or lists of lipid counts if multiple
residue IDs are provided.
See Also
--------
pylipid.api.LipidInteraction.collect_residue_contacts
Create the lipid index.
pylipid.api.LipidInteraction.compute_site_lipidcount
Calculate binding site lipid count.
pylipid.func.cal_lipidcount
Calculate the average number of contacting molecules.
"""
self._check_calculation("Residue", self.collect_residue_contacts)
if residue_id is None:
selected_residue_id = self._protein_residue_id
else:
selected_residue_id = np.atleast_1d(residue_id)
for residue_id in tqdm(selected_residue_id, desc="CALCULATE RESIDUE LIPIDCOUNT"):
self._lipid_count[residue_id] = [cal_lipidcount(self._contact_residues_low[residue_id][(traj_idx*self._nprot)+protein_idx])
for traj_idx in np.arange(len(self.trajfile_list))
for protein_idx in np.arange(self._nprot, dtype=int)]
self.dataset["Lipid Count"] = [np.mean(self._lipid_count[residue_id])
if len(self._lipid_count[residue_id]) > 0 else 0
for residue_id in self._protein_residue_id]
self.dataset["Lipid Count std"] = [np.std(self._lipid_count[residue_id])
if len(self._lipid_count[residue_id]) > 0 else 0
for residue_id in self._protein_residue_id]
if len(selected_residue_id) == 1:
return self._lipid_count[residue_id]
else:
return [self._lipid_count[residue_id] for residue_id in selected_residue_id]
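# A short sketch (an illustrative assumption, not pylipid.func.cal_lipidcount itself) of the
# lipid count definition above: the number of lipids within the lower cutoff, averaged only
# over the frames in which at least one lipid is bound.
def _lipidcount_sketch(contact_low):
    occupied = [len(frame) for frame in contact_low if len(frame) > 0]
    return sum(occupied) / len(occupied) if occupied else 0
# e.g. _lipidcount_sketch([[12], [], [7, 12], []]) -> 1.5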
def compute_residue_koff(self, residue_id=None, nbootstrap=10, initial_guess=[1., 1., 1., 1.],
save_dir=None, plot_data=True, fig_close=True, fig_format="pdf", num_cpus=None):
r"""Calculate interaction koff and residence time for residues.
The koff is calculated from a survival time correlation function which describes the relaxation of the bound
lipids [1]_. Often the interactions between lipid and protein surface can be divided into prolonged interactions and
quick diffusive contacts. Thus PyLipID fits the normalised survival function to a bi-exponential curve which
describes the long and short decay periods.
The survival time correlation function σ(t) is calculated as follows
.. math::
\sigma(t) = \frac{1}{N_{j}} \frac{1}{T-t} \sum_{j=1}^{N_{j}} \sum_{v=0}^{T-t}\tilde{n}_{j}(v, v+t)
where T is the length of the simulation trajectory, :math:`N_{j}` is the total number of lipid contacts and
:math:`\tilde{n}_{j}(v, v+t)` is a binary function that takes the value 1 if the contact of
lipid j lasts from time v to time v+t and 0 otherwise. The values of :math:`\sigma(t)` are calculated for every
value of t from 0 to T ns, for each time step of the trajectories, and normalized by dividing by :math:`\sigma(0)`,
so that the survival time-correlation function has value 1 at t = 0.
The normalized survival function is then fitted to a biexponential to model the long and short decays of
lipid relaxation:
.. math::
\sigma(t) \sim A e^{-k_{1} t}+B e^{-k_{2} t}\left(k_{1} \leq k_{2}\right)
PyLipID takes :math:`k_{1}` as the dissociation :math:`k_{off}`, and calculates the residence time as
:math:`\tau=1 / k_{off}`. PyLipID raises a warning for the impact on the accuracy of :math:`k_{off}`
calculation if trajectories are of different lengths when multiple trajectories are provided. PyLipID measures
the :math:`r^{2}` of the biexponential fitting to the survival function to show the quality of the
:math:`k_{off}` estimation. In addition, PyLipID bootstraps the contact durations and measures the
:math:`k_{off}` of the bootstrapped data, to report how well lipid contacts are sampled from simulations. The
lipid contact sampling, the curve-fitting and the bootstrap results can be conveniently checked via the
:math:`k_{off}` plot.
The calculation of koff for residues can be time-consuming, thus PyLipID uses python multiprocessing to
parallelize the calculation. The number of CPUs used for multiprocessing can be specified, otherwise all the
available CPUs will be used by default.
Parameters
----------
residue_id : int or list of int, default=None
The residue ID, or residue index, that is used by PyLipID for identifying residues. The ID starts from 0,
i.e. the ID of N-th residue is (N-1). If None, all residues are selected.
nbootstrap : int, default=10
Number of bootstrap on the interaction durations. For each bootstrap, samples of the size of the original
dataset are drawn from the collected durations with replacement. :math:`k_{off}` and :math:`r^{2}` are
calculated for each bootstrap.
initial_guess : array_like, default=[1., 1., 1., 1.]
The initial guess for the curve-fitting of the biexponential curve. Used by scipy.optimize.curve_fit.
save_dir : str, default=None
The directory for saving the koff figures of residues if plot_data is True. By default, the koff figures
are saved in the directory Residue_koffs_{lipid} under the root directory defined when ``LipidInteraction``
was initiated.
plot_data : bool, default=True
If True, plot the koff figures for residues.
fig_close : bool, default=True
Use matplotlib.pyplot.close() to close the koff figures. Can save memory if many figures are open and plotted.
fig_format : str, default="pdf"
The format of koff figures. Support formats that are supported by matplotlib.pyplot.savefig().
num_cpus : int or None, default=None
Number of CPUs used for multiprocessing. If None, all the available CPUs will be used.
Returns
---------
koff : scalar or list of scalar
The calculated koffs for selected residues.
restime : scalar or list of scalar
The calculated residence times for selected residues.
See Also
---------
pylipid.api.LipidInteraction.collect_residue_contacts
Create the lipid index.
pylipid.api.LipidInteraction.compute_site_koff
Calculate binding site koffs and residence times.
pylipid.func.cal_koff
Calculate residence time and koff.
pylipid.func.cal_survival_func
Compute the normalised survival function.
References
-----------
.. [1] García, <NAME>, Lewis. Computation of the mean residence time of water in the hydration shells
of biomolecules. 1993. Journal of Computational Chemistry.
"""
self._check_calculation("Duration", self.compute_residue_duration)
if plot_data:
koff_dir = check_dir(save_dir, "Residue_koffs_{}".format(self._lipid)) if save_dir is not None \
else check_dir(self._save_dir, "Residue_koffs_{}".format(self._lipid))
if len(set(self._residue_list)) != len(self._residue_list):
residue_name_set = ["{}_ResidueID{}".format(residue, residue_id) for residue, residue_id in
zip(self._residue_list, self._protein_residue_id)]
else:
residue_name_set = self._residue_list
if residue_id is not None:
selected_residue_id = np.atleast_1d(residue_id)
else:
selected_residue_id = self._protein_residue_id
residues_missing_durations = [residue_id for residue_id in selected_residue_id
if len(self._duration[residue_id]) == 0]
if len(residues_missing_durations) > 0:
self.compute_residue_duration(residue_id=residues_missing_durations)
t_total = np.max(self._T_total)
same_length = np.all(np.array(self._T_total) == t_total)
if not same_length:
warnings.warn(
"Trajectories have different lengths. This will impair the accuracy of koff calculation!")
timestep = np.min(self._timesteps)
same_timestep = np.all(np.array(self._timesteps) == timestep)
if not same_timestep:
warnings.warn(
"Trajectories have different timesteps. This will impair the accuracy of koff calculation!")
if plot_data:
fn_set = [os.path.join(koff_dir, "{}.{}".format(residue_name_set[residue_id], fig_format))
for residue_id in selected_residue_id]
else:
fn_set = [False for dummy in selected_residue_id]
returned_values = p_map(partial(calculate_koff_wrapper, t_total=t_total, timestep=timestep, nbootstrap=nbootstrap,
initial_guess=initial_guess, plot_data=plot_data, timeunit=self._timeunit,
fig_close=fig_close),
[np.concatenate(self._duration[residue_id]) for residue_id in selected_residue_id],
[residue_name_set[residue_id] for residue_id in selected_residue_id],
fn_set, num_cpus=num_cpus, desc="CALCULATE KOFF FOR RESIDUES")
for residue_id, returned_value in zip(selected_residue_id, returned_values):
self._koff[residue_id] = returned_value[0]
self._res_time[residue_id] = returned_value[1]
self._r_squared[residue_id] = returned_value[2]
self._koff_boot[residue_id] = returned_value[3]
self._r_squared_boot[residue_id] = returned_value[4]
# update dataset
self.dataset["Koff"] = self._koff
self.dataset["Residence Time"] = self._res_time
self.dataset["R Squared"] = self._r_squared
self.dataset["Koff Bootstrap avg"] = self._koff_boot
self.dataset["R Squared Bootstrap avg"] = self._r_squared_boot
if len(selected_residue_id) == 1:
return self._koff[selected_residue_id[0]], self._res_time[selected_residue_id[0]]
else:
return [self._koff[residue_id] for residue_id in selected_residue_id], \
[self._res_time[residue_id] for residue_id in selected_residue_id]
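# A minimal sketch (an assumption for illustration, not PyLipID's pylipid.func.cal_koff) of
# fitting the normalised survival function to the biexponential described in the docstring
# above, taking the slower rate k1 as koff and the residence time as 1/koff.
def _biexponential_koff_sketch(t_points, survival_values, initial_guess=(1., 1., 1., 1.)):
    import numpy as np
    from scipy.optimize import curve_fit
    def biexpo(t, A, k1, B, k2):
        return A * np.exp(-k1 * t) + B * np.exp(-k2 * t)
    popt, _ = curve_fit(biexpo, np.asarray(t_points), np.asarray(survival_values),
                        p0=initial_guess, maxfev=100000)
    koff = np.min(np.abs([popt[1], popt[3]]))   # k1 <= k2: the slower decay is taken as koff
    return koff, 1.0 / koff                     # (koff, residence time)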
def compute_binding_nodes(self, threshold=4, print_data=True):
r"""Calculate binding sites.
Binding sites are defined based on a community analysis of protein residue-interaction networks that are created
from the lipid interaction correlation matrix. Given the definition of a lipid binding site, namely a
cluster of residues that bind to the same lipid molecule at the same time, PyLipID creates a distance vector
for each residue that records the distances to all lipid molecules as a function of time, and calculate the
Pearson correlation matrix of protein residues for binding the same lipid molecules. This correlation matrix is
calculated by :meth:`~LipidInteraction.collect_residue_contacts()` and stored in the class variable
``interaction_corrcoef``.
The protein residue interaction network is constructed based on the Pearson correlation matrix.
In this network, the nodes are the protein residues and the weights are the Pearson correlation
coefficients of pairs of residues. The interaction network is then decomposed into sub-units or communities,
which are groups of nodes that are more densely connected internally than with the rest of the network.
For the calculation of communities, the Louvain algorithm [1]_ is used to find high modularity network partitions.
Modularity, which measures the quality of network partitions, is defined as [2]_
.. math::
Q=\frac{1}{2 m} \sum_{i, j}\left[A_{i j}-\frac{k_{i} k_{j}}{2 m}\right] \delta\left(c_{i}, c_{j}\right)
where :math:`A_{i j}` is the weight of the edge between node i and node j; :math:`k_{i}` is the sum of weights
of the nodes attached to the node i, i.e. the degree of the node; :math:`c_{i}` is the community to which node i
assigned; :math:`\delta\left(c_{i}, c_{j}\right)` is 1 if :math:`c_{i}=c_{j}` and 0 otherwise; and
:math:`m=\frac{1}{2} \sum_{i j} A_{i j}` is the number of edges. In the modularity optimization, the Louvain
algorithm orders the nodes in the network, and then, one by one, removes and inserts each node in a different
community c_i until no significant increase in modularity. After modularity optimization, all the nodes that
belong to the same community are merged into a single node, of which the edge weights are the sum of the weights
of the comprising nodes. This optimization-aggregation loop is iterated until all nodes are collapsed into one.
By default, this method returns binding sites of at least 4 residues. This filtering step is particularly helpful
for analysis of a smaller number of trajectory frames, in which false correlations are more likely to arise among
2 or 3 residues.
Parameters
----------
threshold : int, default=4
The minimum size of binding sites. Only binding sites with more residues than the threshold will be returned.
print_data : bool, default=True
If True, print a summary of binding site information.
Returns
-------
node_list: list
Binding site node list, i.e. a list of binding sites which contains sets of binding site residue indices
modularity : float or None
The modularity of the network partition. It measures the quality of the network partition. The value is between
-1 and 1. The bigger the modularity, the better the partition.
See Also
--------
pylipid.func.get_node_list
Calculates community structures in interaction network.
References
----------
.. [1] <NAME>.; <NAME>.; <NAME>.; <NAME>., Fast unfolding of communities in large
networks. Journal of Statistical Mechanics: Theory and Experiment 2008, 2008 (10), P10008
.. [2] <NAME>., Analysis of weighted networks. Physical Review E 2004, 70 (5), 056131.
"""
self._check_calculation("Residue", self.compute_residue_koff)
corrcoef_raw = np.nan_to_num(self.interaction_corrcoef)
corrcoef = np.copy(corrcoef_raw)
node_list, modularity = get_node_list(corrcoef, threshold=threshold)
self._node_list = node_list
self._network_modularity = modularity
if len(self._node_list) == 0:
print("*"*30)
print(" No binding site detected!!")
print("*"*30)
else:
residue_BS_identifiers = np.ones(self._nresi_per_protein, dtype=int) * -1
for bs_id, nodes in enumerate(self._node_list):
residue_BS_identifiers[nodes] = int(bs_id)
# update dataset
self.dataset["Binding Site ID"] = residue_BS_identifiers
# initialise variable for binding site interactions
self._duration_BS = dict()
self._occupancy_BS = dict()
self._lipid_count_BS = dict()
self._koff_BS = np.zeros(len(self._node_list))
self._koff_BS_boot = np.zeros(len(self._node_list))
self._res_time_BS = np.zeros(len(self._node_list))
self._r_squared_BS = np.zeros(len(self._node_list))
self._r_squared_BS_boot = np.zeros(len(self._node_list))
if print_data:
print(f"Network modularity: {modularity:.3f}")
for bs_id, nodes in enumerate(self._node_list):
print("#" * 25)
print(f"Binding Site ID: {bs_id}")
print("{:>10s} -- {:<12s}".format("Residue", "Residue ID"))
for node in nodes:
print("{:>10s} -- {:<12d}".format(self._residue_list[node], self._protein_residue_id[node]))
print("#" * 25)
return node_list, modularity
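# A rough sketch (an illustrative assumption; PyLipID's own implementation lives in
# pylipid.func.get_node_list) of how the residue correlation matrix can be turned into a
# weighted network and partitioned with the Louvain algorithm, here using networkx and the
# python-louvain package (imported as `community`).
def _binding_node_sketch(corrcoef, threshold=4):
    import networkx as nx
    import community as community_louvain
    graph = nx.Graph()
    n_residues = corrcoef.shape[0]
    for i in range(n_residues):
        for j in range(i + 1, n_residues):
            if corrcoef[i, j] > 0:
                graph.add_edge(i, j, weight=corrcoef[i, j])
    partition = community_louvain.best_partition(graph, weight="weight")
    modularity = community_louvain.modularity(partition, graph, weight="weight")
    communities = {}
    for node, community_id in partition.items():
        communities.setdefault(community_id, []).append(node)
    node_list = [sorted(nodes) for nodes in communities.values() if len(nodes) >= threshold]
    return node_list, modularity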
def compute_site_duration(self, binding_site_id=None):
"""Calculate interaction durations for binding sites.
PyLipID calculates lipid contacts using a dual-cutoff scheme. In this scheme, a continuous contact starts when
a molecule moves closer than the lower distance cutoff and ends when the molecule moves out of the upper cutoff.
The duration between these two time points is the duration of the contact.
PyLipID implements this dual-cutoff tactic by creating a lipid index for the lower and upper
cutoff respectively, which records the lipid molecules within that distance cutoff at each trajectory frame
for residues. Such lipid indices are created by the method :meth:`~LipidInteraction.collect_residue_contacts`,
and are stored in the private class variables ``_contact_residues_high`` and ``_contact_residues_low`` for
each of the cutoffs.
For calculating contacts for binding sites, the interacting lipid molecules with binding site residues are
merged with duplicates removed to form the lipid indices for the upper cutoff and lower cutoff respectively.
Similar to the calculation for residues, a contact duration of a binding site is calculated as the duration
between the time point at which a lipid molecule appears in the lipid index of the lower cutoff and the time point
at which this molecule disappears from the upper cutoff index.
This function returns a list of contact durations or lists of contact durations if multiple binding site IDs are
provided.
Parameters
----------
binding_site_id : int or list of int, default=None
The binding site ID used in PyLipID. This ID is the index in the binding site node list that is
calculated by the method ``compute_binding_nodes``. The ID of the N-th binding site is (N-1). If None,
the contact durations of all binding sites are calculated.
Returns
-------
durations_BS : list
A list of contact durations or lists of contact durations if multiple binding site IDs are provided.
See Also
---------
pylipid.api.LipidInteraction.collect_residue_contacts
Create the lipid index.
pylipid.api.LipidInteraction.compute_residue_duration
Calculate residue contact durations.
pylipid.func.Duration
Calculate contact durations from lipid index.
"""
self._check_calculation("Binding Site ID", self.compute_binding_nodes, print_data=False)
selected_bs_id = np.atleast_1d(binding_site_id) if binding_site_id is not None \
else np.arange(len(self._node_list), dtype=int)
for bs_id in tqdm(selected_bs_id, desc="CALCULATE DURATION PER BINDING SITE"):
nodes = self._node_list[bs_id]
durations_BS = []
for traj_idx in np.arange(len(self._trajfile_list), dtype=int):
for protein_idx in np.arange(self._nprot, dtype=int):
list_to_take = traj_idx * self._nprot + protein_idx
n_frames = len(self._contact_residues_low[nodes[0]][list_to_take])
contact_BS_low = [np.unique(np.concatenate(
[self._contact_residues_low[node][list_to_take][frame_idx] for node in nodes]))
for frame_idx in np.arange(n_frames)]
contact_BS_high = [np.unique(np.concatenate(
[self._contact_residues_high[node][list_to_take][frame_idx] for node in nodes]))
for frame_idx in np.arange(n_frames)]
durations_BS.append(
Duration(contact_BS_low, contact_BS_high, self._timesteps[traj_idx]).cal_durations())
self._duration_BS[bs_id] = durations_BS
# update dataset
durations_BS_per_residue = np.zeros(self._nresi_per_protein)
for bs_id, nodes in enumerate(self._node_list):
durations_BS_per_residue[nodes] = np.mean(np.concatenate(self._duration_BS[bs_id])) \
if len(self._duration_BS[bs_id]) > 0 else 0
self.dataset["Binding Site Duration"] = durations_BS_per_residue
if len(selected_bs_id) == 1:
return self._duration_BS[bs_id]
else:
return [self._duration_BS[bs_id] for bs_id in selected_bs_id]
def compute_site_occupancy(self, binding_site_id=None):
"""Calculate the percentage of frames in which the specified lipid contacts are formed for binding sites.
Similar to calculation on residues, the lipid occupancy is calculated using the lower cutoff, and calculated as
the percentage of frames in which the specified lipid species formed contact with the binding site within the
lower distance cutoff. The lipid index for a binding site is generated by merging, with duplicates removed, the lipid indices of its comprising residues.
The returned list of occupancies contains data from all protein copies and all trajectories.
Parameters
----------
binding_site_id : int or list of int, default=None
The binding site ID used in PyLipID. This ID is the index in the binding site node list that is
calculated by the method ``compute_binding_nodes``. The ID of the N-th binding site is (N-1). If None,
the occupancy of all binding sites is calculated.
Returns
-------
occupancy_BS : list
A list of lipid occupancies or lists of lipid occupancies if multiple binding site IDs are provided.
See Also
---------
pylipid.api.LipidInteraction.collect_residue_contacts
Create the lipid index.
pylipid.api.LipidInteraction.compute_residue_occupancy
Calculate lipid occupancy for residues.
pylipid.func.cal_occupancy
Calculate the percentage of frames in which a contact is formed.
"""
self._check_calculation("Binding Site ID", self.compute_binding_nodes, print_data=False)
selected_bs_id = np.atleast_1d(binding_site_id) if binding_site_id is not None \
else np.arange(len(self._node_list), dtype=int)
for bs_id in tqdm(selected_bs_id, desc="CALCULATE OCCUPANCY PER BINDING SITE"):
nodes = self._node_list[bs_id]
occupancy_BS = []
for traj_idx in np.arange(len(self._trajfile_list), dtype=int):
for protein_idx in np.arange(self._nprot, dtype=int):
list_to_take = traj_idx * self._nprot + protein_idx
n_frames = len(self._contact_residues_low[nodes[0]][list_to_take])
contact_BS_low = [np.unique(np.concatenate(
[self._contact_residues_low[node][list_to_take][frame_idx] for node in nodes]))
for frame_idx in np.arange(n_frames)]
occupancy_BS.append(cal_occupancy(contact_BS_low))
self._occupancy_BS[bs_id] = occupancy_BS
# update dataset
occupancy_BS_per_residue = np.zeros(self._nresi_per_protein)
for bs_id, nodes in enumerate(self._node_list):
occupancy_BS_per_residue[nodes] = np.mean(self._occupancy_BS[bs_id]) \
if len(self._occupancy_BS[bs_id]) > 0 else 0
self.dataset["Binding Site Occupancy"] = occupancy_BS_per_residue
if len(selected_bs_id) == 1:
return self._occupancy_BS[bs_id]
else:
return [self._occupancy_BS[bs_id] for bs_id in selected_bs_id]
def compute_site_lipidcount(self, binding_site_id=None):
"""Calculate the average number of contacting lipids for binding site.
This method calculates the number of specified lipid within the lower distance cutoff to a binding site. The
reported value is averaged from the trajectory frames in which interaction between the specified lipid and the
binding site is formed. Thus the returned values report the average number of surrounding lipid molecules when
the lipids are bound.
The returned lipid count list contains data from each of the protein copies and each of the trajectories.
Parameters
----------
binding_site_id : int or list of int, default=None
The binding site ID used in PyLipID. This ID is the index in the binding site node list that is
calculated by the method ``compute_binding_nodes``. The ID of the N-th binding site is (N-1). If None,
the lipid count of all binding sites is calculated.
Returns
-------
lipidcount_BS : list
A list of lipid counts or lists of lipid counts if multiple binding site IDs are provided.
See Also
---------
pylipid.api.LipidInteraction.collect_residue_contacts
Create the lipid index.
pylipid.api.LipidInteraction.compute_residue_lipidcount
Calculate lipid count for residues.
pylipid.func.cal_lipidcount
Calculate the average number of contacting molecules.
"""
self._check_calculation("Binding Site ID", self.compute_binding_nodes, print_data=False)
selected_bs_id = np.atleast_1d(binding_site_id) if binding_site_id is not None \
else np.arange(len(self._node_list), dtype=int)
for bs_id in tqdm(selected_bs_id, desc="CALCULATE LIPIDCOUNT PER BINDING SITE"):
nodes = self._node_list[bs_id]
lipidcount_BS = []
for traj_idx in np.arange(len(self._trajfile_list), dtype=int):
for protein_idx in np.arange(self._nprot, dtype=int):
list_to_take = traj_idx * self._nprot + protein_idx
n_frames = len(self._contact_residues_low[nodes[0]][list_to_take])
contact_BS_low = [np.unique(np.concatenate(
[self._contact_residues_low[node][list_to_take][frame_idx] for node in nodes]))
for frame_idx in np.arange(n_frames)]
lipidcount_BS.append(cal_lipidcount(contact_BS_low))
self._lipid_count_BS[bs_id] = lipidcount_BS
# update dataset
lipidcount_BS_per_residue = np.zeros(self._nresi_per_protein)
for bs_id, nodes in enumerate(self._node_list):
lipidcount_BS_per_residue[nodes] = np.mean(self._lipid_count_BS[bs_id]) \
if len(self._lipid_count_BS[bs_id]) > 0 else 0
self.dataset["Binding Site Lipid Count"] = lipidcount_BS_per_residue
if len(selected_bs_id) == 1:
return self._lipid_count_BS[bs_id]
else:
return [self._lipid_count_BS[bs_id] for bs_id in selected_bs_id]
def compute_site_koff(self, binding_site_id=None, nbootstrap=10, initial_guess=[1., 1., 1., 1.],
save_dir=None, plot_data=True, fig_close=True, fig_format="pdf", num_cpus=None):
r"""Calculate interactions koff and residence time for binding sites.
The koff is calculated from a survival time correlation function which describes the relaxation of the bound
lipids [1]_. Often the interactions between lipid and protein surface can be divided into prolonged interactions and
quick diffusive contacts. Thus PyLipID fits the normalised survival function to a bi-exponential curve which
describes the long and short decay periods.
The survival time correlation function σ(t) is calculated as follows
.. math::
\sigma(t) = \frac{1}{N_{j}} \frac{1}{T-t} \sum_{j=1}^{N_{j}} \sum_{v=0}^{T-t}\tilde{n}_{j}(v, v+t)
where T is the length of the simulation trajectory, :math:`N_{j}` is the total number of lipid contacts and
:math:`\tilde{n}_{j}(v, v+t)` is a binary function that takes the value 1 if the contact of
lipid j lasts from time v to time v+t and 0 otherwise. The values of :math:`\sigma(t)` are calculated for every
value of t from 0 to T ns, for each time step of the trajectories, and normalized by dividing by :math:`\sigma(0)`,
so that the survival time-correlation function has value 1 at t = 0.
The normalized survival function is then fitted to a biexponential to model the long and short decays of
lipid relaxation:
.. math::
\sigma(t) \sim A e^{-k_{1} t}+B e^{-k_{2} t}\left(k_{1} \leq k_{2}\right)
PyLipID takes :math:`k_{1}` as the dissociation :math:`k_{off}`, and calculates the residence time as
:math:`\tau=1 / k_{off}`. PyLipID raises a warning for the impact on the accuracy of :math:`k_{off}`
calculation if trajectories are of different lengths when multiple trajectories are provided. PyLipID measures
the :math:`r^{2}` of the biexponential fitting to the survival function to show the quality of the
:math:`k_{off}` estimation. In addition, PyLipID bootstraps the contact durations and measures the
:math:`k_{off}` of the bootstrapped data, to report how well lipid contacts are sampled from simulations. The
lipid contact sampling, the curve-fitting and the bootstrap results can be conveniently checked via the
:math:`k_{off}` plot.
The durations of lipid contact with binding sites are calculated using
:meth:`~LipidInteraction.compute_site_duration`. See its page for the definition of lipid contact
with binding site.
The calculation of koff for binding sites can be time-consuming, thus PyLipID uses python multiprocessing to
parallelize the calculation. The number of CPUs used for multiprocessing can be specified, otherwise all the
available CPUs will be used by default.
Parameters
----------
binding_site_id : int or list of int, default=None
The binding site ID used in PyLipID. This ID is the index in the binding site node list that is
calculated by the method ``compute_binding_nodes``. The ID of the N-th binding site is (N-1). If None,
the koff of all binding sites is calculated.
nbootstrap : int, default=10
Number of bootstrap on the interaction durations. For each bootstrap, samples of the size of the original
dataset are drawn from the collected durations with replacement. :math:`k_{off}` and :math:`r^{2}` are
calculated for each bootstrap.
initial_guess : array_like, default=[1., 1., 1., 1.]
The initial guess for the curve-fitting of the biexponential curve. Used by scipy.optimize.curve_fit.
save_dir : str, default=None
The directory for saving the koff figures of binding sites if plot_data is True. By default, the koff figures
are saved in the directory Binding_Sites_koffs_{lipid} under the root directory defined when ``LipidInteraction``
was initiated.
plot_data : bool, default=True
If True, plot the koff figures for binding sites.
fig_close : bool, default=True
Use matplotlib.pyplot.close() to close the koff figures. Can save memory if many figures are open and plotted.
fig_format : str, default="pdf"
The format of koff figures. Support formats that are supported by matplotlib.pyplot.savefig().
num_cpus : int or None, default=None
Number of CPUs used for multiprocessing. If None, all the available CPUs will be used.
Returns
---------
koff : scalar or list of scalar
The calculated koffs for selected binding sites.
restime : scalar or list of scalar
The calculated residence times for selected binding sites.
See Also
---------
pylipid.api.LipidInteraction.collect_residue_contacts
Create the lipid index.
pylipid.api.LipidInteraction.compute_residue_koff
Calculate koffs and residence times for residues.
pylipid.func.cal_koff
Calculate residence time and koff.
pylipid.func.cal_survival_func
Compute the normalised survival function.
References
-----------
.. [1] García, <NAME>, Lewis. Computation of the mean residence time of water in the hydration shells
of biomolecules. 1993. Journal of Computational Chemistry.
"""
self._check_calculation("Binding Site Duration", self.compute_site_duration)
if plot_data:
BS_dir = check_dir(save_dir, "Binding_Sites_koffs_{}".format(self._lipid)) if save_dir is not None \
else check_dir(self._save_dir, "Binding_Sites_koffs_{}".format(self._lipid))
selected_bs_id = np.atleast_1d(binding_site_id) if binding_site_id is not None \
else np.arange(len(self._node_list), dtype=int)
binding_sites_missing_durations = [bs_id for bs_id in selected_bs_id
if len(self._duration_BS[bs_id]) == 0]
if len(binding_sites_missing_durations) > 0:
self.compute_site_duration(binding_site_id=binding_sites_missing_durations)
t_total = np.max(self._T_total)
same_length = np.all(np.array(self._T_total) == t_total)
if not same_length:
warnings.warn(
"Trajectories have different lengths. This will impair the accuracy of koff calculation!")
timestep = np.min(self._timesteps)
same_timestep = np.all(np.array(self._timesteps) == timestep)
if not same_timestep:
warnings.warn(
"Trajectories have different timesteps. This will impair the accuracy of koff calculation!")
if plot_data:
fn_set = [os.path.join(BS_dir, f"BS_id{bs_id}.{fig_format}") for bs_id in selected_bs_id]
else:
fn_set = [False for dummy in selected_bs_id]
returned_values = p_map(partial(calculate_koff_wrapper, t_total=t_total, timestep=timestep, nbootstrap=nbootstrap,
initial_guess=initial_guess, plot_data=plot_data, timeunit=self._timeunit,
fig_close=fig_close),
[np.concatenate(self._duration_BS[bs_id]) for bs_id in selected_bs_id],
[f"Binding Site {bs_id}" for bs_id in selected_bs_id],
fn_set, num_cpus=num_cpus, desc="CALCULATE KOFF FOR BINDING SITES")
for bs_id, returned_value in zip(selected_bs_id, returned_values):
self._koff_BS[bs_id] = returned_value[0]
self._res_time_BS[bs_id] = returned_value[1]
self._r_squared_BS[bs_id] = returned_value[2]
self._koff_BS_boot[bs_id] = returned_value[3]
self._r_squared_BS_boot[bs_id] = returned_value[4]
# update dataset
for data, column_name in zip(
[self._koff_BS, self._koff_BS_boot, self._res_time_BS, self._r_squared_BS, self._r_squared_BS_boot],
["Binding Site Koff", "Binding Site Koff Bootstrap avg", "Binding Site Residence Time",
"Binding Site R Squared", "Binding Site R Squared Bootstrap avg"]):
data_per_residue = np.zeros(self._nresi_per_protein)
for bs_id, nodes in enumerate(self._node_list):
data_per_residue[nodes] = data[bs_id]
self.dataset[column_name] = data_per_residue
if len(selected_bs_id) == 1:
return self._koff_BS[bs_id], self._res_time_BS[bs_id]
else:
return [self._koff_BS[bs_id] for bs_id in selected_bs_id], \
[self._res_time_BS[bs_id] for bs_id in selected_bs_id]
def analyze_bound_poses(self, binding_site_id=None, n_top_poses=3, n_clusters="auto", pose_format="gro",
score_weights=None, kde_bw=0.15, pca_component=0.90, plot_rmsd=True, save_dir=None,
eps=None, min_samples=None, metric="euclidean",
fig_close=False, fig_format="pdf", num_cpus=None):
r"""Analyze bound poses for binding sites.
This function can find representative bound poses, cluster the bound poses and calculate pose RMSD for
binding sites.
If ``n_top_poses`` is an integer larger than 0, this method will find the representative bound poses for the specified
binding sites. To do so, it evaluates all the bound poses in a binding site using a density-based scoring function
and ranks the poses based on the scores. The scoring function is defined as:
.. math::
\text { score }=\sum_{i} W_{i} \cdot \hat{f}_{i, H}(D)
where :math:`W_{i}` is the weight given to atom i of the lipid molecule, H is the bandwidth and
:math:`\hat{f}_{i, H}(D)` is a multivariate kernel density estimation of the position of atom i in the specified
binding site. :math:`\hat{f}_{i, H}(D)` is calculated from all the bound lipid poses in that binding site.
The position of atom i is a `p`-variant vector, :math:`\left[D_{i 1}, D_{i 2}, \ldots, D_{i p}\right]` where
:math:`D_{i p}` is the minimum distance to the residue `p` of the binding site. The multivariant kernel density
is estimated by `KDEMultivariate
<https://www.statsmodels.org/devel/generated/statsmodels.nonparametric.kernel_density.KDEMultivariate.html>`_
provided by Statsmodels. Higher weights can be given to e.g. the headgroup atoms of phospholipids, to generate
better defined binding poses, but all lipid atoms are weighted equally by default. The use of relative positions
of lipid atoms in their binding site makes the analysis independent of the conformational changes in the rest
of the protein.
If ``n_clusters`` is given an integer larger than 0, this method will cluster the lipid bound poses in the specified
binding site using `KMeans <https://scikit-learn.org/stable/modules/generated/sklearn.cluster.KMeans.html>`_
provided by scikit-learn. The KMeans clustering separates the samples into `n` clusters of equal variance by minimizing
the `inertia`, which is defined as:
.. math::
\sum_{i=0}^{n} \min _{u_{i} \in C}\left(\left\|x_{i}-u_{i}\right\|^{2}\right)
where :math:`u_{i}` is the `centroid` of cluster i. KMeans scales well with large datasets but performs poorly
with clusters of varying sizes and densities, which is often the case for lipid poses in a binding site.
If ``n_clusters`` is set to `auto`, this method will cluster the bound poses using a density-based cluster
`DBSCAN <https://scikit-learn.org/stable/modules/generated/sklearn.cluster.DBSCAN.html>`_ provided by scikit.
DBSCAN finds clusters of core samples of high density. A sample point is a core sample if at least `min_samples`
points are within distance :math:`\varepsilon` of it. A cluster is defined as a set of sample points that are
mutually density-connected and density-reachable, i.e. there is a path
:math:`\left\langle p_{1}, p_{2}, \ldots, p_{n}\right\rangle` where each :math:`p_{i+1}` is within distance
:math:`\varepsilon` of :math:`p_{i}`. The values of `min_samples` and :math:`\varepsilon`
determine the performance of this clusterer. If None, `min_samples` takes the value of 2 * ndim. If
:math:`\varepsilon` is None, it is set as the value at the knee of the k-distance plot.
For writing out the clustered poses, this method randomly selects one pose from each cluster (or, when DBSCAN
is used, one pose from the core samples of each cluster) and writes the selected lipid pose, together with the
protein conformation to which it binds, using MDTraj in the requested pose format.
The root mean square deviation (RMSD) of a lipid bound pose in a binding site is calculated from the relative
position of the pose in the binding site compared to the average position of the bound poses. Thus, the pose
RMSD is defined as:
.. math::
RMSD=\sqrt{\frac{\sum_{i=0}^{N} \sum_{j=0}^{M}\left(D_{i j}-\bar{D}_{j}\right)^{2}}{N}}
where :math:`D_{i j}` is the distance of atom `i` to residue `j` in the binding site; :math:`\bar{D}_{j}` is the
distance of atom `i` to residue `j` averaged over all bound poses in the binding site; `N` is the number of atoms in
the lipid molecule and `M` is the number of residues in the binding site.
Parameters
----------
binding_site_id : int or list of int, default=None
The binding site ID used in PyLipID. This ID is the index in the binding site node list that is
calculated by the method ``compute_binding_nodes``. The ID of the N-th binding site is (N-1). If None,
the bound poses of all binding sites are analyzed.
n_top_poses : int, default=3
Number of representative bound poses written out for the selected binding site.
n_clusters : int or 'auto', default='auto'
Number of clusters to form for bound poses of the selected binding site. If ``n_clusters`` is set to 'auto', the
density-based clustering algorithm DBSCAN will be used. If ``n_clusters`` is given a non-zero integer, KMeans is used.
pose_format : str, default="gro"
The coordinate format in which the representative poses and clustered poses are saved. Supports the formats
included in mdtraj.Trajectory.save().
score_weights : dict or None, default=None
The weights given to atoms in the scoring function for finding the representative bound poses. It should be in
the format of a Python dictionary {atom name: weight}. The atom names should be consistent with the topology.
By default, all atoms in the lipid molecule are weighted equally.
kde_bw : scalar, default=0.15
The bandwidth for the Gaussian kernel. Used in the density estimation of the lipid atom coordinates in the binding
site. Used by the function
`KDEMultivariate <https://www.statsmodels.org/devel/generated/statsmodels.nonparametric.kernel_density.KDEMultivariate.html>`_ .
pca_component : int, float or 'mle', default=0.90
The number of components kept by the PCA used to reduce the dimensionality of lipid atom coordinates. The coordinate of a lipid atom in
the binding site is expressed as a distance vector of the minimum distances to the residues in that binding site,
i.e. :math:`[D_{i 1}, D_{i 2}, .., D_{i p}]`, which can be high-dimensional. Hence, PCA is applied to this distance
vector prior to the density calculation. The PCA is carried out by
`PCA <https://scikit-learn.org/stable/modules/generated/sklearn.decomposition.PCA.html>`_ in scikit-learn.
plot_rmsd : bool, default=True
Plot the binding site RMSDs in a violinplot.
eps : float or None, default=None
The minimum neighbour distance used by
`DBSCAN <https://scikit-learn.org/stable/modules/generated/sklearn.cluster.DBSCAN.html>`_ .
If None, the value is determined as the elbow point of the sorted minimum neighbour distances
of all the data points.
min_samples : int or None, default=None
The minimum number of samples to be considered as core samples used by
`DBSCAN <https://scikit-learn.org/stable/modules/generated/sklearn.cluster.DBSCAN.html>`_ .
If None, the value will be automatically determined based on the size of data.
metric : str, default='euclidean'
The metric used to calculate neighbour distances, used by
`DBSCAN <https://scikit-learn.org/stable/modules/generated/sklearn.cluster.DBSCAN.html>`_ .
Default is the Euclidean distance.
fig_close : bool, default=False
This parameter controls whether to close the plotted figures using plt.close(). It can save memory if
many figures are generated.
fig_format : str, default="pdf"
Figure format. Support formats included in matplotlib.pyplot.
num_cpus : int or None, default=None
The number of CPUs used for the tasks of ranking the poses and clustering poses. Python multiprocessing deployed by
`p_tqdm <https://github.com/swansonk14/p_tqdm>`_ is used to speed up these calculations.
save_dir : str or None, default=None
The root directory for saving the pose analysis results. If None, the root directory set at the initiation of
``LipidInteraction`` will be used. The representative poses and clustered poses will be stored in the directory
of Bound_Poses_{lipid} under the root directory.
Returns
-------
pose_pool : dict
Coordinates of the bound poses for the selected binding sites stored in a python dictionary
{binding_site_id: pose coordinates}. The pose coordinates include lipid coordinates and those of the receptor
at the time the pose was bound. The pose coordinates are stored in a
`mdtraj.Trajectory <https://mdtraj.org/1.9.4/api/generated/mdtraj.Trajectory.html>`_ object.
rmsd_data : pandas.DataFrame
Bound pose RMSDs, stored by columns with the binding site id as the column name.
See Also
--------
pylipid.func.collect_bound_poses
Collect bound pose coordinates from trajectories.
pylipid.func.vectorize_poses
Convert bound poses into distance vectors.
pylipid.func.calculate_scores
Score the bound poses based on the probability density function of the position of lipid atoms
pylipid.func.analyze_pose_wrapper
A wrapper function that ranks poses, clusters poses and calculates pose RMSD
"""
self._check_calculation("Binding Site ID", self.compute_binding_nodes, print_data=False)
pose_dir = check_dir(save_dir, "Bound_Poses_{}".format(self._lipid)) if save_dir is not None \
else check_dir(self._save_dir, "Bound_Poses_{}".format(self._lipid))
if "Binding Site RMSD" in self.dataset.columns:
pose_rmsd_per_residue = self.dataset.columns["Binding Site Pose RMSD"]
else:
pose_rmsd_per_residue = np.zeros(self._nresi_per_protein)
if binding_site_id is not None:
selected_bs_id = np.atleast_1d(binding_site_id)
else:
selected_bs_id = np.arange(len(self._node_list), dtype=int)
# store bound lipid poses
selected_bs_map = {bs_id: self._node_list[bs_id] for bs_id in selected_bs_id}
pose_traj, pose_info = collect_bound_poses(selected_bs_map, self._contact_residues_low, self._trajfile_list,
self._topfile_list, self._lipid, self._protein_ref, self._lipid_ref,
stride=self._stride, nprot=self._nprot)
protein_atom_indices = [[atom.index for atom in residue.atoms]
for residue in self._protein_ref.top.residues]
lipid_atom_indices = [self._protein_ref.n_atoms + atom_idx
for atom_idx in np.arange(self._lipid_ref.n_atoms)]
atom_weights = {atom_idx: 1 for atom_idx in np.arange(self._lipid_ref.n_atoms)}
if score_weights is not None:
translate = {atom_idx: score_weights[self._lipid_ref.top.atom(atom_idx).name]
for atom_idx in np.arange(self._lipid_ref.n_atoms)
if self._lipid_ref.top.atom(atom_idx).name in score_weights.keys()}
atom_weights.update(translate)
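# For example, score_weights={"PO4": 2} (a hypothetical headgroup bead name) would double
# the weight of that atom in the scoring function while all other atoms keep weight 1.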
if n_top_poses > 0:
# multiprocessing wrapped under p_tqdm
rmsd_set = p_map(partial(analyze_pose_wrapper, protein_atom_indices=protein_atom_indices,
lipid_atom_indices=lipid_atom_indices, n_top_poses=n_top_poses,
pose_dir=pose_dir, atom_weights=atom_weights, kde_bw=kde_bw,
pca_component=pca_component, pose_format=pose_format, n_clusters=n_clusters,
eps=eps, min_samples=min_samples, metric=metric,
trajfile_list=self._trajfile_list),
selected_bs_id, [pose_traj[bs_id] for bs_id in selected_bs_id],
[self._node_list[bs_id] for bs_id in selected_bs_id],
[pose_info[bs_id] for bs_id in selected_bs_id], num_cpus=num_cpus, desc="ANALYZE BOUND POSES")
RMSD_set = {}
for bs_id, rmsd in zip(selected_bs_id, rmsd_set):
RMSD_set["Binding Site {}".format(bs_id)] = rmsd
pose_rmsd_per_residue[self._node_list[bs_id]] = np.mean(RMSD_set["Binding Site {}".format(bs_id)])
# update dataset
self.dataset["Binding Site Pose RMSD"] = pose_rmsd_per_residue
pose_rmsd_data = pd.DataFrame(
dict([(bs_label, pd.Series(rmsd_set)) for bs_label, rmsd_set in RMSD_set.items()]))
# plot RMSD
if plot_rmsd and n_top_poses > 0:
plot_binding_site_data(pose_rmsd_data, os.path.join(pose_dir, f"Pose_RMSD_violinplot.{fig_format}"),
title="{}".format(self._lipid), ylabel="RMSD (nm)", fig_close=fig_close)
return pose_traj, pose_rmsd_data
def compute_surface_area(self, binding_site_id=None, radii=None, plot_data=True, save_dir=None,
fig_close=False, fig_format="pdf", num_cpus=None):
"""Calculate binding site surface areas.
The accessible surface area is calculated using the Shrake and Rupley algorithm [1]_. The basic idea of this
algorithm is to generate a mesh of points representing the surface of each atom and then count the number of
points that are not within the radius of any other atoms. The surface area can be derived from this number of
exposed points.
This method utilizes the shrake_rupley function of MDTraj for calculation of the surface area. In implementation,
this method strips the protein coordinates out of the simulation system and obtains the accessible surface area
of a binding site by summing those of its comprising residues.
Atom radius is required for calculation of surface areas. MDtraj defines the radii for common atoms (see
`here <https://github.com/mdtraj/mdtraj/blob/master/mdtraj/geometry/sasa.py#L56>`_). The radius of the BB bead
in MARTINI2 is defined as 0.26 nm, and SC1/SC2/SC3 are defined as 0.23 nm in this method. Use the parameter ``radii``
to define or change the definition of atom radii.
Parameters
-----------
binding_site_id : int or list of int, default=None
The binding site ID used in PyLipID. This ID is the index in the binding site node list that is
calculated by the method ``compute_binding_nodes``. The ID of the N-th binding site is (N-1). If None,
the surface areas of all binding sites are calculated.
radii : dict or None, default=None
The atom radii in the python dictionary format of {atom name: radius}
plot_data : bool, default=True
Plot surface area data for the selected binding sites in a violinplot and in a time series plot.
save_dir : str or None, default=None
The directory for saving the surface area plot. If None, it will be saved in Bound_Poses_{lipid} under the root
directory defined at the initiation of ``LipidInteraction``.
fig_close : bool, default=False
This parameter controls whether to close the plotted figures using plt.close(). It can save memory if
many figures are generated.
fig_format : str, default="pdf"
Figure format. Support formats included in matplotlib.pyplot.
num_cpus : int or None, default=None
The number of CPUs used for calculating the surface areas. Python multiprocessing deployed by
`p_tqdm <https://github.com/swansonk14/p_tqdm>`_ is used to speed up these calculations.
Returns
-------
surface_area : pandas.DataFrame
Binding site surface areas as a function of time for the selected binding sites. The surface area values are
stored in columns named by binding site id, and the time information is stored in the column
named "Time".
See Also
---------
pylipid.func.calculate_surface_area_wrapper
A wrapper function for calculating binding site surface area from a trajectory.
pylipid.plot.plot_surface_area
Plot binding site surface area as a function of time.
pylipid.plot.plot_binding_site_data
Plot binding site data in a matplotlib violin plot.
References
----------
.. [1] Shrake, A.; Rupley, J. A., Environment and exposure to solvent of protein atoms. Lysozyme and insulin.
Journal of Molecular Biology 1973, 79 (2), 351-371.
"""
MARTINI_CG_radii = {"BB": 0.26, "SC1": 0.23, "SC2": 0.23, "SC3": 0.23}
self._check_calculation("Binding Site ID", self.compute_binding_nodes, print_data=False)
if "Binding Site Surface Area" in self.dataset.columns:
# keep existing data
surface_area_per_residue = np.array(self.dataset["Binding Site Surface Area"].tolist())
else:
surface_area_per_residue = np.zeros(self._nresi_per_protein)
if radii is None:
radii_book = MARTINI_CG_radii
else:
radii_book = {**MARTINI_CG_radii, **radii}
# calculate binding site surface area
selected_bs_id = np.atleast_1d(np.array(binding_site_id, dtype=int)) if binding_site_id is not None \
else np.arange(len(self._node_list), dtype=int)
selected_bs_id_map = {bs_id: self._node_list[bs_id] for bs_id in selected_bs_id}
returned_values = p_map(partial(calculate_surface_area_wrapper, binding_site_map=selected_bs_id_map,
nprot=self._nprot, timeunit=self._timeunit, stride=self._stride,
dt_traj=self._dt_traj, radii=radii_book), self._trajfile_list,
self._topfile_list, np.arange(len(self._trajfile_list), dtype=int),
num_cpus=num_cpus, desc="CALCULATE BINDING SITE SURFACE AREA")
surface_data = []
data_keys = []
for returned_tuple in returned_values:
for idx in np.arange(len(returned_tuple[0])):
surface_data.append(returned_tuple[0][idx])
data_keys.append(returned_tuple[1][idx])
surface_area_data =
|
pd.concat(surface_data, keys=data_keys)
|
pandas.concat
|
# -*- encoding:utf-8 -*-
import pandas as pd
import numpy as np
import datetime
# from datetime import datetime
dire = '../../data/'
start = datetime.datetime.now()
orderHistory_train = pd.read_csv(dire + 'train/orderHistory_train.csv', encoding='utf-8')
orderFuture_train = pd.read_csv(dire + 'train/orderFuture_train.csv', encoding='utf-8')
userProfile_train = pd.read_csv(dire + 'train/userProfile_train.csv', encoding='utf-8')
userComment_train = pd.read_csv(dire + 'train/userComment_train.csv', encoding='utf-8')
action_train = pd.read_csv(dire + 'train/insert_action_train2.csv', encoding='utf-8')
city = pd.read_csv(dire + 'train/city.csv', encoding='utf-8')
orderHistory_test = pd.read_csv(dire + 'test/orderHistory_test.csv', encoding='utf-8')
orderFuture_test = pd.read_csv(dire + 'test/orderFuture_test.csv', encoding='utf-8')
userProfile_test = pd.read_csv(dire + 'test/userProfile_test.csv', encoding='utf-8')
userComment_test = pd.read_csv(dire + 'test/userComment_test.csv', encoding='utf-8')
action_test = pd.read_csv(dire + 'test/insert_action_test2.csv', encoding='utf-8')
# """
############# 3.action feature_3 #############
"""
# 1. Number of occurrences of action types 0-9 in all browsing records
# 2. Number of occurrences of action types 0-9 in the corresponding browsing records
# 3. Total browsing time over all browsing records
# 4. Browsing time over the corresponding browsing records
# 5. Whether 5 and 6 appear in the corresponding browsing records
# """
# Number of occurrences of action types 0-9 in all browsing records
def count_56789(orderFuture, action):
action_1 = action[action['actionType'] == 1]
action_2 = action[action['actionType'] == 2]
action_3 = action[action['actionType'] == 3]
action_4 = action[action['actionType'] == 4]
action_5 = action[action['actionType'] == 5]
action_6 = action[action['actionType'] == 6]
action_7 = action[action['actionType'] == 7]
action_8 = action[action['actionType'] == 8]
action_9 = action[action['actionType'] == 9]
action_1 = action_1.groupby(action_1.userid)['actionType'].count().reset_index()  # total number of action 1 per user
action_2 = action_2.groupby(action_2.userid)['actionType'].count().reset_index()  # total number of action 2 per user
action_3 = action_3.groupby(action_3.userid)['actionType'].count().reset_index()  # total number of action 3 per user
action_4 = action_4.groupby(action_4.userid)['actionType'].count().reset_index()  # total number of action 4 per user
action_5 = action_5.groupby(action_5.userid)['actionType'].count().reset_index()  # total number of action 5 per user
action_6 = action_6.groupby(action_6.userid)['actionType'].count().reset_index()  # total number of action 6 per user
action_7 = action_7.groupby(action_7.userid)['actionType'].count().reset_index()  # total number of action 7 per user
action_8 = action_8.groupby(action_8.userid)['actionType'].count().reset_index()  # total number of action 8 per user
action_9 = action_9.groupby(action_9.userid)['actionType'].count().reset_index()  # total number of action 9 per user
action_all = action.groupby(action.userid)['actionType'].count().reset_index()  # total number of all actions per user
action_1.rename(columns={'actionType': 'action_1'}, inplace=True)
action_2.rename(columns={'actionType': 'action_2'}, inplace=True)
action_3.rename(columns={'actionType': 'action_3'}, inplace=True)
action_4.rename(columns={'actionType': 'action_4'}, inplace=True)
action_5.rename(columns={'actionType': 'action_5'}, inplace=True)
action_6.rename(columns={'actionType': 'action_6'}, inplace=True)
action_7.rename(columns={'actionType': 'action_7'}, inplace=True)
action_8.rename(columns={'actionType': 'action_8'}, inplace=True)
action_9.rename(columns={'actionType': 'action_9'}, inplace=True)
action_all.rename(columns={'actionType': 'action_all'}, inplace=True)
orderFuture = pd.merge(orderFuture, action_1, on='userid', how='left')
orderFuture = pd.merge(orderFuture, action_2, on='userid', how='left')
orderFuture = pd.merge(orderFuture, action_3, on='userid', how='left')
orderFuture = pd.merge(orderFuture, action_4, on='userid', how='left')
orderFuture = pd.merge(orderFuture, action_5, on='userid', how='left')
orderFuture = pd.merge(orderFuture, action_6, on='userid', how='left')
orderFuture = pd.merge(orderFuture, action_7, on='userid', how='left')
orderFuture = pd.merge(orderFuture, action_8, on='userid', how='left')
orderFuture = pd.merge(orderFuture, action_9, on='userid', how='left')
orderFuture = pd.merge(orderFuture, action_all, on='userid', how='left')
orderFuture['action_1_rate'] = orderFuture['action_1']/orderFuture['action_all']  # share of action 1 in the user's total actions
orderFuture['action_2_rate'] = orderFuture['action_2']/orderFuture['action_all']  # share of action 2 in the user's total actions
orderFuture['action_3_rate'] = orderFuture['action_3']/orderFuture['action_all']  # share of action 3 in the user's total actions
orderFuture['action_4_rate'] = orderFuture['action_4']/orderFuture['action_all']  # share of action 4 in the user's total actions
orderFuture['action_5_rate'] = orderFuture['action_5']/orderFuture['action_all']  # share of action 5 in the user's total actions
orderFuture['action_6_rate'] = orderFuture['action_6']/orderFuture['action_all']  # share of action 6 in the user's total actions
orderFuture['action_7_rate'] = orderFuture['action_7']/orderFuture['action_all']  # share of action 7 in the user's total actions
orderFuture['action_8_rate'] = orderFuture['action_8']/orderFuture['action_all']  # share of action 8 in the user's total actions
orderFuture['action_9_rate'] = orderFuture['action_9']/orderFuture['action_all']  # share of action 9 in the user's total actions
# print(orderFuture)
return orderFuture
orderFuture_train = count_56789(orderFuture_train, action_train)
orderFuture_test = count_56789(orderFuture_test, action_test)
# Number of occurrences of action types 0-9 in the corresponding browsing records (actions with no order id)
def count_1_9(orderFuture, action):
action_1 = action[(action['actionType'] == 1) & (action.orderid.isnull())]
action_2 = action[(action['actionType'] == 2) & (action.orderid.isnull())]
action_3 = action[(action['actionType'] == 3) & (action.orderid.isnull())]
action_4 = action[(action['actionType'] == 4) & (action.orderid.isnull())]
action_5 = action[(action['actionType'] == 5) & (action.orderid.isnull())]
action_6 = action[(action['actionType'] == 6) & (action.orderid.isnull())]
action_7 = action[(action['actionType'] == 7) & (action.orderid.isnull())]
action_8 = action[(action['actionType'] == 8) & (action.orderid.isnull())]
action_9 = action[(action['actionType'] == 9) & (action.orderid.isnull())]
action_all = action[action.orderid.isnull()]
action_1 = action_1.groupby(action_1.userid)['actionType'].count().reset_index()  # total number of action 1 per user
action_2 = action_2.groupby(action_2.userid)['actionType'].count().reset_index()  # total number of action 2 per user
action_3 = action_3.groupby(action_3.userid)['actionType'].count().reset_index()  # total number of action 3 per user
action_4 = action_4.groupby(action_4.userid)['actionType'].count().reset_index()  # total number of action 4 per user
action_5 = action_5.groupby(action_5.userid)['actionType'].count().reset_index()  # total number of action 5 per user
action_6 = action_6.groupby(action_6.userid)['actionType'].count().reset_index()  # total number of action 6 per user
action_7 = action_7.groupby(action_7.userid)['actionType'].count().reset_index()  # total number of action 7 per user
action_8 = action_8.groupby(action_8.userid)['actionType'].count().reset_index()  # total number of action 8 per user
action_9 = action_9.groupby(action_9.userid)['actionType'].count().reset_index()  # total number of action 9 per user
action_all = action_all.groupby(action_all.userid)['actionType'].count().reset_index()  # total number of all actions per user
action_1.rename(columns={'actionType': 'action_1_c'}, inplace=True)
action_2.rename(columns={'actionType': 'action_2_c'}, inplace=True)
action_3.rename(columns={'actionType': 'action_3_c'}, inplace=True)
action_4.rename(columns={'actionType': 'action_4_c'}, inplace=True)
action_5.rename(columns={'actionType': 'action_5_c'}, inplace=True)
action_6.rename(columns={'actionType': 'action_6_c'}, inplace=True)
action_7.rename(columns={'actionType': 'action_7_c'}, inplace=True)
action_8.rename(columns={'actionType': 'action_8_c'}, inplace=True)
action_9.rename(columns={'actionType': 'action_9_c'}, inplace=True)
action_all.rename(columns={'actionType': 'action_all_c'}, inplace=True)
orderFuture = pd.merge(orderFuture, action_1, on='userid', how='left')
orderFuture = pd.merge(orderFuture, action_2, on='userid', how='left')
orderFuture = pd.merge(orderFuture, action_3, on='userid', how='left')
orderFuture = pd.merge(orderFuture, action_4, on='userid', how='left')
orderFuture = pd.merge(orderFuture, action_5, on='userid', how='left')
orderFuture = pd.merge(orderFuture, action_6, on='userid', how='left')
orderFuture = pd.merge(orderFuture, action_7, on='userid', how='left')
orderFuture = pd.merge(orderFuture, action_8, on='userid', how='left')
orderFuture = pd.merge(orderFuture, action_9, on='userid', how='left')
orderFuture = pd.merge(orderFuture, action_all, on='userid', how='left')
orderFuture['action_1_rate_c'] = orderFuture['action_1_c']/orderFuture['action_all_c']  # share of action 1 in the user's total actions
orderFuture['action_2_rate_c'] = orderFuture['action_2_c']/orderFuture['action_all_c']  # share of action 2 in the user's total actions
orderFuture['action_3_rate_c'] = orderFuture['action_3_c']/orderFuture['action_all_c']  # share of action 3 in the user's total actions
orderFuture['action_4_rate_c'] = orderFuture['action_4_c']/orderFuture['action_all_c']  # share of action 4 in the user's total actions
orderFuture['action_5_rate_c'] = orderFuture['action_5_c']/orderFuture['action_all_c']  # share of action 5 in the user's total actions
orderFuture['action_6_rate_c'] = orderFuture['action_6_c']/orderFuture['action_all_c']  # share of action 6 in the user's total actions
orderFuture['action_7_rate_c'] = orderFuture['action_7_c']/orderFuture['action_all_c']  # share of action 7 in the user's total actions
orderFuture['action_8_rate_c'] = orderFuture['action_8_c']/orderFuture['action_all_c']  # share of action 8 in the user's total actions
orderFuture['action_9_rate_c'] = orderFuture['action_9_c']/orderFuture['action_all_c']  # share of action 9 in the user's total actions
# print(orderFuture)
return orderFuture
orderFuture_train = count_1_9(orderFuture_train, action_train)
orderFuture_test = count_1_9(orderFuture_test, action_test)
# Browsing time span over all browsing records (last actionTime minus first actionTime per user)
def action_time(orderFuture, action):
first_action = action[['userid', 'actionType', 'actionTime']].groupby(['userid']).first().reset_index()
last_action = action[['userid', 'actionType', 'actionTime']].groupby(['userid']).last().reset_index()
first_action['action_time'] = last_action['actionTime'] - first_action['actionTime']
orderFuture = pd.merge(orderFuture, first_action[['userid', 'action_time']], on='userid', how='left')
return orderFuture
orderFuture_train = action_time(orderFuture_train, action_train)
orderFuture_test = action_time(orderFuture_test, action_test)
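# Note: action_time() assumes the rows are ordered by actionTime within each user.
# A vectorized sketch under the same assumption:
#     span = action.groupby('userid')['actionTime'].agg(lambda s: s.iloc[-1] - s.iloc[0])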
# Browsing time span over the corresponding browsing records (actions with no order id)
def action_time_c(orderFuture, action):
action = action[action.orderid.isnull()]
first_action = action[['userid', 'actionType', 'actionTime']].groupby(['userid']).first().reset_index()
last_action = action[['userid', 'actionType', 'actionTime']].groupby(['userid']).last().reset_index()
first_action['action_time_c'] = last_action['actionTime'] - first_action['actionTime']
orderFuture = pd.merge(orderFuture, first_action[['userid', 'action_time_c']], on='userid', how='left')
return orderFuture
orderFuture_train = action_time_c(orderFuture_train, action_train)
orderFuture_test = action_time_c(orderFuture_test, action_test)
# Whether the consecutive action pairs 5-6, 6-7, 7-8, 8-9 appear in all browsing records
def appear_56(orderFuture, action):
count = pd.DataFrame(columns=['userid', 'action_56_count', 'action_67_count', 'action_78_count', 'action_89_count'])
userid = []
action_56_count = []
action_67_count = []
action_78_count = []
action_89_count = []
for index, row in orderFuture.iterrows():
action1 = action[action['userid'] == row.userid].reset_index()
count56 = 0
count67 = 0
count78 = 0
count89 = 0
for i in range(len(action1)):
if (((i + 1) < len(action1)) and (action1['actionType'][i] == 5) and (action1['actionType'][i + 1] == 6)
and (action1['actionType_time'][i] < 1800)):
count56 = count56 + 1
if (((i + 1) < len(action1)) and (action1['actionType'][i] == 6) and (action1['actionType'][i + 1] == 7)
and (action1['actionType_time'][i] < 1800)):
count67 = count67 + 1
if (((i + 1) < len(action1)) and (action1['actionType'][i] == 7) and (action1['actionType'][i + 1] == 8)
and (action1['actionType_time'][i] < 1800)):
count78 = count78 + 1
if (((i + 1) < len(action1)) and (action1['actionType'][i] == 8) and (action1['actionType'][i + 1] == 9)
and (action1['actionType_time'][i] < 1800)):
count89 = count89 + 1
userid.append(row.userid)
action_56_count.append(count56)
action_67_count.append(count67)
action_78_count.append(count78)
action_89_count.append(count89)
count['userid'] = userid
count['action_56_count'] = action_56_count
count['action_67_count'] = action_67_count
count['action_78_count'] = action_78_count
count['action_89_count'] = action_89_count
orderFuture = pd.merge(orderFuture, count[['userid', 'action_56_count', 'action_67_count', 'action_78_count', 'action_89_count']], on='userid', how='left')
return orderFuture
orderFuture_train = appear_56(orderFuture_train, action_train)
orderFuture_test = appear_56(orderFuture_test, action_test)
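# Note: the row-wise loops in appear_56() can be expressed with shift() (a sketch, assuming
# the frame is sorted by userid and actionTime, as the loop already assumes):
#     nxt = action.groupby('userid')['actionType'].shift(-1)
#     is_56 = (action['actionType'] == 5) & (nxt == 6) & (action['actionType_time'] < 1800)
#     action_56_count = is_56.groupby(action['userid']).sum()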
# Whether the consecutive action pairs 5-6, 6-7, 7-8, 8-9 appear in the corresponding browsing records
def appear_56_c(orderFuture, action):
count =
|
pd.DataFrame(columns=['userid', 'action_56_count_c', 'action_67_count_c', 'action_78_count_c', 'action_89_count_c'])
|
pandas.DataFrame
|
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
from collections import OrderedDict
import numpy as np
import pandas as pd
import pytest
try:
import pyarrow as pa
except ImportError: # pragma: no cover
pa = None
from ....config import options, option_context
from ....dataframe import DataFrame
from ....tensor import arange, tensor
from ....tensor.random import rand
from ....tests.core import require_cudf
from ....utils import lazy_import
from ... import eval as mars_eval, cut, qcut
from ...datasource.dataframe import from_pandas as from_pandas_df
from ...datasource.series import from_pandas as from_pandas_series
from ...datasource.index import from_pandas as from_pandas_index
from .. import to_gpu, to_cpu
from ..to_numeric import to_numeric
from ..rebalance import DataFrameRebalance
cudf = lazy_import('cudf', globals=globals())
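# cudf is imported lazily so this module can be collected without a GPU stack installed;
# the GPU round-trip tests below are gated with the @require_cudf decorator.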
@require_cudf
def test_to_gpu_execution(setup_gpu):
pdf = pd.DataFrame(np.random.rand(20, 30), index=np.arange(20, 0, -1))
df = from_pandas_df(pdf, chunk_size=(13, 21))
cdf = to_gpu(df)
res = cdf.execute().fetch()
assert isinstance(res, cudf.DataFrame)
pd.testing.assert_frame_equal(res.to_pandas(), pdf)
pseries = pdf.iloc[:, 0]
series = from_pandas_series(pseries)
cseries = series.to_gpu()
res = cseries.execute().fetch()
assert isinstance(res, cudf.Series)
pd.testing.assert_series_equal(res.to_pandas(), pseries)
@require_cudf
def test_to_cpu_execution(setup_gpu):
pdf = pd.DataFrame(np.random.rand(20, 30), index=np.arange(20, 0, -1))
df = from_pandas_df(pdf, chunk_size=(13, 21))
cdf = to_gpu(df)
df2 = to_cpu(cdf)
res = df2.execute().fetch()
assert isinstance(res, pd.DataFrame)
pd.testing.assert_frame_equal(res, pdf)
pseries = pdf.iloc[:, 0]
series = from_pandas_series(pseries, chunk_size=(13, 21))
cseries = to_gpu(series)
series2 = to_cpu(cseries)
res = series2.execute().fetch()
assert isinstance(res, pd.Series)
pd.testing.assert_series_equal(res, pseries)
def test_rechunk_execution(setup):
data = pd.DataFrame(np.random.rand(8, 10))
df = from_pandas_df(pd.DataFrame(data), chunk_size=3)
df2 = df.rechunk((3, 4))
res = df2.execute().fetch()
pd.testing.assert_frame_equal(data, res)
data = pd.DataFrame(np.random.rand(10, 10), index=np.random.randint(-100, 100, size=(10,)),
columns=[np.random.bytes(10) for _ in range(10)])
df = from_pandas_df(data)
df2 = df.rechunk(5)
res = df2.execute().fetch()
pd.testing.assert_frame_equal(data, res)
# test Series rechunk execution.
data = pd.Series(np.random.rand(10,))
series = from_pandas_series(data)
series2 = series.rechunk(3)
res = series2.execute().fetch()
pd.testing.assert_series_equal(data, res)
series2 = series.rechunk(1)
res = series2.execute().fetch()
pd.testing.assert_series_equal(data, res)
# test index rechunk execution
data = pd.Index(np.random.rand(10,))
index = from_pandas_index(data)
index2 = index.rechunk(3)
res = index2.execute().fetch()
pd.testing.assert_index_equal(data, res)
index2 = index.rechunk(1)
res = index2.execute().fetch()
pd.testing.assert_index_equal(data, res)
# test rechunk on mixed typed columns
data = pd.DataFrame({0: [1, 2], 1: [3, 4], 'a': [5, 6]})
df = from_pandas_df(data)
df = df.rechunk((2, 2)).rechunk({1: 3})
res = df.execute().fetch()
pd.testing.assert_frame_equal(data, res)
def test_series_map_execution(setup):
raw = pd.Series(np.arange(10))
s = from_pandas_series(raw, chunk_size=7)
with pytest.raises(ValueError):
# the dtype cannot be inferred correctly: the inferred dtype is int,
# but the result is actually float because unmapped values become NaN
s.map({5: 10})
r = s.map({5: 10}, dtype=float)
result = r.execute().fetch()
expected = raw.map({5: 10})
pd.testing.assert_series_equal(result, expected)
r = s.map({i: 10 + i for i in range(7)}, dtype=float)
result = r.execute().fetch()
expected = raw.map({i: 10 + i for i in range(7)})
pd.testing.assert_series_equal(result, expected)
r = s.map({5: 10}, dtype=float, na_action='ignore')
result = r.execute().fetch()
expected = raw.map({5: 10}, na_action='ignore')
pd.testing.assert_series_equal(result, expected)
# dtype can be inferred
r = s.map({5: 10.})
result = r.execute().fetch()
expected = raw.map({5: 10.})
|
pd.testing.assert_series_equal(result, expected)
|
pandas.testing.assert_series_equal
|
import numpy as np
from datetime import timedelta
from distutils.version import LooseVersion
import pandas as pd
import pandas.util.testing as tm
from pandas import to_timedelta
from pandas.util.testing import assert_series_equal, assert_frame_equal
from pandas import (Series, Timedelta, DataFrame, Timestamp, TimedeltaIndex,
timedelta_range, date_range, DatetimeIndex, Int64Index,
_np_version_under1p10, Float64Index, Index, tslib)
from pandas.tests.test_base import Ops
class TestTimedeltaIndexOps(Ops):
def setUp(self):
super(TestTimedeltaIndexOps, self).setUp()
mask = lambda x: isinstance(x, TimedeltaIndex)
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = []
def test_ops_properties(self):
self.check_ops_properties(['days', 'hours', 'minutes', 'seconds',
'milliseconds'])
self.check_ops_properties(['microseconds', 'nanoseconds'])
def test_asobject_tolist(self):
idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'),
Timedelta('3 days'), Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = TimedeltaIndex([timedelta(days=1), timedelta(days=2), pd.NaT,
timedelta(days=4)], name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'), pd.NaT,
Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
# monotonic
idx1 = TimedeltaIndex(['1 days', '2 days', '3 days'])
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = TimedeltaIndex(['1 days', np.nan, '3 days', 'NaT'])
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timedelta('1 days'))
self.assertEqual(idx.max(), Timedelta('3 days'))
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = TimedeltaIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
td = TimedeltaIndex(np.asarray(dr))
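# The datetime64 values are reinterpreted as timedeltas (days since the Unix epoch),
# so 2016-01-15 maps to Timedelta('16815 days') and 2016-01-20 to Timedelta('16820 days').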
self.assertEqual(np.min(td), Timedelta('16815 days'))
self.assertEqual(np.max(td), Timedelta('16820 days'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, td, out=0)
self.assertEqual(np.argmin(td), 0)
self.assertEqual(np.argmax(td), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, td, out=0)
def test_round(self):
td = pd.timedelta_range(start='16801 days', periods=5, freq='30Min')
elt = td[1]
expected_rng = TimedeltaIndex([
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 01:00:00'),
Timedelta('16801 days 02:00:00'),
Timedelta('16801 days 02:00:00'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(td.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with self.assertRaisesRegexp(ValueError, msg):
td.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, td.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_representation(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex([], dtype='timedelta64[ns]', freq='D')"""
exp2 = ("TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', "
"freq='D')")
exp3 = ("TimedeltaIndex(['1 days', '2 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp4 = ("TimedeltaIndex(['1 days', '2 days', '3 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp5 = ("TimedeltaIndex(['1 days 00:00:01', '2 days 00:00:00', "
"'3 days 00:00:00'], dtype='timedelta64[ns]', freq=None)")
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """Series([], dtype: timedelta64[ns])"""
exp2 = """0 1 days
dtype: timedelta64[ns]"""
exp3 = """0 1 days
1 2 days
dtype: timedelta64[ns]"""
exp4 = """0 1 days
1 2 days
2 3 days
dtype: timedelta64[ns]"""
exp5 = """0 1 days 00:00:01
1 2 days 00:00:00
2 3 days 00:00:00
dtype: timedelta64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = repr(
|
pd.Series(idx)
|
pandas.Series
|
#%%
############################################################################
# IMPORTS
############################################################################
import pandas as pd
import numpy as np
from utils import model_zoo, data_transformer
import argparse
import pickle
import os
#%%
############################################################################
# CONSTANTS & PARAMETERS
############################################################################
# Default file Locations and model name (parameters)
MODEL_NAME = "KERAS_LENET5"
PICKLE_PATH = "C:/kaggle/kaggle_keypoints/pickle"
MODEL_PATH = "C:/kaggle/kaggle_keypoints/models"
# Processing behavior (parameters)
NORMALIZE_LABELS = False
VERBOSE = True
USE30 = True
# Processing behavior (constants)
AVAILABLE_MODELS = ["KERAS_LENET5", "KERAS_INCEPTION", "KERAS_KAGGLE1", "KERAS_NAIMISHNET", "KERAS_CONVNET5", "KERAS_INCEPTIONV3", "KERAS_KAGGLE2", "KERAS_RESNET50", "KERAS_RESNET", "KERAS_RESNEXT50", "KERAS_RESNEXT101"]
TEST_DATA_FILE = "cleandata_naive_test.pkl"
TEST_IDS_FILE = "raw_id_lookup.pkl"
OVERLAP_FILE = "cleandata_naive_overlap.pkl"
TEST8_DATA_FILE = "cleandata_test8.pkl"
TEST30_DATA_FILE = "cleandata_test30.pkl"
#%%
############################################################################
# ARGUMENT SPECIFICATION
############################################################################
parser = argparse.ArgumentParser(description = "Performs predictions for the Kaggle Facial Keypoints Detection challenge.")
# Commandline arguments
parser.add_argument('-nv', '--no_verbose', action = 'store_true', help = 'Disables verbose output mode for more detailed descriptions of process.')
parser.add_argument('-pp', '--pickle_path', type = str, default = "C:/kaggle/kaggle_keypoints/pickle", help = "Path to location of output pickle files (post processing files).")
parser.add_argument('-mp', '--model_path', type = str, default = "C:/kaggle/kaggle_keypoints/models", help = "Path to location of output model files.")
parser.add_argument('-m', '--model_name', type = str, default = "KERAS_LENET5", help = "Name of the model to train.")
parser.add_argument('-pa', '--partial', action = 'store_true', help = 'Trains only using the 8-value dataset (vs. the full 30-value dataset)')
parser.add_argument('-nl', '--normalize_labels', action = 'store_true', help = "Enables the normalization of prediction label values prior to training.")
############################################################################
# ARGUMENT PARSING
############################################################################
def process_arguments(parsed_args, display_args = False):
global VERBOSE, PICKLE_PATH, MODEL_PATH, MODEL_NAME, NORMALIZE_LABELS, USE30
args = vars(parser.parse_args())
if display_args:
print("".join(["\PREDICT Arguments in use:\n", "-" * 30, "\n"]))
for arg in args:
print("Parameter '%s' == %s" % (arg, str(getattr(parser.parse_args(), arg))))
print("\n")
# Assign arguments to globals
VERBOSE = not args['no_verbose']
USE30 = not args['partial']
MODEL_NAME = args['model_name']
NORMALIZE_LABELS = args['normalize_labels']
MODEL_PATH = str(args['model_path']).lower().strip().replace('\\', '/')
PICKLE_PATH = str(args['pickle_path']).lower().strip().replace('\\', '/')
# validate the presence of the paths
for p, v, l in zip([MODEL_PATH, PICKLE_PATH], ['model_path', 'pickle_path'], ['Model file path', 'Pickle file path']):
if not os.path.exists(p):
raise RuntimeError(" ".join([l, "'%s'" % p, "specified in parameter `%s` does not exist." % v]))
# validate the parameters entered
if not MODEL_NAME in AVAILABLE_MODELS:
raise RuntimeError("Parameter `model_name` value of '%s' is invalid. Must be in list: %s" % (MODEL_NAME, str(AVAILABLE_MODELS)))
#%%
############################################################################
# LOAD DATA
############################################################################
# load the data for training
def load_data(pickle_path, test_file, id_file, overlap_file, verbose = True):
if verbose: print("".join(["-" * 50, "\n>>> BEGIN LOAD DATA <<<\n", "-" * 50, "\n"]))
if not pickle_path.endswith("/"): pickle_path = "".join([pickle_path, "/"])
test_file = "".join([pickle_path, test_file])
id_file = "".join([pickle_path, id_file])
overlap_file = "".join([pickle_path, overlap_file])
for f, l in zip([test_file, id_file, overlap_file], ['Test', 'Test IDs', 'Overlap']):
if not os.path.isfile(f):
raise RuntimeError("%s file '%s' not found - training cancelled." % (l, f))
test = pickle.load(open(test_file, "rb"))
if verbose: print("Test file '%s' loaded; shape: %s" % (test_file, str(test.shape)))
ids = pickle.load(open(id_file, "rb"))
if verbose: print("Test IDs file '%s' loaded; shape: %s" % (id_file, str(ids.shape)))
overlap = pickle.load(open(overlap_file, "rb"))
if verbose: print("Overlap file '%s' loaded; shape: %s" % (overlap_file, str(overlap.shape)))
if verbose: print("".join(["\n", "-" * 50, "\n>>> END LOAD DATA <<<\n", "-" * 50, "\n"]))
return test, ids, overlap
# %%
############################################################################
# PREDICT MODEL (GENERIC HANDLER)
############################################################################
def predict_model(model_path, pickle_path, model_name, normalize_labels, test, ids, overlap, predict_file, skip_output = False, skip_overlap = False, full = True, verbose = True):
if verbose: print("".join(["-" * 50, "\n>>> BEGIN PREDICT ON %s <<<\n" % model_name, "-" * 50, "\n"]))
# load helper modules for models and data transformation
models = model_zoo.Models(model_path = MODEL_PATH)
xform = data_transformer.Xform(pickle_path = PICKLE_PATH, verbose = VERBOSE)
# validate the existence of the model output path; if it doesn't exist, create it
if model_path.endswith("/"): sep_add = ""
else: sep_add = "/"
validate_path = "".join([model_path, sep_add, model_name])
if not os.path.exists(validate_path):
if verbose: print("Model output path '%s' does not yet exist, creating it." % validate_path)
os.makedirs(validate_path)
# call the training module specific to the algorithm called
if model_name == "KERAS_LENET5":
feature_name = "ALL_FEATURES"
pred = predict_model_lenet5(models = models, xform = xform, test = test, ids = ids,
feature_name = feature_name, full = full, verbose = verbose)
elif model_name == "KERAS_INCEPTIONV3":
feature_name = "ALL_FEATURES"
pred, _ = predict_model_inceptionv3(models = models, xform = xform, test = test, ids = ids,
feature_name = feature_name, full = full, verbose = verbose)
elif model_name == "KERAS_RESNET50":
feature_name = "ALL_FEATURES"
pred = predict_model_resnet50(models = models, xform = xform, test = test, ids = ids,
feature_name = feature_name, full = full, verbose = verbose)
elif model_name == "KERAS_RESNEXT50":
feature_name = "ALL_FEATURES"
pred = predict_model_resnext50(models = models, xform = xform, test = test, ids = ids,
feature_name = feature_name, full = full, verbose = verbose)
elif model_name == "KERAS_RESNEXT101":
feature_name = "ALL_FEATURES"
pred = predict_model_resnext101(models = models, xform = xform, test = test, ids = ids,
feature_name = feature_name, full = full, verbose = verbose)
elif model_name == "KERAS_RESNET":
feature_name = "ALL_FEATURES"
pred = predict_model_resnet(models = models, xform = xform, test = test, ids = ids,
feature_name = feature_name, full = full, verbose = verbose)
elif model_name == "KERAS_INCEPTION":
feature_name = "ALL_FEATURES"
Y_main, Y_aux1, Y_aux2, Y_main_cols, Y_aux1_cols, Y_aux2_cols = predict_model_inception(models = models,
xform = xform, test = test, ids = ids, feature_name = feature_name, full = full, verbose = verbose)
pred = [Y_main, Y_aux1, Y_aux2, Y_main_cols, Y_aux1_cols, Y_aux2_cols]
elif model_name == "KERAS_KAGGLE1":
feature_name = "ALL_FEATURES"
pred = predict_model_kaggle1(models = models, xform = xform, test = test, ids = ids,
feature_name = feature_name, full = full, verbose = verbose)
elif model_name == "KERAS_KAGGLE2":
feature_name = "ALL_FEATURES"
pred = predict_model_kaggle2(models = models, xform = xform, test = test, ids = ids,
feature_name = feature_name, full = full, verbose = verbose)
elif model_name == "KERAS_CONVNET5":
feature_name = "ALL_FEATURES"
pred = predict_model_convnet5(models = models, xform = xform, test = test, ids = ids,
feature_name = feature_name, full = full, verbose = verbose)
elif model_name == "KERAS_NAIMISHNET":
if full:
feature_name = ['left_eye_center', 'right_eye_center', 'left_eye_inner_corner', 'left_eye_outer_corner',
'right_eye_inner_corner', 'right_eye_outer_corner', 'left_eyebrow_inner_end', 'left_eyebrow_outer_end',
'right_eyebrow_inner_end', 'right_eyebrow_outer_end', 'nose_tip', 'mouth_left_corner', 'mouth_right_corner',
'mouth_center_top_lip', 'mouth_center_bottom_lip']
else:
feature_name = ['left_eye_center', 'right_eye_center', 'nose_tip', 'mouth_center_bottom_lip']
pred = predict_model_naimishnet(models = models, xform = xform, test = test, ids = ids, feature_name = feature_name,
normalize_labels = normalize_labels, full = full, verbose = verbose)
else:
raise RuntimeError("Model name '%s' not understood; cancelling training." % model_name)
if not skip_output:
# this branch for normal output against TEST
output_prediction(model_path = model_path, model_name = model_name, Y = pred, test = test, ids = ids, feature_name = feature_name,
predict_file = predict_file, xform = xform, overlap = overlap, normalize_labels = normalize_labels, skip_overlap = skip_overlap, full = full, verbose = verbose)
else:
# this branch for output of STACK cross validation
output_stack(model_path = model_path, model_name = model_name, Y = pred, test = test, ids = ids, feature_name = feature_name,
predict_file = predict_file, xform = xform, overlap = overlap, normalize_labels = normalize_labels, skip_overlap = skip_overlap, full = full, verbose = verbose)
if verbose: print("".join(["-" * 50, "\n>>> END PREDICT ON %s <<<\n" % model_name, "-" * 50, "\n"]))
return pred
# %%
############################################################################
# PREDICT MODEL NAIMISHNET
############################################################################
def predict_model_naimishnet(models, xform, test, ids, feature_name, normalize_labels, full = True, verbose = True):
# create empty DF for capturing inferenced values (unpivoted x,y coordinates to columns)
submission = pd.DataFrame({'image_id':int(), 'variable':'', 'value':float()},index=[1])
submission = submission[(submission.index == -1)]
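# The two lines above build an empty DataFrame that keeps the declared column dtypes;
# one melted block of (image_id, variable, value) rows is appended per keypoint below.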
df = {}
for keypoint in feature_name:
X, subset = xform.PrepareTest(test, ids, keypoint, verbose = verbose)
subset = subset[['image_id']]
Y = models.predict_keras_naimishnet(X = X, feature_name = keypoint, full = full, verbose = verbose)
# un-normalize the predictions
mod_subset = subset.copy()
for i, lbl in zip(range(Y.shape[1]), ['_x', '_y']):
if normalize_labels:
Y[:,i] = xform.UnNormalize_Labels(Y[:,i])
# ensure pixel boundaries are clipped between 0.0 and 96.0
Y[:,i] = np.clip(Y[:,i], 0.0, 96.0)
col = "".join([keypoint, lbl])
mod_subset[col] = Y[:,i]
submission = submission.append(pd.melt(mod_subset, id_vars = ['image_id']), ignore_index = True)
submission.columns = ['image_id','feature_name','location']
return submission
#%%
############################################################################
# PREDICT MODEL LENET5
############################################################################
def predict_model_lenet5(models, xform, test, ids, feature_name, full = True, verbose = True):
X, _ = xform.PrepareTest(test = test, ids = ids, feature_name = feature_name, verbose = verbose)
Y = models.predict_keras_lenet5(X = X, feature_name = feature_name, full = full, verbose = verbose)
return Y
#%%
############################################################################
# PREDICT MODEL KAGGLE1
############################################################################
def predict_model_kaggle1(models, xform, test, ids, feature_name, full = True, verbose = True):
X, _ = xform.PrepareTest(test = test, ids = ids, feature_name = feature_name, verbose = verbose)
Y = models.predict_keras_kaggle1(X = X, feature_name = feature_name, full = full, verbose = verbose)
return Y
#%%
############################################################################
# PREDICT MODEL KAGGLE2
############################################################################
def predict_model_kaggle2(models, xform, test, ids, feature_name, full = True, verbose = True):
X, _ = xform.PrepareTest(test = test, ids = ids, feature_name = feature_name, verbose = verbose)
Y = models.predict_keras_kaggle2(X = X, feature_name = feature_name, full = full, verbose = verbose)
return Y
#%%
############################################################################
# PREDICT MODEL INCEPTIONV3
############################################################################
def predict_model_inceptionv3(models, xform, test, ids, feature_name, full = True, verbose = True):
X, _ = xform.PrepareTest(test = test, ids = ids, feature_name = feature_name, verbose = verbose)
Y, Y_cols = models.predict_keras_inceptionv3(X = X, feature_name = feature_name, full = full, verbose = verbose)
return Y, Y_cols
#%%
############################################################################
# PREDICT MODEL RESNET
############################################################################
def predict_model_resnet(models, xform, test, ids, feature_name, full = True, verbose = True):
X, _ = xform.PrepareTest(test = test, ids = ids, feature_name = feature_name, verbose = verbose)
Y = models.predict_keras_resnet(X = X, feature_name = feature_name, full = full, verbose = verbose)
return Y
#%%
############################################################################
# PREDICT MODEL RESNET50
############################################################################
def predict_model_resnet50(models, xform, test, ids, feature_name, full = True, verbose = True):
X, _ = xform.PrepareTest(test = test, ids = ids, feature_name = feature_name, verbose = verbose)
Y = models.predict_keras_resnet50(X = X, feature_name = feature_name, full = full, verbose = verbose)
return Y
#%%
############################################################################
# PREDICT MODEL RESNEXT50
############################################################################
def predict_model_resnext50(models, xform, test, ids, feature_name, full = True, verbose = True):
X, _ = xform.PrepareTest(test = test, ids = ids, feature_name = feature_name, verbose = verbose)
Y = models.predict_keras_resnext50(X = X, feature_name = feature_name, full = full, verbose = verbose)
return Y
#%%
############################################################################
# PREDICT MODEL RESNEXT101
############################################################################
def predict_model_resnext101(models, xform, test, ids, feature_name, full = True, verbose = True):
X, _ = xform.PrepareTest(test = test, ids = ids, feature_name = feature_name, verbose = verbose)
Y = models.predict_keras_resnext101(X = X, feature_name = feature_name, full = full, verbose = verbose)
return Y
#%%
############################################################################
# PREDICT MODEL CONVNET5
############################################################################
def predict_model_convnet5(models, xform, test, ids, feature_name, full = True, verbose = True):
X, _ = xform.PrepareTest(test = test, ids = ids, feature_name = feature_name, verbose = verbose)
Y = models.predict_keras_convnet5(X = X, feature_name = feature_name, full = full, verbose = verbose)
return Y
#%%
############################################################################
# PREDICT MODEL INCEPTION
############################################################################
def predict_model_inception(models, xform, test, ids, feature_name, full = True, verbose = True):
X, _ = xform.PrepareTest(test, ids, feature_name, verbose = verbose)
Y_main, Y_aux1, Y_aux2, Y_main_cols, Y_aux1_cols, Y_aux2_cols = models.predict_keras_inception(X = X, feature_name = feature_name, full = full, verbose = verbose)
return Y_main, Y_aux1, Y_aux2, Y_main_cols, Y_aux1_cols, Y_aux2_cols
############################################################################
# OUTPUT PREDICTIONS (STACK)
############################################################################
def output_stack(model_path, model_name, Y, feature_name, test, ids, predict_file, xform, overlap, normalize_labels, skip_overlap = False, full = True, verbose = True):
if full:
train_cols = ['left_eye_center_x', 'left_eye_center_y', 'right_eye_center_x', 'right_eye_center_y', 'left_eye_inner_corner_x',
'left_eye_inner_corner_y', 'left_eye_outer_corner_x', 'left_eye_outer_corner_y', 'right_eye_inner_corner_x',
'right_eye_inner_corner_y', 'right_eye_outer_corner_x','right_eye_outer_corner_y', 'left_eyebrow_inner_end_x',
'left_eyebrow_inner_end_y', 'left_eyebrow_outer_end_x', 'left_eyebrow_outer_end_y', 'right_eyebrow_inner_end_x',
'right_eyebrow_inner_end_y', 'right_eyebrow_outer_end_x', 'right_eyebrow_outer_end_y', 'nose_tip_x', 'nose_tip_y',
'mouth_left_corner_x', 'mouth_left_corner_y', 'mouth_right_corner_x', 'mouth_right_corner_y', 'mouth_center_top_lip_x',
'mouth_center_top_lip_y', 'mouth_center_bottom_lip_x', 'mouth_center_bottom_lip_y', 'image']
else:
train_cols = ['left_eye_center_x', 'left_eye_center_y', 'right_eye_center_x', 'right_eye_center_y', 'nose_tip_x', 'nose_tip_y',
'mouth_center_bottom_lip_x', 'mouth_center_bottom_lip_y', 'image']
# generate output for LeNet, Kaggle1, Kaggle2, ConvNet, InceptionV3, and ResNet50
if model_name in ['KERAS_LENET5', 'KERAS_KAGGLE1', 'KERAS_KAGGLE2', 'KERAS_CONVNET5', 'KERAS_INCEPTIONV3', 'KERAS_RESNET50', 'KERAS_RESNET', 'KERAS_RESNEXT50', 'KERAS_RESNEXT101']:
Y = pd.DataFrame(Y, columns = [c for c in train_cols if not 'image' == c], index = test.image_id.values)
Y.index.rename('image_id', inplace = True)
# write the predictions file
Y.to_csv(predict_file, index = True)
print("Predictions written to '%s'." % predict_file)
elif model_name == 'KERAS_INCEPTION':
created_files, blend_vals = [], None
for j, l, cols in zip([Y[0], Y[1], Y[2]], ['main_model', 'aux1_model', 'aux2_model'], [Y[3], Y[4], Y[5]]):
for ncol, col in enumerate(cols):
#ol = overlap.copy()
#print(l, col)
__loop_pred_file = predict_file.replace("".join([model_name, "/"]), "".join([model_name, "/", l.upper(), "__"])).replace(".csv", "".join(["_", col.replace("/", "_"), ".csv"]))
created_files.append(__loop_pred_file)
j_df = pd.DataFrame(j[ncol], columns = [c for c in train_cols if not 'image' == c], index = test.image_id.values)
j_df.index.rename('image_id', inplace = True)
for c in [c for c in train_cols if not 'image' == c]:
if NORMALIZE_LABELS:
vals = xform.UnNormalize_Labels(j_df[c].values)
j_df[c] = vals
j_df[c] = np.clip(j_df[c], 0.0, 96.0)
if blend_vals is None:
blend_vals = j_df.values
else:
blend_vals = np.mean((blend_vals, j_df.values), axis = 0)
# write the predictions file
#j_df.to_csv(__loop_pred_file, index = True)
#print("Predictions written to '%s'." % __loop_pred_file)
# now iterate over all the created files and create a blend
df_combined = pd.DataFrame(blend_vals, columns = [c for c in train_cols if not 'image' == c], index = test.image_id.values)
df_combined.index.rename('image_id', inplace = True)
df_combined.to_csv(predict_file, index = True)
print("\nBlended predictions written to '%s' (mean average of all %d Inception model predictions).\n\n" % (predict_file, len(created_files)))
elif model_name == "KERAS_NAIMISHNET":
df = {}
df['image_id'] = test.image_id.values
for c in [c for c in train_cols if not 'image' == c]:
df[c] = Y[(Y.image_id.isin(test.image_id.values) & (Y.feature_name == c))].location.values
df = pd.DataFrame(df).set_index('image_id')
df.to_csv(predict_file, index = True)
print("Predictions written to '%s'." % predict_file)
return
#%%
############################################################################
# OUTPUT PREDICTIONS (TEST)
############################################################################
def output_prediction(model_path, model_name, Y, feature_name, test, ids, predict_file, xform, overlap, normalize_labels, skip_overlap = False, full = True, verbose = True):
if full:
train_cols = ['left_eye_center_x', 'left_eye_center_y', 'right_eye_center_x', 'right_eye_center_y', 'left_eye_inner_corner_x',
'left_eye_inner_corner_y', 'left_eye_outer_corner_x', 'left_eye_outer_corner_y', 'right_eye_inner_corner_x',
'right_eye_inner_corner_y', 'right_eye_outer_corner_x','right_eye_outer_corner_y', 'left_eyebrow_inner_end_x',
'left_eyebrow_inner_end_y', 'left_eyebrow_outer_end_x', 'left_eyebrow_outer_end_y', 'right_eyebrow_inner_end_x',
'right_eyebrow_inner_end_y', 'right_eyebrow_outer_end_x', 'right_eyebrow_outer_end_y', 'nose_tip_x', 'nose_tip_y',
'mouth_left_corner_x', 'mouth_left_corner_y', 'mouth_right_corner_x', 'mouth_right_corner_y', 'mouth_center_top_lip_x',
'mouth_center_top_lip_y', 'mouth_center_bottom_lip_x', 'mouth_center_bottom_lip_y', 'image']
else:
train_cols = ['left_eye_center_x', 'left_eye_center_y', 'right_eye_center_x', 'right_eye_center_y', 'nose_tip_x', 'nose_tip_y',
'mouth_center_bottom_lip_x', 'mouth_center_bottom_lip_y', 'image']
    # generate output for LENET5, CONVNET5, KAGGLE1, KAGGLE2, INCEPTIONV3, the RESNET/RESNEXT variants, and our stacking METAREGRESSOR_LINEAR
if model_name in ['KERAS_LENET5', 'KERAS_KAGGLE1', 'KERAS_KAGGLE2', 'KERAS_CONVNET5', 'METAREGRESSOR_LINEAR', 'KERAS_INCEPTIONV3', 'KERAS_RESNET50', 'KERAS_RESNET', 'KERAS_RESNEXT50', 'KERAS_RESNEXT101']:
Y = pd.DataFrame(Y, columns = [c for c in train_cols if not 'image' == c], index = test.image_id.values)
Y = pd.melt(Y.reset_index(), id_vars=['index'])
Y.columns = ['image_id', 'feature_name','location']
Y = ids.drop(columns=['location']).merge(Y, on=['image_id','feature_name'], how = 'inner').drop(columns=['image_id','feature_name'])
Y.columns = ['RowId','Location']
if normalize_labels:
norm_Y = xform.UnNormalize_Labels(Y.Location.values)
Y.Location = norm_Y
Y.Location = np.clip(Y.Location, 0.0, 96.0)
# write the predictions file
Y.to_csv(predict_file, index = False)
print("Predictions written to '%s'." % predict_file)
if not skip_overlap:
# write the predictions w/ overlap file
overlap = pd.melt(overlap, id_vars=['image_id'])
overlap.columns = ['image_id', 'feature_name','location']
overlap = overlap.merge(ids.drop(columns=['location']), on = ['image_id','feature_name'], how = 'inner')
overlap = overlap[['row_id', 'location']]
overlap.columns = ['RowId', 'Location']
Y = Y.set_index('RowId')
overlap = overlap.set_index('RowId')
Y.update(overlap, join = 'left', overwrite = True)
Y = Y.reset_index()
Y.to_csv(predict_file.replace("predictions", "OVERLAPpredictions"), index = False)
print("Overlap predictions written to '%s'." % predict_file.replace("predictions", "OVERLAPpredictions"))
elif model_name == 'KERAS_INCEPTION':
created_files = []
for j, l, cols in zip([Y[0], Y[1], Y[2]], ['main_model', 'aux1_model', 'aux2_model'], [Y[3], Y[4], Y[5]]):
for ncol, col in enumerate(cols):
ol = overlap.copy()
#print(l, col)
__loop_pred_file = predict_file.replace("".join([model_name, "/"]), "".join([model_name, "/", l.upper(), "__"])).replace(".csv", "".join(["_", col.replace("/", "_"), "_output",".csv"]))
created_files.append(__loop_pred_file)
j_df =
|
pd.DataFrame(j[ncol], columns = [c for c in train_cols if not 'image' == c], index = test.image_id.values)
|
pandas.DataFrame
|
# %%
# Convert Instant Fuel Rate to Joules per Second (KGS and JS)
# <NAME>, Ph.D. Candidate
# %%
# Load required libraries
import pandas as pd
import matplotlib.pyplot as plt
# %%
# Load data from Excel to a pandas dataframe
def load_from_Excel(vehicle, settings):
directory = (
"../../../Google Drive/Academia/PhD Thesis/Field Experiments/Veepeak/"
+ vehicle
+ "/Processed/"
)
input_file = vehicle + " - {0} - {1}.xlsx".format(
settings["INPUT_TYPE"], settings["INPUT_INDEX"]
)
input_path = directory + input_file
sheets_dict =
|
pd.read_excel(input_path, sheet_name=None, header=0)
|
pandas.read_excel
|
import pandas as pd
messages = pd.read_csv('SMSSpamCollection',sep='\t',names=['label','message'])
#print(messages)
import re
import nltk
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
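# Note: the English stopword list must be available locally; if it is not,
# a one-off nltk.download('stopwords') will fetch it.
# The loop below strips non-letters, lowercases, removes English stopwords and
# Porter-stems each message to build the cleaned corpus.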
ps = PorterStemmer()
corpus = []
for i in range(len(messages)):
review = re.sub('[^a-zA-Z]'," ",messages['message'][i])
review = review.lower()
review = review.split()
review = [ps.stem(word) for word in review if not word in stopwords.words('english')]
review = ' '.join(review)
corpus.append(review)
from sklearn.feature_extraction.text import CountVectorizer
cv = CountVectorizer(max_features=5000)
x = cv.fit_transform(corpus).toarray()
y =
|
pd.get_dummies(messages['label'])
|
pandas.get_dummies
|
import copy
import logging
import pandas as pd
import numpy as np
from collections import Counter
from sklearn import preprocessing, utils
import sklearn.model_selection as ms
from scipy.sparse import isspmatrix
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
import os
import seaborn as sns
from abc import ABC, abstractmethod
# TODO: Move this to a common lib?
OUTPUT_DIRECTORY = './output'
if not os.path.exists(OUTPUT_DIRECTORY):
os.makedirs(OUTPUT_DIRECTORY)
if not os.path.exists('{}/images'.format(OUTPUT_DIRECTORY)):
os.makedirs('{}/images'.format(OUTPUT_DIRECTORY))
# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
def plot_pairplot(title, df, class_column_name=None):
plt = sns.pairplot(df, hue=class_column_name)
return plt
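# Hypothetical usage (dataframe and column names are illustrative, not from the source):
#   grid = plot_pairplot('iris pairs', iris_df, class_column_name='species')
#   grid.savefig('{}/images/iris_pairplot.png'.format(OUTPUT_DIRECTORY))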
# Adapted from https://stats.stackexchange.com/questions/239973/a-general-measure-of-data-set-imbalance
def is_balanced(seq):
n = len(seq)
classes = [(clas, float(count)) for clas, count in Counter(seq).items()]
k = len(classes)
H = -sum([(count/n) * np.log((count/n)) for clas, count in classes])
return H/np.log(k) > 0.75
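# Illustrative check (hypothetical labels): a 50/50 split gives H/log(k) = 1.0 (balanced),
# while a 95/5 split gives H/log(2) ~= 0.29, below the 0.75 threshold (imbalanced).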
class DataLoader(ABC):
def __init__(self, path, verbose, seed):
self._path = path
self._verbose = verbose
self._seed = seed
self.features = None
self.classes = None
self.testing_x = None
self.testing_y = None
self.training_x = None
self.training_y = None
self.binary = False
self.balanced = False
self._data = pd.DataFrame()
def load_and_process(self, data=None, preprocess=True):
"""
Load data from the given path and perform any initial processing required. This will populate the
features and classes and should be called before any processing is done.
:return: Nothing
"""
if data is not None:
self._data = data
self.features = None
self.classes = None
self.testing_x = None
self.testing_y = None
self.training_x = None
self.training_y = None
else:
self._load_data()
self.log("Processing {} Path: {}, Dimensions: {}", self.data_name(), self._path, self._data.shape)
if self._verbose:
old_max_rows = pd.options.display.max_rows
pd.options.display.max_rows = 10
self.log("Data Sample:\n{}", self._data)
pd.options.display.max_rows = old_max_rows
if preprocess:
self.log("Will pre-process data")
self._preprocess_data()
self.get_features()
self.get_classes()
self.log("Feature dimensions: {}", self.features.shape)
self.log("Classes dimensions: {}", self.classes.shape)
self.log("Class values: {}", np.unique(self.classes))
class_dist = np.histogram(self.classes)[0]
class_dist = class_dist[np.nonzero(class_dist)]
self.log("Class distribution: {}", class_dist)
self.log("Class distribution (%): {}", (class_dist / self.classes.shape[0]) * 100)
self.log("Sparse? {}", isspmatrix(self.features))
if len(class_dist) == 2:
self.binary = True
self.balanced = is_balanced(self.classes)
self.log("Binary? {}", self.binary)
self.log("Balanced? {}", self.balanced)
def scale_standard(self):
self.features = StandardScaler().fit_transform(self.features)
if self.training_x is not None:
self.training_x = StandardScaler().fit_transform(self.training_x)
if self.testing_x is not None:
self.testing_x = StandardScaler().fit_transform(self.testing_x)
def build_train_test_split(self, test_size=0.3):
        if self.training_x is None and self.training_y is None and self.testing_x is None and self.testing_y is None:
self.training_x, self.testing_x, self.training_y, self.testing_y = ms.train_test_split(
self.features, self.classes, test_size=test_size, random_state=self._seed, stratify=self.classes
)
def get_features(self, force=False):
if self.features is None or force:
self.log("Pulling features")
self.features = np.array(self._data.iloc[:, 0:-1])
return self.features
def get_classes(self, force=False):
if self.classes is None or force:
self.log("Pulling classes")
self.classes = np.array(self._data.iloc[:, -1])
return self.classes
def dump_test_train_val(self, test_size=0.2, random_state=123):
ds_train_x, ds_test_x, ds_train_y, ds_test_y = ms.train_test_split(self.features, self.classes,
test_size=test_size,
random_state=random_state,
stratify=self.classes)
pipe = Pipeline([('Scale', preprocessing.StandardScaler())])
train_x = pipe.fit_transform(ds_train_x, ds_train_y)
train_y = np.atleast_2d(ds_train_y).T
test_x = pipe.transform(ds_test_x)
test_y = np.atleast_2d(ds_test_y).T
train_x, validate_x, train_y, validate_y = ms.train_test_split(train_x, train_y,
test_size=test_size, random_state=random_state,
stratify=train_y)
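        # Remap the 0/1 class labels to -1/+1 before writing the splits out
        # (presumably for learners that expect signed labels).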
test_y = pd.DataFrame(np.where(test_y == 0, -1, 1))
train_y = pd.DataFrame(np.where(train_y == 0, -1, 1))
validate_y = pd.DataFrame(np.where(validate_y == 0, -1, 1))
tst = pd.concat([pd.DataFrame(test_x), test_y], axis=1)
trg = pd.concat([pd.DataFrame(train_x), train_y], axis=1)
val = pd.concat([pd.DataFrame(validate_x), validate_y], axis=1)
tst.to_csv('data/{}_test.csv'.format(self.data_name()), index=False, header=False)
trg.to_csv('data/{}_train.csv'.format(self.data_name()), index=False, header=False)
val.to_csv('data/{}_validate.csv'.format(self.data_name()), index=False, header=False)
@abstractmethod
def _load_data(self):
pass
@abstractmethod
def data_name(self):
pass
@abstractmethod
def _preprocess_data(self):
pass
@abstractmethod
def class_column_name(self):
pass
@abstractmethod
def pre_training_adjustment(self, train_features, train_classes):
"""
Perform any adjustments to training data before training begins.
:param train_features: The training features to adjust
:param train_classes: The training classes to adjust
:return: The processed data
"""
return train_features, train_classes
def reload_from_hdf(self, hdf_path, hdf_ds_name, preprocess=True):
self.log("Reloading from HDF {}".format(hdf_path))
loader = copy.deepcopy(self)
df = pd.read_hdf(hdf_path, hdf_ds_name)
loader.load_and_process(data=df, preprocess=preprocess)
loader.build_train_test_split()
return loader
def log(self, msg, *args):
"""
If the learner has verbose set to true, log the message with the given parameters using string.format
:param msg: The log message
:param args: The arguments
:return: None
"""
if self._verbose:
logger.info(msg.format(*args))
class CreditDefaultData(DataLoader):
def __init__(self, path='data/default of credit card clients.xls', verbose=False, seed=1):
super().__init__(path, verbose, seed)
def _load_data(self):
self._data =
|
pd.read_excel(self._path, header=1, index_col=0)
|
pandas.read_excel
|
from collections import OrderedDict
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
calc_op_types = (
nn.Conv2d, nn.ConvTranspose2d,
nn.Linear,
nn.BatchNorm2d,
)
def summary(model, x, calc_op_types=calc_op_types, *args, **kwargs):
"""Summarize the given input model.
    Summarized information includes 1) output shape, 2) kernel shape,
    3) number of parameters and 4) operations (Mult-Adds).
Args:
model (Module): Model to summarize
x (Tensor): Input tensor of the model with [N, C, H, W] shape
            dtype and device have to match the model
args, kwargs: Other argument used in `model.forward` function
"""
def register_hook(module):
def hook(module, inputs, outputs):
module_idx = len(summary)
# Lookup name in a dict that includes parents
for name, item in module_names.items():
if item == module:
key = "{}_{}".format(module_idx, name)
info = OrderedDict()
info["id"] = id(module)
if isinstance(outputs, (list, tuple)):
try:
info["out"] = list(outputs[0].size())
except AttributeError:
# pack_padded_seq and pad_packed_seq store feature into data attribute
info["out"] = list(outputs[0].data.size())
else:
info["out"] = list(outputs.size())
info["ksize"] = "-"
info["inner"] = OrderedDict()
info["params_nt"], info["params"], info["macs"] = 0, 0, 0
for name, param in module.named_parameters():
info["params"] += param.nelement() * param.requires_grad
info["params_nt"] += param.nelement() * (not param.requires_grad)
if name == "weight":
ksize = list(param.size())
# to make [in_shape, out_shape, ksize, ksize]
if len(ksize) > 1:
ksize[0], ksize[1] = ksize[1], ksize[0]
info["ksize"] = ksize
# ignore N, C when calculate Mult-Adds in ConvNd
if isinstance(module, nn.Conv2d) or isinstance(module, nn.ConvTranspose2d):
assert len(inputs[0].size()) == 4 and len(inputs[0].size()) == len(outputs[0].size())+1
in_c, in_h, in_w = inputs[0].size()[1:]
k_h, k_w = module.kernel_size
out_c, out_h, out_w = outputs[0].size()
groups = module.groups
kernel_mul = k_h * k_w * (in_c // groups)
# conv
if isinstance(module, nn.Conv2d):
kernel_mul_group = kernel_mul * out_h * out_w * (out_c // groups)
# deconv
elif isinstance(module, nn.ConvTranspose2d):
# kernel_mul_group = kernel_mul * in_h * in_w * (out_c // groups)
kernel_mul_group = kernel_mul * out_h * out_w * (out_c // groups)
total_mul = kernel_mul_group * groups
info["macs"] += total_mul
elif isinstance(module, nn.BatchNorm2d):
info["macs"] += inputs[0].size()[1]
else:
info["macs"] += param.nelement()
# RNN modules have inner weights such as weight_ih_l0
elif "weight" in name:
info["inner"][name] = list(param.size())
info["macs"] += param.nelement()
# if the current module is already-used, mark as "(recursive)"
# check if this module has params
if list(module.named_parameters()):
for v in summary.values():
if info["id"] == v["id"]:
info["params"] = "(recursive)"
if info["params"] == 0:
info["params"], info["macs"] = "-", "-"
summary[key] = info
# ignore Sequential and ModuleList
if isinstance(module, calc_op_types) or not module._modules:
hooks.append(module.register_forward_hook(hook))
module_names = get_names_dict(model)
hooks = []
summary = OrderedDict()
model.apply(register_hook)
try:
with torch.no_grad():
model(x) if not (kwargs or args) else model(x, *args, **kwargs)
finally:
for hook in hooks:
hook.remove()
# Use pandas to align the columns
df = pd.DataFrame(summary).T
df["Mult-Adds"] =
|
pd.to_numeric(df["macs"], errors="coerce")
|
pandas.to_numeric
|
# AUTOGENERATED! DO NOT EDIT! File to edit: notebooks/04_Create_Acs_Indicators.ipynb (unless otherwise specified).
__all__ = ['getColName', 'getColByName', 'addKey', 'nullIfEqual', 'sumInts', 'age5', 'age18', 'age24', 'age64', 'age65',
'bahigher', 'carpool', 'drvalone', 'elheat', 'empl', 'fam', 'female', 'femhhs', 'heatgas', 'hisp', 'hh25inc',
           'hh40inc', 'hh60inc', 'hh75inc', 'hhchpov', 'hhm75', 'hhs', 'hsdipl', 'lesshs', 'male', 'mhhi', 'nilf',
'novhcl', 'nohhint', 'othercom', 'paa', 'p2more', 'pasi', 'pubtran', 'pwhite', 'sclemp', 'tpop', 'trav14',
           'trav45', 'trav44', 'unempr', 'walked', 'createAcsIndicator']
# Cell
#@title Run This Cell: Misc Function Declarations
# These functions right here are used in the calculations below.
# Finds a column matching a substring
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
# Pulls a column from one dataset into a new dataset.
# This is not a crosswalk. calls getColByName()
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
# Returns the sum of the two specified columns (0 if that sum is 0).
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
# I'm thinking this doesn't need to be a function..
def sumInts(df): return df.sum(numeric_only=True)
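# Hypothetical usage (column names are illustrative):
#   getColName(df, 'B01001_001E')                      # -> e.g. 'B01001_001E_Total'
#   fi = addKey(df, pd.DataFrame(), 'B01001_001E')     # copies that column into fi under its full name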
# Cell
#@title Run This Cell: Create age5
#File: age5.py
#Author: <NAME>
#Date: 4/16/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B01001 - SEX BY AGE
# Universe: Total population
# Table Creates: tpop, female, male, age5 age18 age24 age64 age65
#purpose:
#input: #output:
import pandas as pd
import glob
def age5( df, columnsToInclude ):
fi = pd.DataFrame()
columns = ['B01001_027E_Total_Female_Under_5_years',
'B01001_003E_Total_Male_Under_5_years',
'B01001_001E_Total' , 'tract']
columns.extend(columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df[ 'B01001_003E_Total_Male_Under_5_years' ]
+ df[ 'B01001_027E_Total_Female_Under_5_years' ]
) / df['B01001_001E_Total'] * 100
return fi
# Cell
#@title Run This Cell: age18
#File: age18.py
#Author: <NAME>
#Date: 4/16/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B01001 - SEX BY AGE
# Universe: Total population
# Table Creates: tpop, female, male, age5 age18 age24 age64 age65
#purpose:
#input: #output:
import pandas as pd
import numpy
import glob
def age18( df, columnsToInclude ):
fi = pd.DataFrame()
columns = ['B01001_001E_Total',
'B01001_004E_Total_Male_5_to_9_years',
'B01001_005E_Total_Male_10_to_14_years' ,
'B01001_006E_Total_Male_15_to_17_years',
'B01001_028E_Total_Female_5_to_9_years',
'B01001_029E_Total_Female_10_to_14_years' ,
'B01001_030E_Total_Female_15_to_17_years']
columns = df.filter(regex='001E|004E|005E|006E|028E|029E|030E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='004E|005E|006E|028E|029E|030E').sum(axis=1)
) / df['B01001_001E_Total:'] * 100
return fi
# Cell
#@title Run This Cell: Create age24
#File: age24.py
#Author: <NAME>
#Date: 9/8/21
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B01001 - SEX BY AGE
# Universe: Total population
# Table Creates: tpop, female, male, age5 age18 age24 age64 age65
#purpose:
#input: #output:
import pandas as pd
import glob
def age24( df, columnsToInclude ):
fi = pd.DataFrame()
columns = ['B01001_007E_Total_Male_18_and_19_years',
'B01001_008E_Total_Male_20_years',
'B01001_009E_Total_Male_21_years' ,
'B01001_010E_Total_Male_22_to_24_years' ,
'B01001_031E_Total_Female_18_and_19_years' ,
'B01001_032E_Total_Female_20_years' ,
'B01001_033E_Total_Female_21_years' ,
'B01001_034E_Total_Female_22_to_24_years',
'tract']
columns.extend(columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df[ 'B01001_007E_Total_Male_18_and_19_years' ]
+ df[ 'B01001_008E_Total_Male_20_years' ]
+ df[ 'B01001_009E_Total_Male_21_years' ]
+ df[ 'B01001_010E_Total_Male_22_to_24_years' ]
+ df[ 'B01001_031E_Total_Female_18_and_19_years' ]
+ df[ 'B01001_032E_Total_Female_20_years' ]
+ df[ 'B01001_033E_Total_Female_21_years' ]
+ df[ 'B01001_034E_Total_Female_22_to_24_years' ]
) / df['B01001_001E_Total'] * 100
return fi
# Cell
#@title Run This Cell: age64
import pandas as pd
import glob
def age64( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='012E|013E|014E|015E|016E|017E|018E|019E|036E|037E|038E|039E|040E|041E|042E|043E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='012E|013E|014E|015E|016E|017E|018E|019E|036E|037E|038E|039E|040E|041E|042E|043E').sum(axis=1)
) / df['B01001_001E_Total:'] * 100
return fi
# Cell
#@title Run This Cell: age65
import pandas as pd
import glob
def age65( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='001E|020E|021E|022E|023E|024E|025E|044E|045E|046E|047E|048E|049E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='020E|021E|022E|023E|024E|025E|044E|045E|046E|047E|048E|049E').sum(axis=1)
) / df['B01001_001E_Total:'] * 100
return fi
# Cell
#@title Run This Cell: bahigher
import pandas as pd
import glob
def bahigher( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='005E|006E|001E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='005E|006E').sum(axis=1)
) / df['B06009_001E'] * 100
return fi
# Cell
#@title Run This Cell: - carpool
import pandas as pd
import glob
def carpool( df, columnsToInclude ):
# Final Dataframe
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|017E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='B08101_017E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
#@title Run This Cell: - drvalone
import pandas as pd
import glob
def drvalone( df, columnsToInclude ):
# Final Dataframe
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|009E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='B08101_009E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
#@ title Run This Cell: -elheat
import pandas as pd
import glob
def elheat( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='B25040_004E|B25040_001E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='B25040_004E').sum(axis=1)
) / ( df.filter(regex='B25040_001E').sum(axis=1)
) * 100
return fi
# Cell
#@ title Run This Cell: -empl
import pandas as pd
import glob
def empl( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|009E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='B08101_009E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
#@ title Run This Cell: -fam
import pandas as pd
import glob
def fam( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|009E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='B08101_009E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
#@ title Run This Cell: -female
import pandas as pd
import glob
def female( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|009E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['female'] = ( df.filter(regex='B08101_009E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
#@ title Run This Cell: -femhhs
import pandas as pd
import glob
def femhhs( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|009E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['femhhs'] = ( df.filter(regex='B08101_009E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
#@ title Run This Cell: -heatgas
import pandas as pd
import glob
def heatgas( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|009E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='B08101_009E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
#@title Run This Cell: hisp
import pandas as pd
import glob
def hisp( df, columnsToInclude ):
fi = pd.DataFrame()
columns = ['B03002_001E_Total',
'B03002_012E_Total_Hispanic_or_Latino']
columns = df.filter(regex='001E|012E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
print('addKey df',df.columns,'fi',fi.columns,'col: ', col)
fi = addKey(df, fi, col)
print(' ')
fi['final'] = ( df.filter(regex='012E').sum(axis=1)
) / df['B03002_001E_Total:'] * 100
return fi
# Cell
#@title Run This Cell: hh25inc
import pandas as pd
import glob
def hh25inc( df, columnsToInclude ):
df.columns = df.columns.str.replace(r"[$]", "")
fi = pd.DataFrame()
columns = ['B19001_001E_Total',
"B19001_002E_Total_Less_than_10,000",
"B19001_003E_Total_10,000_to_14,999",
"B19001_004E_Total_15,000_to_19,999",
"B19001_005E_Total_20,000_to_24,999"]
columns = df.filter(regex='002E|003E|004E|005E|001E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
print('addKey col: ', col, df.columns)
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='002E|003E|004E|005E').sum(axis=1)
) / df['B19001_001E_Total:'] * 100
return fi
# Cell
#@ title Run This Cell: -hh40inc
import pandas as pd
import glob
def hh40inc( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|009E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='B08101_009E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
#@ title Run This Cell: -hh60inc
import pandas as pd
import glob
def hh60inc( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|009E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='B08101_009E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
#@ title Run This Cell: -hh75inc
import pandas as pd
import glob
def hh75inc( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|009E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='B08101_009E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
#@ title Run This Cell: -hhchpov
import pandas as pd
import glob
def hhchpov( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|009E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='B08101_009E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
#@ title Run This Cell: -hhm75
import pandas as pd
import glob
def hhm75( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|009E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='B08101_009E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
#@ title Run This Cell: -hhs
import pandas as pd
import glob
def hhs( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|009E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='B08101_009E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
#@ title Run This Cell: -hsdipl
import pandas as pd
import glob
def hsdipl( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|009E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='B08101_009E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
#@ title Run This Cell: -lesshs
import pandas as pd
import glob
def lesshs( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|009E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='B08101_009E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
#@ title Run This Cell: -male
import pandas as pd
import glob
def male( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|009E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='B08101_009E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
# @title Run This Cell : Create MHHI
#File: mhhi.py
#Author: <NAME>
#Date: 1/24/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B19001 - HOUSEHOLD INCOME IN THE PAST 12 MONTHS (IN 2016 INFLATION-ADJUSTED DOLLARS)
# Universe: Households
# Table Creates: hh25 hh40 hh60 hh75 hhm75, mhhi
#purpose: Produce Household Income - Median Household Income (mhhi) Indicator
#input:
#output:
import pandas as pd
import glob
def mhhi( df, columnsToInclude = [] ):
info = pd.DataFrame(
[
['B19001_002E', 0, 10000],
['B19001_003E', 10000, 4999 ],
['B19001_004E', 15000, 4999 ],
['B19001_005E', 20000, 4999 ],
['B19001_006E', 25000, 4999 ],
['B19001_007E', 30000, 4999],
['B19001_008E', 35000, 4999 ],
['B19001_009E', 40000, 4999 ],
['B19001_010E', 45000, 4999 ],
['B19001_011E', 50000, 9999 ],
['B19001_012E', 60000, 14999],
['B19001_013E', 75000, 24999 ],
['B19001_014E', 100000, 24999 ],
['B19001_015E', 125000, 24999 ],
['B19001_016E', 150000, 49000 ],
['B19001_017E', 200000, 1000000000000000000000000 ],
],
columns=['variable', 'lower', 'range']
)
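    # Each row of `info` is an ACS household-income bracket: 'lower' is the bracket's
    # lower bound and 'range' its width, e.g. B19001_003E covers $10,000 to $14,999.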
# Final Dataframe
data_table = pd.DataFrame()
for index, row in info.iterrows():
data_table = addKey(df, data_table, row['variable'])
    # Accumulate totals across the columns.
    # Midpoint: half of the cumulative total in the last (16th) bracket column.
temp_table = data_table.cumsum(axis=1)
temp_table['midpoint'] = (temp_table.iloc[ : , -1 :] /2) # V3
temp_table['midpoint_index'] = False
temp_table['midpoint_index_value'] = False # Z3
temp_table['midpoint_index_lower'] = False # W3
temp_table['midpoint_index_range'] = False # X3
temp_table['midpoint_index_minus_one_cumulative_sum'] = False #Y3
# step 3 - csa_agg3: get the midpoint index by "when midpoint > agg[1] and midpoint <= agg[2] then 2"
# Get CSA Midpoint Index using the breakpoints in our info table.
for index, row in temp_table.iterrows():
# Get the index of the first column where our midpoint is greater than the columns value.
midpoint = row['midpoint']
midpoint_index = 0
# For each column (except the 6 columns we just created)
        # Handles the case where the tract's midpoint is less than the value in the first column, 'B19001_002E_Total_Less_than_$10,000'
if( midpoint < int(row[0]) or row[-6] == False ):
temp_table.loc[ index, 'midpoint_index' ] = 0
else:
for column in row.iloc[:-6]:
# set midpoint index to the column with the highest value possible that is under midpoint
if( midpoint >= int(column) ):
if midpoint==False: print (str(column) + ' - ' + str(midpoint))
temp_table.loc[ index, 'midpoint_index' ] = midpoint_index +1
midpoint_index += 1
# temp_table = temp_table.drop('Unassigned--Jail')
for index, row in temp_table.iterrows():
temp_table.loc[ index, 'midpoint_index_value' ] = data_table.loc[ index, data_table.columns[row['midpoint_index']] ]
temp_table.loc[ index, 'midpoint_index_lower' ] = info.loc[ row['midpoint_index'] ]['lower']
temp_table.loc[ index, 'midpoint_index_range' ] = info.loc[ row['midpoint_index'] ]['range']
temp_table.loc[ index, 'midpoint_index_minus_one_cumulative_sum'] = row[ row['midpoint_index']-1 ]
    # This is our denominator, which can't be zero (guarded below).
for index, row in temp_table.iterrows():
if row['midpoint_index_value']==False:
temp_table.at[index, 'midpoint_index_value']=1;
    #~~~~~~~~~~~~~~~
    # Step 3)
    # Run the Calculation
    # Calculation = midpoint_lower + midpoint_range * ((midpoint - midpoint_upto_agg) / nullif(midpoint_total, 0))
    # Calculation = W3 + X3 * ((V3 - Y3) / Z3)
    # V3 -> midpoint of households == cumulative sum / 2                  (column 'midpoint')
    # W3 -> lower limit of the income bracket containing the midpoint     ('midpoint_index_lower')
    # X3 -> width of the bracket containing the median                    ('midpoint_index_range')
    # Z3 -> number of households within the bracket containing the median ('midpoint_index_value')
    # Y3 -> cumulative frequency up to, but NOT including, the median bracket ('midpoint_index_minus_one_cumulative_sum')
#~~~~~~~~~~~~~~~
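    # Worked example with hypothetical numbers (not from the data): if the cumulative
    # midpoint V3 = 500 falls in the $25,000-$29,999 bracket (W3 = 25000, X3 = 4999),
    # with Y3 = 450 households below the bracket and Z3 = 120 households inside it, then
    # median = 25000 + 4999 * ((500 - 450) / 120), which is roughly 27,083.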
def finalCalc(x):
return ( x['midpoint_index_lower']+ x['midpoint_index_range']*(
( x['midpoint']-x['midpoint_index_minus_one_cumulative_sum'])/ x['midpoint_index_value'] )
)
temp_table['final'] = temp_table.apply(lambda x: finalCalc(x), axis=1)
temp_table[columnsToInclude] = df[columnsToInclude]
return temp_table
# Cell
#@ title Run This Cell: -nilf
import pandas as pd
import glob
def nilf( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|009E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='B08101_009E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
#@title Run This Cell: novhcl
import pandas as pd
import glob
def novhcl( df, columnsToInclude ):
fi = pd.DataFrame()
columns = ['B08201_002E_Total_No_vehicle_available','B08201_001E_Total']
columns = df.filter(regex='002E|003E|004E|005E|001E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
print('addKey df',df.columns,'fi',fi.columns,'col: ', col)
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='002E').sum(axis=1)
) / df['B08201_001E_Total:'] * 100
return fi
# Cell
#@title Run This Cell: nohhint
import pandas as pd
import glob
def nohhint( df, columnsToInclude ):
fi = pd.DataFrame()
columns = ['B28011_001E_Total',
'B28011_002E_Total_With_an_Internet_subscription',
'B28011_003E_Total_With_an_Internet_subscription_Dial-up_alone',
'B28011_004E_Total_With_an_Internet_subscription_Broadband_such_as_cable,_fiber_optic,_or_DSL',
'B28011_005E_Total_With_an_Internet_subscription_Satellite_Internet_service',
'B28011_006E_Total_With_an_Internet_subscription_Other_service',
'B28011_007E_Total_Internet_access_without_a_subscription',
'B28011_008E_Total_No_Internet_access']
columns = df.filter(regex='008E|001E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
print('addKey df',df.columns,'col: ', col)
fi = addKey(df, fi, col)
print(' ')
# Calculate
fi['nohhint'] = ( df.filter(regex='008E').sum(axis=1)
) / df['B28011_001E_Total:'] * 100
return fi
# Cell
#@ title Run This Cell: -othercom
import pandas as pd
import glob
def othercom( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|009E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['othercom'] = ( df.filter(regex='B08101_009E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
#@title Run This Cell: paa
import pandas as pd
import glob
def paa( df, columnsToInclude ):
fi =
|
pd.DataFrame()
|
pandas.DataFrame
|
from reinforcement_learning.market.random_agent import RandomAgent
import sys
sys.path.insert(0, '../../../etf_data')
from etf_data_loader import load_all_data_from_file2
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime
import seaborn as sns
def gen_random_date(year_low, year_high):
y = np.random.randint(year_low, year_high)
m = np.random.randint(1, 12)
d = np.random.randint(1, 28)
return datetime(year=y, month=m, day=d)
def get_data_random_dates(df_adj_close, min_year, max_year):
rand_start = gen_random_date(min_year, max_year)
rand_end = gen_random_date(min_year, max_year)
if rand_start > rand_end:
tmp = rand_start
rand_start = rand_end
rand_end = tmp
data = df_adj_close[df_adj_close['date'] > str(rand_start)]
data = data[data['date'] < str(rand_end)]
return data
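# clean_data (below) replaces zero adjusted-close values for a ticker with the next
# later non-zero value in the series, i.e. it back-fills gap/placeholder zeros.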
def clean_data(df_adj_close, ticket):
top = df_adj_close.index.max()
for index in df_adj_close.index:
if df_adj_close.loc[index, ticket] == 0.:
for i in range(index, top + 1):
if df_adj_close.loc[i, ticket] > 0.:
df_adj_close.loc[index, ticket] = df_adj_close.loc[i, ticket]
break
return df_adj_close
start_date = '1993-01-01'
end_date = '2018-01-01'
prefix = 'mil_'
ranked =
|
pd.read_csv('../../../buy_hold_simulation/evaluation_results/mil_evaluation_result_1.csv')
|
pandas.read_csv
|
# -*- coding: utf-8 -*-
"""Retrieve metadata from PLEXOS production cost modelling results.
Database can be either a h5plexos file or a formatted Marmot hdf5 file.
@author: <NAME>
"""
import os
import sys
import h5py
import pandas as pd
import numpy as np
import logging
class MetaData():
"""Handle the retrieval of metadata from the formatted or
original plexos solution h5 files.
Attributes:
        filename (str) = The name of the h5 file to retrieve data from.
h5_data (h5py.File) = loaded h5 file in memory.
"""
filename: str = None
h5_data: h5py.File = None
def __init__(self, HDF5_folder_in: str, read_from_formatted_h5: bool = True,
Region_Mapping: pd.DataFrame = pd.DataFrame(),
partition_number: int = 0):
"""
Args:
HDF5_folder_in (str): Folder containing h5plexos h5 files.
read_from_formatted_h5 (bool, optional): Boolean for whether the metadata is
being read from the formatted hdf5 file or the original PLEXOS solution file.
Defaults to True.
Region_Mapping (pd.DataFrame, optional): DataFrame of extra regions to map.
Defaults to pd.DataFrame().
partition_number (int, optional): Which temporal partition of h5 data to retrieve
metadata from in the formatted h5 file. Defaults to 0.
"""
self.logger = logging.getLogger('marmot_format.'+__name__)
self.HDF5_folder_in = HDF5_folder_in
self.Region_Mapping = Region_Mapping
self.read_from_formatted_h5 = read_from_formatted_h5
self.partition_number = partition_number
self.start_index = None
@classmethod
def _check_if_existing_filename(cls, filename: str) -> bool:
"""Check if the passed filename is the same or different from previous calls.
        If the file is different, replaces the filename with the new value
        and closes the old file.
Args:
            filename (str): The name of the h5 file to retrieve data from.
Returns:
bool: False if new file, True if existing
"""
if cls.filename != filename:
cls.filename = filename
cls.close_h5()
return False
elif cls.filename == filename:
return True
@classmethod
def close_h5(cls) -> None:
"""Closes h5 file open in memory.
"""
if cls.h5_data:
cls.h5_data.close()
def _read_data(self, filename: str) -> None:
"""Reads h5 file into memory.
Args:
            filename (str): The name of the h5 file to retrieve
data from.
"""
self.logger.debug(f"Reading New h5 file: {filename}")
processed_file_format = "{}_formatted.h5"
try:
if self.read_from_formatted_h5:
filename = processed_file_format.format(filename)
self.h5_data = h5py.File(os.path.join(self.HDF5_folder_in, filename), 'r')
partitions = [key for key in self.h5_data['metadata'].keys()]
if self.partition_number > len(partitions):
self.logger.warning(f"\nYou have chosen to use metadata partition_number {self.partition_number}, "
f"But there are only {len(partitions)} partitions in your formatted h5 file.\n"
"Defaulting to partition_number 0")
self.partition_number = 0
self.start_index = f"metadata/{partitions[self.partition_number]}/"
else:
self.h5_data = h5py.File(os.path.join(self.HDF5_folder_in, filename), 'r')
self.start_index = "metadata/"
except OSError:
if self.read_from_formatted_h5:
self.logger.warning("Unable to find processed HDF5 file to retrieve metadata.\n"
"Check scenario name.")
return
else:
self.logger.info("\nIn order to initialize your database's metadata, "
"Marmot is looking for a h5plexos solution file.\n"
f"It is looking in {self.HDF5_folder_in}, but it cannot "
"find any *.h5 files there.\n"
"Please check the 'PLEXOS_Solutions_folder' input in your "
"'Marmot_user_defined_inputs.csv'.\n"
"Ensure that it matches the filepath containing the *.h5 files "
"created by h5plexos.\n\nMarmot will now quit.")
sys.exit()
def generator_category(self, filename: str) -> pd.DataFrame:
"""Generator categories mapping.
Args:
            filename (str): The name of the h5 file to retrieve data from.
                If retrieving from the formatted h5 file, just pass the scenario name.
"""
if not self._check_if_existing_filename(filename):
self._read_data(filename)
try:
try:
gen_category = pd.DataFrame(np.asarray(self.h5_data[self.start_index + 'objects/generator']))
except KeyError:
gen_category = pd.DataFrame(np.asarray(self.h5_data[self.start_index + 'objects/generators']))
gen_category.rename(columns={'name':'gen_name','category':'tech'}, inplace=True)
gen_category = gen_category.applymap(lambda x: x.decode("utf-8") if isinstance(x, bytes) else x)
except KeyError:
gen_category = pd.DataFrame()
return gen_category
def region_generators(self, filename: str) -> pd.DataFrame:
"""Region generators mapping.
Args:
            filename (str): The name of the h5 file to retrieve data from.
                If retrieving from the formatted h5 file, just pass the scenario name.
"""
if not self._check_if_existing_filename(filename):
self._read_data(filename)
try:
try:
region_gen = pd.DataFrame(np.asarray(self.h5_data[self.start_index + 'relations/regions_generators']))
except KeyError:
region_gen = pd.DataFrame(np.asarray(self.h5_data[self.start_index + 'relations/region_generators']))
region_gen.rename(columns={'child':'gen_name','parent':'region'}, inplace=True)
region_gen = region_gen.applymap(lambda x: x.decode("utf-8") if isinstance(x, bytes) else x)
region_gen.drop_duplicates(subset=["gen_name"],keep='first',inplace=True) #For generators which belong to more than 1 region, drop duplicates.
except KeyError:
region_gen = pd.DataFrame()
return region_gen
def region_generator_category(self, filename: str) -> pd.DataFrame:
"""Region generators category mapping.
Args:
            filename (str): The name of the h5 file to retrieve data from.
                If retrieving from the formatted h5 file, just pass the scenario name.
"""
try:
region_gen = self.region_generators(filename)
gen_category = self.generator_category(filename)
region_gen_cat = region_gen.merge(gen_category,
how="left", on='gen_name').sort_values(by=['tech','gen_name']).set_index('region')
except KeyError:
region_gen_cat = pd.DataFrame()
return region_gen_cat
def zone_generators(self, filename: str) -> pd.DataFrame:
"""Zone generators mapping.
Args:
            filename (str): The name of the h5 file to retrieve data from.
                If retrieving from the formatted h5 file, just pass the scenario name.
"""
if not self._check_if_existing_filename(filename):
self._read_data(filename)
try:
try:
zone_gen = pd.DataFrame(np.asarray(self.h5_data[self.start_index + 'relations/zones_generators']))
except KeyError:
zone_gen = pd.DataFrame(np.asarray(self.h5_data[self.start_index + 'relations/zone_generators']))
zone_gen.rename(columns={'child':'gen_name','parent':'zone'}, inplace=True)
zone_gen = zone_gen.applymap(lambda x: x.decode("utf-8") if isinstance(x, bytes) else x)
            zone_gen.drop_duplicates(subset=["gen_name"],keep='first',inplace=True) #For generators which belong to more than 1 zone, drop duplicates.
except KeyError:
zone_gen = pd.DataFrame()
return zone_gen
def zone_generator_category(self, filename: str) -> pd.DataFrame:
"""Zone generators category mapping.
Args:
            filename (str): The name of the h5 file to retrieve data from.
                If retrieving from the formatted h5 file, just pass the scenario name.
"""
try:
zone_gen = self.zone_generators(filename)
gen_category = self.generator_category(filename)
zone_gen_cat = zone_gen.merge(gen_category,
how="left", on='gen_name').sort_values(by=['tech','gen_name']).set_index('zone')
except KeyError:
zone_gen_cat = pd.DataFrame()
return zone_gen_cat
# Generator storage has been updated so that only one of tail_storage & head_storage is required
# If both are available, both are used
def generator_storage(self, filename: str) -> pd.DataFrame:
"""Generator Storage mapping.
Args:
            filename (str): The name of the h5 file to retrieve data from.
                If retrieving from the formatted h5 file, just pass the scenario name.
"""
if not self._check_if_existing_filename(filename):
self._read_data(filename)
head_tail = [0,0]
try:
generator_headstorage = pd.DataFrame()
generator_tailstorage = pd.DataFrame()
try:
generator_headstorage = pd.DataFrame(np.asarray(self.h5_data[self.start_index + 'relations/generators_headstorage']))
head_tail[0] = 1
except KeyError:
pass
try:
generator_headstorage = pd.DataFrame(np.asarray(self.h5_data[self.start_index + 'relations/generator_headstorage']))
head_tail[0] = 1
except KeyError:
pass
try:
generator_headstorage = pd.DataFrame(np.asarray(self.h5_data[self.start_index + 'relations/exportinggenerators_headstorage']))
head_tail[0] = 1
except KeyError:
pass
try:
generator_tailstorage = pd.DataFrame(np.asarray(self.h5_data[self.start_index + 'relations/generators_tailstorage']))
head_tail[1] = 1
except KeyError:
pass
try:
generator_tailstorage = pd.DataFrame(np.asarray(self.h5_data[self.start_index + 'relations/generator_tailstorage']))
head_tail[1] = 1
except KeyError:
pass
try:
generator_tailstorage = pd.DataFrame(np.asarray(self.h5_data[self.start_index + 'relations/importinggenerators_tailstorage']))
head_tail[1] = 1
except KeyError:
pass
if head_tail[0] == 1:
if head_tail[1] == 1:
gen_storage = pd.concat([generator_headstorage, generator_tailstorage])
else:
gen_storage = generator_headstorage
else:
gen_storage = generator_tailstorage
gen_storage.rename(columns={'child':'name','parent':'gen_name'}, inplace=True)
gen_storage = gen_storage.applymap(lambda x: x.decode("utf-8") if isinstance(x, bytes) else x)
except:
gen_storage = pd.DataFrame()
return gen_storage
def node_region(self, filename: str) -> pd.DataFrame:
"""Node Region mapping.
Args:
            filename (str): The name of the h5 file to retrieve data from.
                If retrieving from the formatted h5 file, just pass the scenario name.
"""
if not self._check_if_existing_filename(filename):
self._read_data(filename)
try:
try:
node_region = pd.DataFrame(np.asarray(self.h5_data[self.start_index + 'relations/nodes_region']))
except KeyError:
node_region = pd.DataFrame(np.asarray(self.h5_data[self.start_index + 'relations/node_region']))
node_region.rename(columns={'child':'region','parent':'node'}, inplace=True)
node_region = node_region.applymap(lambda x: x.decode("utf-8") if isinstance(x, bytes) else x)
node_region = node_region.sort_values(by=['node']).set_index('region')
except:
node_region = pd.DataFrame()
return node_region
def node_zone(self, filename: str) -> pd.DataFrame:
"""Node zone mapping.
Args:
            filename (str): The name of the h5 file to retrieve data from.
                If retrieving from the formatted h5 file, just pass the scenario name.
"""
if not self._check_if_existing_filename(filename):
self._read_data(filename)
try:
try:
node_zone = pd.DataFrame(np.asarray(self.h5_data[self.start_index + 'relations/nodes_zone']))
except KeyError:
node_zone = pd.DataFrame(np.asarray(self.h5_data[self.start_index + 'relations/node_zone']))
node_zone.rename(columns={'child':'zone','parent':'node'}, inplace=True)
node_zone = node_zone.applymap(lambda x: x.decode("utf-8") if isinstance(x, bytes) else x)
node_zone = node_zone.sort_values(by=['node']).set_index('zone')
except:
node_zone = pd.DataFrame()
return node_zone
def generator_node(self, filename: str) -> pd.DataFrame:
"""generator node mapping.
Args:
            filename (str): The name of the h5 file to retrieve data from.
                If retrieving from the formatted h5 file, just pass the scenario name.
"""
if not self._check_if_existing_filename(filename):
self._read_data(filename)
try:
try:
generator_node = pd.DataFrame(np.asarray(self.h5_data[self.start_index + 'relations/generators_nodes']))
except KeyError:
generator_node = pd.DataFrame(np.asarray(self.h5_data[self.start_index + 'relations/generator_nodes']))
generator_node.rename(columns={'child':'node','parent':'gen_name'}, inplace=True)
generator_node = generator_node.applymap(lambda x: x.decode("utf-8") if isinstance(x, bytes) else x)
# generators_nodes = generators_nodes.sort_values(by=['generator'])
except:
generator_node = pd.DataFrame()
return generator_node
def regions(self, filename: str) -> pd.DataFrame:
"""Region objects.
Args:
            filename (str): The name of the h5 file to retrieve data from.
                If retrieving from the formatted h5 file, just pass the scenario name.
"""
if not self._check_if_existing_filename(filename):
self._read_data(filename)
try:
try:
regions = pd.DataFrame(np.asarray(self.h5_data[self.start_index + 'objects/regions']))
except KeyError:
regions = pd.DataFrame(np.asarray(self.h5_data[self.start_index + 'objects/region']))
regions = regions.applymap(lambda x: x.decode("utf-8") if isinstance(x, bytes) else x)
regions.rename(columns={'name':'region'}, inplace=True)
regions.sort_values(['category','region'],inplace=True)
except KeyError:
self.logger.warning("Regional data not included in h5plexos results")
regions = pd.DataFrame()
return regions
def zones(self, filename: str) -> pd.DataFrame:
"""Zone objects.
Args:
            filename (str): The name of the h5 file to retrieve data from.
                If retrieving from the formatted h5 file, just pass the scenario name.
"""
if not self._check_if_existing_filename(filename):
self._read_data(filename)
try:
try:
zones = pd.DataFrame(np.asarray(self.h5_data[self.start_index + 'objects/zones']))
except KeyError:
zones = pd.DataFrame(np.asarray(self.h5_data[self.start_index + 'objects/zone']))
zones = zones.applymap(lambda x: x.decode("utf-8") if isinstance(x, bytes) else x)
except KeyError:
self.logger.warning("Zonal data not included in h5plexos results")
zones = pd.DataFrame()
return zones
def lines(self, filename: str) -> pd.DataFrame:
"""Line objects.
Args:
            filename (str): The name of the h5 file to retrieve data from.
                If retrieving from the formatted h5 file, just pass the scenario name.
"""
if not self._check_if_existing_filename(filename):
self._read_data(filename)
try:
try:
lines=pd.DataFrame(np.asarray(self.h5_data[self.start_index + 'objects/lines']))
except KeyError:
lines=pd.DataFrame(np.asarray(self.h5_data[self.start_index + 'objects/line']))
lines = lines.applymap(lambda x: x.decode("utf-8") if isinstance(x, bytes) else x)
lines.rename(columns={"name":"line_name"},inplace=True)
        except KeyError:
            self.logger.warning("Line data not included in h5plexos results")
            lines = pd.DataFrame()
        return lines
def region_regions(self, filename: str) -> pd.DataFrame:
"""Region-region mapping.
Args:
            filename (str): The name of the h5 file to retrieve data from.
                If retrieving from the formatted h5 file, just pass the scenario name.
"""
if not self._check_if_existing_filename(filename):
self._read_data(filename)
try:
region_regions = pd.DataFrame(np.asarray(self.h5_data[self.start_index + 'relations/region_regions']))
region_regions = region_regions.applymap(lambda x: x.decode("utf-8") if isinstance(x, bytes) else x)
        except KeyError:
            self.logger.warning("region_regions data not included in h5plexos results")
            region_regions = pd.DataFrame()
        return region_regions
def region_interregionallines(self, filename: str) -> pd.DataFrame:
"""Region inter-regional lines mapping.
Args:
            filename (str): The name of the h5 file to retrieve data from.
                If retrieving from the formatted h5 file, just pass the scenario name.
"""
if not self._check_if_existing_filename(filename):
self._read_data(filename)
try:
try:
region_interregionallines=pd.DataFrame(np.asarray(self.h5_data[self.start_index + 'relations/region_interregionallines']))
except KeyError:
region_interregionallines=pd.DataFrame(np.asarray(self.h5_data[self.start_index + 'relations/region_interregionalline']))
region_interregionallines = region_interregionallines.applymap(lambda x: x.decode("utf-8") if isinstance(x, bytes) else x)
region_interregionallines.rename(columns={"parent":"region","child":"line_name"},inplace=True)
if not self.Region_Mapping.empty:
region_interregionallines=pd.merge(region_interregionallines,self.Region_Mapping,how='left',on="region")
except KeyError:
region_interregionallines = pd.DataFrame()
self.logger.warning("Region Interregionallines data not included in h5plexos results")
return region_interregionallines
def region_intraregionallines(self, filename: str) -> pd.DataFrame:
"""Region intra-regional lines mapping.
Args:
            filename (str): The name of the h5 file to retrieve data from.
                If retrieving from the formatted h5 file, just pass the scenario name.
"""
if not self._check_if_existing_filename(filename):
self._read_data(filename)
try:
try:
region_intraregionallines=pd.DataFrame(np.asarray(self.h5_data[self.start_index + 'relations/region_intraregionallines']))
except KeyError:
try:
region_intraregionallines=pd.DataFrame(np.asarray(self.h5_data[self.start_index + 'relations/region_intraregionalline']))
except KeyError:
region_intraregionallines=pd.concat([pd.DataFrame(np.asarray(self.h5_data[self.start_index + 'relations/region_importinglines'])),
pd.DataFrame(np.asarray(self.h5_data[self.start_index + 'relations/region_exportinglines']))]).drop_duplicates()
region_intraregionallines = region_intraregionallines.applymap(lambda x: x.decode("utf-8") if isinstance(x, bytes) else x)
region_intraregionallines.rename(columns={"parent":"region","child":"line_name"},inplace=True)
if not self.Region_Mapping.empty:
region_intraregionallines=pd.merge(region_intraregionallines,self.Region_Mapping,how='left',on="region")
except KeyError:
region_intraregionallines = pd.DataFrame()
self.logger.warning("Region Intraregionallines Lines data not included in h5plexos results")
return region_intraregionallines
def region_exporting_lines(self, filename: str) -> pd.DataFrame:
"""Region exporting lines mapping.
Args:
            filename (str): The name of the h5 file to retrieve data from.
                If retrieving from the formatted h5 file, just pass the scenario name.
"""
if not self._check_if_existing_filename(filename):
self._read_data(filename)
try:
try:
region_exportinglines = pd.DataFrame(np.asarray(self.h5_data[self.start_index + 'relations/region_exportinglines']))
except KeyError:
region_exportinglines = pd.DataFrame(np.asarray(self.h5_data[self.start_index + 'relations/region_exportingline']))
region_exportinglines = region_exportinglines.applymap(lambda x: x.decode("utf-8") if isinstance(x, bytes) else x)
region_exportinglines = region_exportinglines.rename(columns={'parent':'region','child':'line_name'})
if not self.Region_Mapping.empty:
region_exportinglines=pd.merge(region_exportinglines,self.Region_Mapping,how='left',on="region")
        except KeyError:
            self.logger.warning("Region Exporting Lines data not included in h5plexos results")
            region_exportinglines = pd.DataFrame()
        return region_exportinglines
def region_importing_lines(self, filename: str) -> pd.DataFrame:
"""Region importing lines mapping.
Args:
            filename (str): The name of the h5 file to retrieve data from.
                If retrieving from the formatted h5 file, just pass the scenario name.
"""
if not self._check_if_existing_filename(filename):
self._read_data(filename)
try:
try:
region_importinglines = pd.DataFrame(np.asarray(self.h5_data[self.start_index + 'relations/region_importinglines']))
except KeyError:
region_importinglines = pd.DataFrame(np.asarray(self.h5_data[self.start_index + 'relations/region_importingline']))
region_importinglines = region_importinglines.applymap(lambda x: x.decode("utf-8") if isinstance(x, bytes) else x)
region_importinglines = region_importinglines.rename(columns={'parent':'region','child':'line_name'})
if not self.Region_Mapping.empty:
region_importinglines=pd.merge(region_importinglines,self.Region_Mapping,how='left',on="region")
        except KeyError:
            self.logger.warning("Region Importing Lines data not included in h5plexos results")
            region_importinglines = pd.DataFrame()
        return region_importinglines
def zone_interzonallines(self, filename: str) -> pd.DataFrame:
"""Zone inter-zonal lines mapping.
Args:
            filename (str): The name of the h5 file to retrieve data from.
                If retrieving from the formatted h5 file, just pass the scenario name.
"""
if not self._check_if_existing_filename(filename):
self._read_data(filename)
try:
try:
zone_interzonallines=pd.DataFrame(np.asarray(self.h5_data[self.start_index + 'relations/zone_interzonallines']))
except KeyError:
zone_interzonallines=pd.DataFrame(np.asarray(self.h5_data[self.start_index + 'relations/zone_interzonalline']))
zone_interzonallines = zone_interzonallines.applymap(lambda x: x.decode("utf-8") if isinstance(x, bytes) else x)
zone_interzonallines.rename(columns={"parent":"region","child":"line_name"},inplace=True)
except KeyError:
zone_interzonallines = pd.DataFrame()
self.logger.warning("Zone Interzonallines data not included in h5plexos results")
return zone_interzonallines
def zone_intrazonallines(self, filename: str) -> pd.DataFrame:
"""Zone intra-zonal lines mapping.
Args:
            filename (str): The name of the h5 file to retrieve data from.
                If retrieving from the formatted h5 file, just pass the scenario name.
"""
if not self._check_if_existing_filename(filename):
self._read_data(filename)
try:
try:
zone_intrazonallines=pd.DataFrame(np.asarray(self.h5_data[self.start_index + 'relations/zone_intrazonallines']))
except KeyError:
zone_intrazonallines=pd.DataFrame(np.asarray(self.h5_data[self.start_index + 'relations/zone_intrazonalline']))
zone_intrazonallines = zone_intrazonallines.applymap(lambda x: x.decode("utf-8") if isinstance(x, bytes) else x)
zone_intrazonallines.rename(columns={"parent":"region","child":"line_name"},inplace=True)
except KeyError:
zone_intrazonallines = pd.DataFrame()
self.logger.warning("Zone Intrazonallines Lines data not included in h5plexos results")
return zone_intrazonallines
def zone_exporting_lines(self, filename: str) -> pd.DataFrame:
"""Zone exporting lines mapping.
Args:
filename (str): The name of the h5 file to retrieve data from.
If retrieving from formatted h5 file, just pass scenario name.
"""
if not self._check_if_existing_filename(filename):
self._read_data(filename)
try:
try:
zone_exportinglines = pd.DataFrame(np.asarray(self.h5_data[self.start_index + 'relations/zone_exportinglines']))
except KeyError:
zone_exportinglines = pd.DataFrame(np.asarray(self.h5_data[self.start_index + 'relations/zone_exportingline']))
zone_exportinglines = zone_exportinglines.applymap(lambda x: x.decode("utf-8") if isinstance(x, bytes) else x)
zone_exportinglines = zone_exportinglines.rename(columns={'parent':'region','child':'line_name'})
except KeyError:
self.logger.warning("zone exporting lines data not included in h5plexos results")
zone_exportinglines = pd.DataFrame()
return zone_exportinglines
def zone_importing_lines(self, filename: str) -> pd.DataFrame:
"""Zone importing lines mapping.
Args:
filename (str): The name of the h5 file to retrieve data from.
If retrieving from formatted h5 file, just pass scenario name.
"""
if not self._check_if_existing_filename(filename):
self._read_data(filename)
try:
try:
zone_importinglines = pd.DataFrame(np.asarray(self.h5_data[self.start_index + 'relations/zone_importinglines']))
except KeyError:
zone_importinglines = pd.DataFrame(np.asarray(self.h5_data[self.start_index + 'relations/zone_importingline']))
zone_importinglines = zone_importinglines.applymap(lambda x: x.decode("utf-8") if isinstance(x, bytes) else x)
zone_importinglines = zone_importinglines.rename(columns={'parent':'region','child':'line_name'})
except KeyError:
self.logger.warning("zone importing lines data not included in h5plexos results")
zone_importinglines = pd.DataFrame()
return zone_importinglines
def interface_lines(self, filename: str) -> pd.DataFrame:
"""Interface to lines mapping.
Args:
filename (str): The name of the h5 file to retrieve data from.
If retrieving from formatted h5 file, just pass scenario name.
"""
if not self._check_if_existing_filename(filename):
self._read_data(filename)
try:
try:
interface_lines = pd.DataFrame(np.asarray(self.h5_data[self.start_index + 'relations/interface_lines']))
except KeyError:
interface_lines = pd.DataFrame(np.asarray(self.h5_data[self.start_index + 'relations/interfaces_lines']))
interface_lines = interface_lines.applymap(lambda x: x.decode("utf-8") if isinstance(x, bytes) else x)
interface_lines = interface_lines.rename(columns={'parent':'interface','child':'line'})
except KeyError:
self.logger.warning("Interface Lines data not included in h5plexos results")
interface_lines = pd.DataFrame()
return interface_lines
def region_lines(self, filename: str) -> pd.DataFrame:
"""Region to Lines mapping.
Args:
filename (str): The name of the h5 file to retrieve data from.
If retrieving from formatted h5 file, just pass scenario name.
"""
region_interregionallines = self.region_interregionallines(filename)
region_intraregionallines = self.region_intraregionallines(filename)
region_lines = pd.concat([region_interregionallines,region_intraregionallines])
return region_lines
def zone_lines(self, filename: str) -> pd.DataFrame:
"""Zone to Lines mapping.
Args:
filename (str): The name of the h5 file to retrieve data from.
If retrieving from formatted h5 file, just pass scenario name.
"""
zone_interzonallines = self.zone_interzonallines(filename)
zone_intrazonallines = self.zone_intrazonallines(filename)
zone_lines = pd.concat([zone_interzonallines,zone_intrazonallines])
zone_lines = zone_lines.rename(columns={'region':'zone'})
return zone_lines
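# Example usage (a minimal sketch; the reader object name and scenario name below are assumptions):
#   region_line_map = processed_data.region_lines("Scenario_name")   # columns include 'region' and 'line_name'
#   zone_line_map = processed_data.zone_lines("Scenario_name")       # columns include 'zone' and 'line_name'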
def reserves(self, filename: str) -> pd.DataFrame:
"""Reserves objects.
Args:
filename (str): The name of the h5 file to retrieve data from.
If retrieving from formatted h5 file, just pass scenario name.
"""
if not self._check_if_existing_filename(filename):
self._read_data(filename)
try:
try:
reserves = pd.DataFrame(np.asarray(self.h5_data[self.start_index + 'objects/reserves']))
except KeyError:
reserves = pd.DataFrame(np.asarray(self.h5_data[self.start_index + 'objects/reserve']))
reserves = reserves.applymap(lambda x: x.decode("utf-8") if isinstance(x, bytes) else x)
except KeyError:
self.logger.warning("Reserves data not included in h5plexos results")
reserves = pd.DataFrame()
return reserves
def reserves_generators(self, filename: str) -> pd.DataFrame:
"""Reserves to generators mapping.
Args:
filename (str): The name of the h5 file to retrieve data from.
If retrieving from formatted h5 file, just pass scenario name.
"""
if not self._check_if_existing_filename(filename):
self._read_data(filename)
try:
try:
reserves_generators = pd.DataFrame(np.asarray(self.h5_data[self.start_index + 'relations/reserves_generators']))
except KeyError:
reserves_generators = pd.DataFrame(np.asarray(self.h5_data[self.start_index + 'relations/reserve_generators']))
reserves_generators = reserves_generators.applymap(lambda x: x.decode("utf-8") if isinstance(x, bytes) else x)
reserves_generators = reserves_generators.rename(columns={'child':'gen_name'})
except KeyError:
self.logger.warning("Reserves Generators data not included in h5plexos results")
reserves_generators = pd.DataFrame()
return reserves_generators
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for Period dtype
import operator
import numpy as np
import pytest
from pandas._libs.tslibs.period import IncompatibleFrequency
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import Period, PeriodIndex, Series, period_range
from pandas.core import ops
from pandas.core.arrays import TimedeltaArray
import pandas.util.testing as tm
from pandas.tseries.frequencies import to_offset
# ------------------------------------------------------------------
# Comparisons
class TestPeriodArrayLikeComparisons:
# Comparison tests for PeriodDtype vectors fully parametrized over
# DataFrame/Series/PeriodIndex/PeriodArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_zerodim(self, box_with_array):
# GH#26689 make sure we unbox zero-dimensional arrays
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
pi = pd.period_range("2000", periods=4)
other = np.array(pi.to_numpy()[0])
pi = tm.box_expected(pi, box_with_array)
result = pi <= other
expected = np.array([True, False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
class TestPeriodIndexComparisons:
# TODO: parameterize over boxes
@pytest.mark.parametrize("other", ["2017", 2017])
def test_eq(self, other):
idx = PeriodIndex(["2017", "2017", "2018"], freq="D")
expected = np.array([True, True, False])
result = idx == other
tm.assert_numpy_array_equal(result, expected)
def test_pi_cmp_period(self):
idx = period_range("2007-01", periods=20, freq="M")
result = idx < idx[10]
exp = idx.values < idx.values[10]
tm.assert_numpy_array_equal(result, exp)
# TODO: moved from test_datetime64; de-duplicate with version below
def test_parr_cmp_period_scalar2(self, box_with_array):
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
pi = pd.period_range("2000-01-01", periods=10, freq="D")
val = Period("2000-01-04", freq="D")
expected = [x > val for x in pi]
ser = tm.box_expected(pi, box_with_array)
expected = tm.box_expected(expected, xbox)
result = ser > val
tm.assert_equal(result, expected)
val = pi[5]
result = ser > val
expected = [x > val for x in pi]
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_parr_cmp_period_scalar(self, freq, box_with_array):
# GH#13200
xbox = np.ndarray if box_with_array is pd.Index else box_with_array
base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
base = tm.box_expected(base, box_with_array)
per = Period("2011-02", freq=freq)
exp = np.array([False, True, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base == per, exp)
tm.assert_equal(per == base, exp)
exp = np.array([True, False, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base != per, exp)
tm.assert_equal(per != base, exp)
exp = np.array([False, False, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base > per, exp)
tm.assert_equal(per < base, exp)
exp = np.array([True, False, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base < per, exp)
tm.assert_equal(per > base, exp)
exp = np.array([False, True, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base >= per, exp)
tm.assert_equal(per <= base, exp)
exp = np.array([True, True, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base <= per, exp)
tm.assert_equal(per >= base, exp)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_parr_cmp_pi(self, freq, box_with_array):
# GH#13200
xbox = np.ndarray if box_with_array is pd.Index else box_with_array
base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
base = tm.box_expected(base, box_with_array)
# TODO: could also box idx?
idx = PeriodIndex(["2011-02", "2011-01", "2011-03", "2011-05"], freq=freq)
exp = np.array([False, False, True, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base == idx, exp)
exp = np.array([True, True, False, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base != idx, exp)
exp = np.array([False, True, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base > idx, exp)
exp = np.array([True, False, False, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base < idx, exp)
exp = np.array([False, True, True, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base >= idx, exp)
exp = np.array([True, False, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base <= idx, exp)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_parr_cmp_pi_mismatched_freq_raises(self, freq, box_with_array):
# GH#13200
# different base freq
base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
base = tm.box_expected(base, box_with_array)
msg = "Input has different freq=A-DEC from "
with pytest.raises(IncompatibleFrequency, match=msg):
base <= Period("2011", freq="A")
with pytest.raises(IncompatibleFrequency, match=msg):
Period("2011", freq="A") >= base
# TODO: Could parametrize over boxes for idx?
idx = PeriodIndex(["2011", "2012", "2013", "2014"], freq="A")
rev_msg = (
r"Input has different freq=(M|2M|3M) from " r"PeriodArray\(freq=A-DEC\)"
)
idx_msg = rev_msg if box_with_array is tm.to_array else msg
with pytest.raises(IncompatibleFrequency, match=idx_msg):
base <= idx
# Different frequency
msg = "Input has different freq=4M from "
with pytest.raises(IncompatibleFrequency, match=msg):
base <= Period("2011", freq="4M")
with pytest.raises(IncompatibleFrequency, match=msg):
Period("2011", freq="4M") >= base
idx = PeriodIndex(["2011", "2012", "2013", "2014"], freq="4M")
rev_msg = r"Input has different freq=(M|2M|3M) from " r"PeriodArray\(freq=4M\)"
idx_msg = rev_msg if box_with_array is tm.to_array else msg
with pytest.raises(IncompatibleFrequency, match=idx_msg):
base <= idx
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_pi_cmp_nat(self, freq):
idx1 = PeriodIndex(["2011-01", "2011-02", "NaT", "2011-05"], freq=freq)
result = idx1 > Period("2011-02", freq=freq)
exp = np.array([False, False, False, True])
tm.assert_numpy_array_equal(result, exp)
result = Period("2011-02", freq=freq) < idx1
tm.assert_numpy_array_equal(result, exp)
result = idx1 == Period("NaT", freq=freq)
exp = np.array([False, False, False, False])
tm.assert_numpy_array_equal(result, exp)
result = Period("NaT", freq=freq) == idx1
tm.assert_numpy_array_equal(result, exp)
result = idx1 != Period("NaT", freq=freq)
exp = np.array([True, True, True, True])
tm.assert_numpy_array_equal(result, exp)
result = Period("NaT", freq=freq) != idx1
tm.assert_numpy_array_equal(result, exp)
idx2 = PeriodIndex(["2011-02", "2011-01", "2011-04", "NaT"], freq=freq)
result = idx1 < idx2
exp = np.array([True, False, False, False])
tm.assert_numpy_array_equal(result, exp)
result = idx1 == idx2
exp = np.array([False, False, False, False])
tm.assert_numpy_array_equal(result, exp)
result = idx1 != idx2
exp = np.array([True, True, True, True])
tm.assert_numpy_array_equal(result, exp)
result = idx1 == idx1
exp = np.array([True, True, False, True])
tm.assert_numpy_array_equal(result, exp)
result = idx1 != idx1
exp = np.array([False, False, True, False])
tm.assert_numpy_array_equal(result, exp)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_pi_cmp_nat_mismatched_freq_raises(self, freq):
idx1 = PeriodIndex(["2011-01", "2011-02", "NaT", "2011-05"], freq=freq)
diff = PeriodIndex(["2011-02", "2011-01", "2011-04", "NaT"], freq="4M")
msg = "Input has different freq=4M from Period(Array|Index)"
with pytest.raises(IncompatibleFrequency, match=msg):
idx1 > diff
with pytest.raises(IncompatibleFrequency, match=msg):
idx1 == diff
# TODO: De-duplicate with test_pi_cmp_nat
@pytest.mark.parametrize("dtype", [object, None])
def test_comp_nat(self, dtype):
left = pd.PeriodIndex(
[pd.Period("2011-01-01"), pd.NaT, pd.Period("2011-01-03")]
)
right = pd.PeriodIndex([pd.NaT, pd.NaT, pd.Period("2011-01-03")])
if dtype is not None:
left = left.astype(dtype)
right = right.astype(dtype)
result = left == right
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = left != right
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(left == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == right, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(left != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != left, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(left < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > left, expected)
class TestPeriodSeriesComparisons:
def test_cmp_series_period_series_mixed_freq(self):
# GH#13200
base = Series(
[
Period("2011", freq="A"),
Period("2011-02", freq="M"),
Period("2013", freq="A"),
Period("2011-04", freq="M"),
]
)
ser = Series(
[
Period("2012", freq="A"),
Period("2011-01", freq="M"),
Period("2013", freq="A"),
Period("2011-05", freq="M"),
]
)
exp = Series([False, False, True, False])
tm.assert_series_equal(base == ser, exp)
exp = Series([True, True, False, True])
tm.assert_series_equal(base != ser, exp)
exp = Series([False, True, False, False])
tm.assert_series_equal(base > ser, exp)
exp = Series([True, False, False, True])
tm.assert_series_equal(base < ser, exp)
exp = Series([False, True, True, False])
tm.assert_series_equal(base >= ser, exp)
exp = Series([True, False, True, True])
tm.assert_series_equal(base <= ser, exp)
class TestPeriodIndexSeriesComparisonConsistency:
""" Test PeriodIndex and Period Series Ops consistency """
# TODO: needs parametrization+de-duplication
def _check(self, values, func, expected):
# Test PeriodIndex and Period Series Ops consistency
idx = pd.PeriodIndex(values)
result = func(idx)
# check that we don't pass an unwanted type to tm.assert_equal
assert isinstance(expected, (pd.Index, np.ndarray))
tm.assert_equal(result, expected)
s = pd.Series(values)
result = func(s)
exp = pd.Series(expected, name=values.name)
tm.assert_series_equal(result, exp)
def test_pi_comp_period(self):
idx = PeriodIndex(
["2011-01", "2011-02", "2011-03", "2011-04"], freq="M", name="idx"
)
f = lambda x: x == pd.Period("2011-03", freq="M")
exp = np.array([False, False, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") == x
self._check(idx, f, exp)
f = lambda x: x != pd.Period("2011-03", freq="M")
exp = np.array([True, True, False, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") != x
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") >= x
exp = np.array([True, True, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: x > pd.Period("2011-03", freq="M")
exp = np.array([False, False, False, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") >= x
exp = np.array([True, True, True, False], dtype=np.bool)
self._check(idx, f, exp)
def test_pi_comp_period_nat(self):
idx = PeriodIndex(
["2011-01", "NaT", "2011-03", "2011-04"], freq="M", name="idx"
)
f = lambda x: x == pd.Period("2011-03", freq="M")
exp = np.array([False, False, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") == x
self._check(idx, f, exp)
f = lambda x: x == pd.NaT
exp = np.array([False, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.NaT == x
self._check(idx, f, exp)
f = lambda x: x != pd.Period("2011-03", freq="M")
exp = np.array([True, True, False, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") != x
self._check(idx, f, exp)
f = lambda x: x != pd.NaT
exp = np.array([True, True, True, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.NaT != x
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") >= x
exp = np.array([True, False, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: x < pd.Period("2011-03", freq="M")
exp = np.array([True, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: x > pd.NaT
exp = np.array([False, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.NaT >= x
exp = np.array([False, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
# ------------------------------------------------------------------
# Arithmetic
class TestPeriodFrameArithmetic:
def test_ops_frame_period(self):
# GH#13043
df = pd.DataFrame(
{
"A": [pd.Period("2015-01", freq="M"), pd.Period("2015-02", freq="M")],
"B": [pd.Period("2014-01", freq="M"), pd.Period("2014-02", freq="M")],
}
)
assert df["A"].dtype == "Period[M]"
assert df["B"].dtype == "Period[M]"
p = pd.Period("2015-03", freq="M")
off = p.freq
# dtype will be object because of original dtype
exp = pd.DataFrame(
{
"A": np.array([2 * off, 1 * off], dtype=object),
"B": np.array([14 * off, 13 * off], dtype=object),
}
)
tm.assert_frame_equal(p - df, exp)
tm.assert_frame_equal(df - p, -1 * exp)
df2 = pd.DataFrame(
{
"A": [pd.Period("2015-05", freq="M"), pd.Period("2015-06", freq="M")],
"B": [pd.Period("2015-05", freq="M"), pd.Period("2015-06", freq="M")],
}
)
assert df2["A"].dtype == "Period[M]"
assert df2["B"].dtype == "Period[M]"
exp = pd.DataFrame(
{
"A": np.array([4 * off, 4 * off], dtype=object),
"B": np.array([16 * off, 16 * off], dtype=object),
}
)
tm.assert_frame_equal(df2 - df, exp)
tm.assert_frame_equal(df - df2, -1 * exp)
class TestPeriodIndexArithmetic:
# ---------------------------------------------------------------
# __add__/__sub__ with PeriodIndex
# PeriodIndex + other is defined for integers and timedelta-like others
# PeriodIndex - other is defined for integers, timedelta-like others,
# and PeriodIndex (with matching freq)
def test_parr_add_iadd_parr_raises(self, box_with_array):
rng = pd.period_range("1/1/2000", freq="D", periods=5)
other = pd.period_range("1/6/2000", freq="D", periods=5)
# TODO: parametrize over boxes for other?
rng = tm.box_expected(rng, box_with_array)
# An earlier implementation of PeriodIndex addition performed
# a set operation (union). This has since been changed to
# raise a TypeError. See GH#14164 and GH#13077 for historical
# reference.
with pytest.raises(TypeError):
rng + other
with pytest.raises(TypeError):
rng += other
def test_pi_sub_isub_pi(self):
# GH#20049
# For historical reference see GH#14164, GH#13077.
# PeriodIndex subtraction originally performed set difference,
# then changed to raise TypeError before being implemented in GH#20049
rng = pd.period_range("1/1/2000", freq="D", periods=5)
other = pd.period_range("1/6/2000", freq="D", periods=5)
off = rng.freq
expected = pd.Index([-5 * off] * 5)
result = rng - other
tm.assert_index_equal(result, expected)
rng -= other
tm.assert_index_equal(rng, expected)
def test_pi_sub_pi_with_nat(self):
rng = pd.period_range("1/1/2000", freq="D", periods=5)
other = rng[1:].insert(0, pd.NaT)
assert other[1:].equals(rng[1:])
result = rng - other
off = rng.freq
expected = pd.Index([pd.NaT, 0 * off, 0 * off, 0 * off, 0 * off])
tm.assert_index_equal(result, expected)
def test_parr_sub_pi_mismatched_freq(self, box_with_array):
rng = pd.period_range("1/1/2000", freq="D", periods=5)
other = pd.period_range("1/6/2000", freq="H", periods=5)
# TODO: parametrize over boxes for other?
rng = tm.box_expected(rng, box_with_array)
with pytest.raises(IncompatibleFrequency):
rng - other
@pytest.mark.parametrize("n", [1, 2, 3, 4])
def test_sub_n_gt_1_ticks(self, tick_classes, n):
# GH 23878
p1_d = "19910905"
p2_d = "19920406"
p1 = pd.PeriodIndex([p1_d], freq=tick_classes(n))
p2 = pd.PeriodIndex([p2_d], freq=tick_classes(n))
expected = pd.PeriodIndex([p2_d], freq=p2.freq.base) - pd.PeriodIndex(
[p1_d], freq=p1.freq.base
)
tm.assert_index_equal((p2 - p1), expected)
@pytest.mark.parametrize("n", [1, 2, 3, 4])
@pytest.mark.parametrize(
"offset, kwd_name",
[
(pd.offsets.YearEnd, "month"),
(pd.offsets.QuarterEnd, "startingMonth"),
(pd.offsets.MonthEnd, None),
(pd.offsets.Week, "weekday"),
],
)
def test_sub_n_gt_1_offsets(self, offset, kwd_name, n):
# GH 23878
kwds = {kwd_name: 3} if kwd_name is not None else {}
p1_d = "19910905"
p2_d = "19920406"
freq = offset(n, normalize=False, **kwds)
p1 = pd.PeriodIndex([p1_d], freq=freq)
p2 = pd.PeriodIndex([p2_d], freq=freq)
result = p2 - p1
expected = pd.PeriodIndex([p2_d], freq=freq.base) - pd.PeriodIndex(
[p1_d], freq=freq.base
)
tm.assert_index_equal(result, expected)
# -------------------------------------------------------------
# Invalid Operations
@pytest.mark.parametrize("other", [3.14, np.array([2.0, 3.0])])
@pytest.mark.parametrize("op", [operator.add, ops.radd, operator.sub, ops.rsub])
def test_parr_add_sub_float_raises(self, op, other, box_with_array):
dti = pd.DatetimeIndex(["2011-01-01", "2011-01-02"], freq="D")
pi = dti.to_period("D")
pi = tm.box_expected(pi, box_with_array)
with pytest.raises(TypeError):
op(pi, other)
@pytest.mark.parametrize(
"other",
[
# datetime scalars
pd.Timestamp.now(),
pd.Timestamp.now().to_pydatetime(),
pd.Timestamp.now().to_datetime64(),
# datetime-like arrays
pd.date_range("2016-01-01", periods=3, freq="H"),
pd.date_range("2016-01-01", periods=3, tz="Europe/Brussels"),
pd.date_range("2016-01-01", periods=3, freq="S")._data,
pd.date_range("2016-01-01", periods=3, tz="Asia/Tokyo")._data,
# Miscellaneous invalid types
],
)
def test_parr_add_sub_invalid(self, other, box_with_array):
# GH#23215
rng = pd.period_range("1/1/2000", freq="D", periods=3)
rng = tm.box_expected(rng, box_with_array)
with pytest.raises(TypeError):
rng + other
with pytest.raises(TypeError):
other + rng
with pytest.raises(TypeError):
rng - other
with pytest.raises(TypeError):
other - rng
# -----------------------------------------------------------------
# __add__/__sub__ with ndarray[datetime64] and ndarray[timedelta64]
def test_pi_add_sub_td64_array_non_tick_raises(self):
rng = pd.period_range("1/1/2000", freq="Q", periods=3)
tdi = pd.TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
tdarr = tdi.values
with pytest.raises(IncompatibleFrequency):
rng + tdarr
with pytest.raises(IncompatibleFrequency):
tdarr + rng
with pytest.raises(IncompatibleFrequency):
rng - tdarr
with pytest.raises(TypeError):
tdarr - rng
def test_pi_add_sub_td64_array_tick(self):
# PeriodIndex + Timedelta-like is allowed only with
# tick-like frequencies
rng = pd.period_range("1/1/2000", freq="90D", periods=3)
import os
import random
import math
import numpy as np
import pandas as pd
import itertools
from functools import lru_cache
##########################
## Compliance functions ##
##########################
def delayed_ramp_fun(Nc_old, Nc_new, t, tau_days, l, t_start):
"""
t : timestamp
current date
tau_days : int
number of days before measures start having an effect
l : int
number of additional days after the time delay until full compliance is reached
"""
return Nc_old + (Nc_new-Nc_old)/l * (t-t_start-tau_days)/pd.Timedelta('1D')
def ramp_fun(Nc_old, Nc_new, t, t_start, l):
"""
t : timestamp
current date
l : int
number of additional days after the time delay until full compliance is reached
"""
return Nc_old + (Nc_new-Nc_old)/l * (t-t_start)/pd.Timedelta('1D')
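# Example (a sketch with assumed dates): halfway through a 10-day ramp that started on
# 2021-01-01, the contact matrix sits midway between the old and new matrices:
#   ramp_fun(Nc_old, Nc_new, pd.Timestamp('2021-01-06'), pd.Timestamp('2021-01-01'), 10)
#   == Nc_old + 0.5*(Nc_new - Nc_old)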
###############################
## Mobility update functions ##
###############################
def load_all_mobility_data(agg, dtype='fractional', beyond_borders=False):
"""
Function that fetches all available mobility data and adds it to a DataFrame with dates as indices and numpy matrices as values. Make sure to regularly update the mobility data with the notebook notebooks/preprocessing/Quick-update_mobility-matrices.ipynb to get the data for the most recent days. Also returns the average mobility over all available data, which might NOT always be desirable as a back-up mobility.
Input
-----
agg : str
Denotes the spatial aggregation at hand. Either 'prov', 'arr' or 'mun'
dtype : str
Choose the type of mobility data to return. Either 'fractional' (default), staytime (all available hours for region g spent in h), or visits (all unique visits from region g to h)
beyond_borders : boolean
If true, also include mobility abroad and mobility from foreigners
Returns
-------
all_mobility_data : pd.DataFrame
DataFrame with datetime objects as indices ('DATE') and np.arrays ('place') as value column
average_mobility_data : np.array
average mobility matrix over all available dates
"""
### Validate input ###
if agg not in ['mun', 'arr', 'prov']:
raise ValueError(
"spatial stratification '{0}' is not legitimate. Possible spatial "
"stratifications are 'mun', 'arr', or 'prov'".format(agg)
)
if dtype not in ['fractional', 'staytime', 'visits']:
raise ValueError(
"data type '{0}' is not legitimate. Possible mobility matrix "
"data types are 'fractional', 'staytime', or 'visits'".format(dtype)
)
### Load all available data ###
# Define absolute location of this file
abs_dir = os.path.dirname(__file__)
# Define data location for this particular aggregation level
data_location = f'../../../data/interim/mobility/{agg}/{dtype}'
# Iterate over all available interim mobility data
all_available_dates=[]
all_available_places=[]
directory=os.path.join(abs_dir, f'{data_location}')
for csv in os.listdir(directory):
# take YYYYMMDD information from processed CSVs. NOTE: this supposes a particular data name format!
datum = csv[-12:-4]
# Create list of datetime objects
all_available_dates.append(pd.to_datetime(datum, format="%Y%m%d"))
# Load the CSV as a np.array
if beyond_borders:
place = pd.read_csv(f'{directory}/{csv}', index_col='mllp_postalcode').values
else:
place = pd.read_csv(f'{directory}/{csv}', index_col='mllp_postalcode').drop(index='Foreigner', columns='ABROAD').values
if dtype=='fractional':
# make sure the rows sum up to 1 nicely again after dropping a row and a column
place = place / place.sum(axis=1)[:, np.newaxis]
# Create list of places
all_available_places.append(place)
# Create new empty dataframe with available dates. Load mobility later
df = pd.DataFrame({'DATE' : all_available_dates, 'place' : all_available_places}).set_index('DATE')
all_mobility_data = df.copy()
# Take average of all available mobility data
average_mobility_data = df['place'].values.mean()
return all_mobility_data, average_mobility_data
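# Example usage (a minimal sketch; aggregation level and date are assumptions):
#   all_mobility_data, average_mobility_data = load_all_mobility_data('prov', dtype='fractional')
#   place = all_mobility_data.loc[pd.Timestamp('2020-08-01'), 'place']  # mobility matrix for that date, if present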
class make_mobility_update_function():
"""
Output the time-dependent mobility function with the data loaded in cache
Input
-----
proximus_mobility_data : DataFrame
Pandas DataFrame with dates as indices and matrices as values. Output of mobility.get_proximus_mobility_data.
proximus_mobility_data_avg : np.array
Average mobility matrix over all matrices
"""
def __init__(self, proximus_mobility_data, proximus_mobility_data_avg):
self.proximus_mobility_data = proximus_mobility_data
self.proximus_mobility_data_avg = proximus_mobility_data_avg
@lru_cache()
# Define mobility_update_func
def __call__(self, t, default_mobility=None):
"""
time-dependent function which has a mobility matrix of type dtype for every date.
Note: only works with datetime input (no integer time steps).
Input
-----
t : timestamp
current date as datetime object
states : str
formal necessity
param : str
formal necessity
default_mobility : np.array or None
If None (default), returns average mobility over all available dates. Else, return user-defined mobility
Returns
-------
place : np.array
square matrix with mobility of type dtype (fractional, staytime or visits), dimension depending on agg
"""
t = pd.Timestamp(t.date())
try: # if there is data available for this date (if the key exists)
place = self.proximus_mobility_data['place'][t]
except KeyError:
if default_mobility is not None: # If there is no data available and a user-defined input is given
place = default_mobility
else: # No data and no user input: fall back on average mobility
place = self.proximus_mobility_data_avg
return place
def mobility_wrapper_func(self, t, states, param, default_mobility=None):
t = pd.Timestamp(t.date())
if t <= pd.Timestamp('2020-03-17'):
place = self.__call__(t, default_mobility=default_mobility)
return np.eye(place.shape[0])
else:
return self.__call__(t, default_mobility=default_mobility)
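# Example usage (a minimal sketch, assuming the mobility data was loaded with load_all_mobility_data):
#   mobility_data, mobility_avg = load_all_mobility_data('prov')
#   mobility_update_func = make_mobility_update_function(mobility_data, mobility_avg).mobility_wrapper_func
#   place = mobility_update_func(pd.Timestamp('2020-05-01'), None, None)  # identity matrix before 2020-03-17, data-driven after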
###################
## VOC functions ##
###################
class make_VOC_function():
"""
Class that returns a time-dependent parameter function for the COVID-19 SEIRD model parameter alpha (variant fraction).
The current implementation includes the alpha through delta strains.
If the class is initialized without arguments, a logistic model fitted to prevalence data of the alpha-gamma variants is used. The class can also be initialized with the alpha-gamma prevalence data provided by Prof. <NAME>.
A logistic model fitted to prevalence data of the delta variant is always used.
Input
-----
*df_abc: pd.DataFrame (optional)
Alpha, Beta, Gamma prevalence dataset by <NAME>, obtained using:
`from covid19model.data import VOC`
`df_abc = VOC.get_abc_data()`
`VOC_function = make_VOC_function(df_abc)`
Output
------
__class__ : function
Default variant function
"""
def __init__(self, *df_abc):
self.df_abc = df_abc
self.data_given = False
if self.df_abc != ():
self.df_abc = df_abc[0] # First entry in list of optional arguments (dataframe)
self.data_given = True
@lru_cache()
def VOC_abc_data(self,t):
return self.df_abc.iloc[self.df_abc.index.get_loc(t, method='nearest')]['baselinesurv_f_501Y.V1_501Y.V2_501Y.V3']
@lru_cache()
def VOC_abc_logistic(self,t):
# Parameters obtained by fitting logistic model to weekly prevalence data
t_sig = pd.Timestamp('2021-02-14')
k = 0.07
# Return the fraction of the alpha-gamma variants
return 1/(1+np.exp(-k*(t-t_sig)/pd.Timedelta(days=1)))
@lru_cache()
def VOC_delta_logistic(self,t):
# Parameters obtained by fitting logistic model to weekly prevalence data
t_sig = pd.Timestamp('2021-06-25')
k = 0.11
# Function to return the fraction of the delta-variant
return 1/(1+np.exp(-k*(t-t_sig)/pd.Timedelta(days=1)))
# Default VOC function includes British and Indian variants
def __call__(self, t, states, param):
# Convert time to timestamp
t = pd.Timestamp(t.date())
# Introduction Indian variant
t1 = pd.Timestamp('2021-05-01')
# Construct alpha
if t <= t1:
if self.data_given:
return np.array([1-self.VOC_abc_data(t), self.VOC_abc_data(t), 0])
else:
return np.array([1-self.VOC_abc_logistic(t), self.VOC_abc_logistic(t), 0])
else:
return np.array([0, 1-self.VOC_delta_logistic(t), self.VOC_delta_logistic(t)])
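# Example usage (a minimal sketch; `sim_time` is assumed to be a datetime-like object):
#   VOC_function = make_VOC_function()           # use the logistic fits, no prevalence data
#   alpha = VOC_function(sim_time, None, None)   # np.array of three variant-class fractions summing to one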
###########################
## Vaccination functions ##
###########################
from covid19model.data.model_parameters import construct_initN
class make_vaccination_function():
"""
Class that returns a two-fold time-dependent parameter function for the vaccination strategy by default. First, first dose data by sciensano are used. In the future, a hypothetical scheme is used. If spatial data is given, the output consists of vaccination data per NIS code.
Input
-----
df : pd.DataFrame
*either* Sciensano public dataset, obtained using:
`from covid19model.data import sciensano`
`df = sciensano.get_sciensano_COVID19_data(update=False)`
*or* public spatial vaccination data, obtained using:
`from covid19model.data import sciensano`
`df = sciensano.get_public_spatial_vaccination_data(update=False,agg='arr')`
spatial : Boolean
True if df is spatially explicit. None by default.
Output
------
__class__ : function
Default vaccination function
"""
def __init__(self, df, age_classes=pd.IntervalIndex.from_tuples([(0,12),(12,18),(18,25),(25,35),(35,45),(45,55),(55,65),(65,75),(75,85),(85,120)], closed='left')):
age_stratification_size = len(age_classes)
# Assign inputs to object
self.df = df
self.age_agg = age_stratification_size
# Check if spatial data is provided
self.spatial = None
if 'NIS' in self.df.index.names:
self.spatial = True
self.space_agg = len(self.df.index.get_level_values('NIS').unique().values)
# infer aggregation (prov, arr or mun)
if self.space_agg == 11:
self.agg = 'prov'
elif self.space_agg == 43:
self.agg = 'arr'
elif self.space_agg == 581:
self.agg = 'mun'
else:
raise Exception(f"Space is {G}-fold stratified. This is not recognized as being stratification at Belgian province, arrondissement, or municipality level.")
# Check if dose data is provided
self.doses = None
if 'dose' in self.df.index.names:
self.doses = True
self.dose_agg = len(self.df.index.get_level_values('dose').unique().values)
# Define start- and enddate
self.df_start = pd.Timestamp(self.df.index.get_level_values('date').min())
self.df_end = pd.Timestamp(self.df.index.get_level_values('date').max())
# Perform age conversion
# Define dataframe with desired format
iterables=[]
for index_name in self.df.index.names:
if index_name != 'age':
iterables += [self.df.index.get_level_values(index_name).unique()]
else:
iterables += [age_classes]
index = pd.MultiIndex.from_product(iterables, names=self.df.index.names)
self.new_df = pd.Series(index=index)
# Four possibilities exist: can this be sped up?
if self.spatial:
if self.doses:
# Shorten?
for date in self.df.index.get_level_values('date').unique():
for NIS in self.df.index.get_level_values('NIS').unique():
for dose in self.df.index.get_level_values('dose').unique():
data = self.df.loc[(date, NIS, slice(None), dose)]
self.new_df.loc[(date, NIS, slice(None), dose)] = self.convert_age_stratified_vaccination_data(data, age_classes, self.agg, NIS).values
else:
for date in self.df.index.get_level_values('date').unique():
for NIS in self.df.index.get_level_values('NIS').unique():
data = self.df.loc[(date,NIS)]
self.new_df.loc[(date, NIS)] = self.convert_age_stratified_vaccination_data(data, age_classes, self.agg, NIS).values
else:
if self.doses:
for date in self.df.index.get_level_values('date').unique():
for dose in self.df.index.get_level_values('dose').unique():
data = self.df.loc[(date, slice(None), dose)]
self.new_df.loc[(date, slice(None), dose)] = self.convert_age_stratified_vaccination_data(data, age_classes).values
else:
for date in self.df.index.get_level_values('date').unique():
data = self.df.loc[(date)]
self.new_df.loc[(date)] = self.convert_age_stratified_vaccination_data(data, age_classes).values
self.df = self.new_df
def convert_age_stratified_vaccination_data(self, data, age_classes, agg=None, NIS=None):
"""
A function to convert the sciensano vaccination data to the desired model age groups
Parameters
----------
data: pd.Series
A series of age-stratified vaccination incidences. Index must be of type pd.Intervalindex.
age_classes : pd.IntervalIndex
Desired age groups of the vaccination dataframe.
agg: str
Spatial aggregation: prov, arr or mun
NIS : str
NIS code of considered spatial element
Returns
-------
out: pd.Series
Converted data.
"""
# Pre-allocate new series
out = pd.Series(index = age_classes, dtype=float)
# Extract demographics
if agg:
data_n_individuals = construct_initN(data.index.get_level_values('age'), agg).loc[NIS,:].values
demographics = construct_initN(None, agg).loc[NIS,:].values
else:
data_n_individuals = construct_initN(data.index.get_level_values('age'), agg).values
demographics = construct_initN(None, agg).values
# Loop over desired intervals
for idx,interval in enumerate(age_classes):
result = []
for age in range(interval.left, interval.right):
try:
result.append(demographics[age]/data_n_individuals[data.index.get_level_values('age').contains(age)]*data.iloc[np.where(data.index.get_level_values('age').contains(age))[0][0]])
except:
result.append(0)
out.iloc[idx] = sum(result)
return out
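# Illustration (a sketch with made-up numbers): if the data uses a single 18-34 bin with
# 1000 administered doses and the model uses 18-24 and 25-34 bins, the 1000 doses are split
# between the two model bins proportionally to the demographic weight of each single year
# of age inside the 18-34 data bin.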
@lru_cache()
def get_data(self,t):
if self.spatial:
if self.doses:
try:
# Only includes doses A, B and C (so not boosters!) for now
data = np.zeros([self.space_agg, self.age_agg, self.dose_agg+1])
data[:,:,:-1] = np.array(self.df.loc[t,:,:,:].values).reshape( (self.space_agg, self.age_agg, self.dose_agg) )
return data
except:
return np.zeros([self.space_agg, self.age_agg, self.dose_agg+1])
else:
try:
return np.array(self.df.loc[t,:,:].values).reshape( (self.space_agg, self.age_agg) )
except:
return np.zeros([self.space_agg, self.age_agg])
else:
if self.doses:
try:
return np.array(self.df.loc[t,:,:].values).reshape( (self.age_agg, self.dose_agg) )
except:
return np.zeros([self.age_agg, self.dose_agg])
else:
try:
return np.array(self.df.loc[t,:].values)
except:
return np.zeros(self.age_agg)
def unidose_2021_vaccination_campaign(self, states, initN, daily_doses, delay_immunity, vacc_order, stop_idx, refusal):
# Compute the number of vaccine eligible individuals
VE = states['S'] + states['R']
# Initialize N_vacc
N_vacc = np.zeros(self.age_agg)
# Start vaccination loop
idx = 0
while daily_doses > 0:
if idx == stop_idx:
daily_doses = 0 #End vaccination campaign at age 20
elif VE[vacc_order[idx]] - initN[vacc_order[idx]]*refusal[vacc_order[idx]] > daily_doses:
N_vacc[vacc_order[idx]] = daily_doses
daily_doses = 0
else:
N_vacc[vacc_order[idx]] = VE[vacc_order[idx]] - initN[vacc_order[idx]]*refusal[vacc_order[idx]]
daily_doses = daily_doses - (VE[vacc_order[idx]] - initN[vacc_order[idx]]*refusal[vacc_order[idx]])
idx = idx + 1
return N_vacc
def booster_campaign(self, states, daily_doses, vacc_order, stop_idx, refusal):
# Compute the number of booster eligible individuals
VE = states['S'][:,2] + states['E'][:,2] + states['I'][:,2] + states['A'][:,2] + states['R'][:,2] \
+ states['S'][:,3] + states['E'][:,3] + states['I'][:,3] + states['A'][:,3] + states['R'][:,3]
# Initialize N_vacc
N_vacc = np.zeros([self.age_agg,self.dose_agg])
# Booster vaccination strategy without refusal
idx = 0
while daily_doses > 0:
if idx == stop_idx:
daily_doses= 0 #End vaccination campaign at age 20
elif VE[vacc_order[idx]] - self.fully_vaccinated_0[vacc_order[idx]]*refusal[vacc_order[idx]] > daily_doses:
N_vacc[vacc_order[idx],3] = daily_doses
daily_doses= 0
else:
N_vacc[vacc_order[idx],3] = VE[vacc_order[idx]] - self.fully_vaccinated_0[vacc_order[idx]]*refusal[vacc_order[idx]]
daily_doses = daily_doses - (VE[vacc_order[idx]] - self.fully_vaccinated_0[vacc_order[idx]]*refusal[vacc_order[idx]])
idx = idx + 1
return N_vacc
# Default vaccination strategy = Sciensano data + hypothetical scheme after end of data collection for unidose model only (for now)
def __call__(self, t, states, param, initN, daily_doses=60000, delay_immunity = 21, vacc_order = [8,7,6,5,4,3,2,1,0], stop_idx=9, refusal = [0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3]):
"""
time-dependent function for the Belgian vaccination strategy
First, all available first-dose data from Sciensano are used. Then, the user can specify a custom vaccination strategy of "daily_doses" first doses per day,
administered in the order specified by the vector "vacc_order" with a refusal propensity of "refusal" in every age group.
This vaccination strategy does not distinguish between vaccination doses, individuals are transferred to the vaccination circuit after some time delay after the first dose.
For use with the model `COVID19_SEIRD` and `COVID19_SEIRD_spatial_vacc` in `~src/models/models.py`
Parameters
----------
t : int
Simulation time
states: dict
Dictionary containing values of model states
param : dict
Model parameter dictionary
initN : list or np.array
Demographics according to the epidemiological model age bins
daily_doses : int
Number of doses administered per day. Default is 60000 doses/day.
delay_immunity : int
Time delay between first dose vaccination and start of immunity. Default is 21 days.
vacc_order : array
Vector containing vaccination prioritization preference. Default is old to young. Must be equal in length to the number of age bins in the model.
stop_idx : int
Index of the age group at which the vaccination campaign is halted. An index of 9 corresponds to vaccinating all age groups; an index of 8 corresponds to skipping the last age group in vacc_order.
refusal: array
Vector containing the fraction of individuals refusing a vaccine per age group. Default is 30% in every age group. Must be equal in length to the number of age bins in the model.
Return
------
N_vacc : np.array
Number of individuals to be vaccinated at simulation time "t" per age, or per [patch,age]
"""
# Convert time to suitable format
t = pd.Timestamp(t.date())
# Convert delay to a timedelta
delay = pd.Timedelta(str(int(delay_immunity))+'D')
# Compute vaccinated individuals after spring-summer 2021 vaccination campaign
check_time = pd.Timestamp('2021-10-01')
# Only for the non-spatial multi-dose vaccination model
if not self.spatial:
if self.doses:
if t == check_time:
self.fully_vaccinated_0 = states['S'][:,2] + states['E'][:,2] + states['I'][:,2] + states['A'][:,2] + states['R'][:,2] + \
states['S'][:,3] + states['E'][:,3] + states['I'][:,3] + states['A'][:,3] + states['R'][:,3]
# Use data
if t <= self.df_end + delay:
return self.get_data(t-delay)
# Projection into the future
else:
if self.spatial:
if self.doses:
# No projection implemented
return np.zeros([self.space_agg, self.age_agg, self.dose_agg+1])
else:
# No projection implemented
return np.zeros([self.space_agg,self.age_agg])
else:
if self.doses:
return self.booster_campaign(states, daily_doses, vacc_order, stop_idx, refusal)
else:
return self.unidose_2021_vaccination_campaign(states, initN, daily_doses, delay_immunity, vacc_order, stop_idx, refusal)
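# Example usage (a minimal sketch; `df_vacc` is assumed to be the Sciensano vaccination dataset):
#   N_vacc_function = make_vaccination_function(df_vacc)
#   N_vacc = N_vacc_function(sim_time, states, param, initN)   # doses to administer at `sim_time`, per age (and dose) group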
###################################
## Google social policy function ##
###################################
class make_contact_matrix_function():
"""
Class that returns contact matrix based on 4 prevention parameters by default, but has other policies defined as well.
Input
-----
Nc_all : dictionary
contact matrices for home, schools, work, transport, leisure and others
df_google : dataframe
google mobility data
Output
------
__class__ : default function
Default output function, based on contact_matrix_4prev
"""
def __init__(self, df_google, Nc_all):
self.df_google = df_google.astype(float)
self.Nc_all = Nc_all
# Compute start and endtimes of dataframe
self.df_google_start = df_google.index.get_level_values('date')[0]
self.df_google_end = df_google.index.get_level_values('date')[-1]
# Check if provincial data is provided
self.provincial = None
if 'NIS' in self.df_google.index.names:
self.provincial = True
self.space_agg = len(self.df_google.index.get_level_values('NIS').unique().values)
@lru_cache() # once the function is run for a set of parameters, it doesn't need to compile again
def __call__(self, t, prev_home=1, prev_schools=1, prev_work=1, prev_rest = 1,
school=None, work=None, transport=None, leisure=None, others=None, home=None):
"""
t : timestamp
current date
prev_... : float [0,1]
prevention parameter to estimate
school, work, transport, leisure, others : float [0,1]
level of opening of these sectors
if None, it is calculated from google mobility data
only school cannot be None!
"""
if school is None:
raise ValueError(
"Please indicate to which extend schools are open")
places_var = [work, transport, leisure, others]
places_names = ['work', 'transport', 'leisure', 'others']
GCMR_names = ['work', 'transport', 'retail_recreation', 'grocery']
if self.provincial:
if t < pd.Timestamp('2020-03-17'):
return np.ones(self.space_agg)[:,np.newaxis,np.newaxis]*self.Nc_all['total']
elif pd.Timestamp('2020-03-17') <= t <= self.df_google_end:
# Extract row at timestep t
row = -self.df_google.loc[(t, slice(None)),:]/100
else:
# Extract last 14 days and take the mean
row = -self.df_google.loc[(self.df_google_end - pd.Timedelta(days=14)): self.df_google_end, slice(None)].mean(level='NIS')/100
# Sort NIS codes from low to high
row.sort_index(level='NIS', ascending=True,inplace=True)
# Extract values
values_dict={}
for idx,place in enumerate(places_var):
if place is None:
place = 1 - row[GCMR_names[idx]].values
else:
try:
test=len(place)
except:
place = place*np.ones(self.space_agg)
values_dict.update({places_names[idx]: place})
# Schools:
try:
test=len(school)
except:
school = school*np.ones(self.space_agg)
# Construct contact matrix
CM = (prev_home*np.ones(self.space_agg)[:, np.newaxis,np.newaxis]*self.Nc_all['home'] +
(prev_schools*school)[:, np.newaxis,np.newaxis]*self.Nc_all['schools'] +
(prev_work*values_dict['work'])[:,np.newaxis,np.newaxis]*self.Nc_all['work'] +
(prev_rest*values_dict['transport'])[:,np.newaxis,np.newaxis]*self.Nc_all['transport'] +
(prev_rest*values_dict['leisure'])[:,np.newaxis,np.newaxis]*self.Nc_all['leisure'] +
(prev_rest*values_dict['others'])[:,np.newaxis,np.newaxis]*self.Nc_all['others'])
else:
if t < pd.Timestamp('2020-03-17'):
return self.Nc_all['total']
elif pd.Timestamp('2020-03-17') <= t <= self.df_google_end:
# Extract row at timestep t
row = -self.df_google.loc[t]/100
else:
# Extract last 14 days and take the mean
row = -self.df_google[-14:-1].mean()/100
# Extract values
values_dict={}
for idx,place in enumerate(places_var):
if place is None:
place = 1 - row[GCMR_names[idx]]
values_dict.update({places_names[idx]: place})
# Construct contact matrix
CM = (prev_home*self.Nc_all['home'] +
prev_schools*school*self.Nc_all['schools'] +
prev_work*values_dict['work']*self.Nc_all['work'] +
prev_rest*values_dict['transport']*self.Nc_all['transport'] +
prev_rest*values_dict['leisure']*self.Nc_all['leisure'] +
prev_rest*values_dict['others']*self.Nc_all['others'])
return CM
def all_contact(self):
return self.Nc_all['total']
def all_contact_no_schools(self):
return self.Nc_all['total'] - self.Nc_all['schools']
def ramp_fun(self, Nc_old, Nc_new, t, t_start, l):
"""
t : timestamp
current simulation time
t_start : timestamp
start of policy change
l : int
number of additional days after the time delay until full compliance is reached
"""
return Nc_old + (Nc_new-Nc_old)/l * float( (t-t_start)/pd.Timedelta('1D') )
def delayed_ramp_fun(self, Nc_old, Nc_new, t, tau_days, l, t_start):
"""
t : timestamp
current simulation time
t_start : timestamp
start of policy change
tau_days : int
number of days before measures start having an effect
l : int
number of additional days after the time delay until full compliance is reached
"""
return Nc_old + (Nc_new-Nc_old)/l * float( (t-t_start-tau_days)/pd.Timedelta('1D') )
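# Example usage (a minimal sketch; `df_google` and `Nc_all` are assumed to be loaded elsewhere):
#   policy_function = make_contact_matrix_function(df_google, Nc_all)
#   CM = policy_function(sim_time, prev_home=0.6, prev_schools=0.8, prev_work=0.5, prev_rest=0.4, school=1)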
# -*- coding: utf-8 -*-
import pandas as pd
import os
import params
from models import gradient_boosting, random_forest
train_df = pd.read_csv('input/processed/train.csv')
test_df = pd.read_csv('input/processed/test.csv')
from flask import Flask, render_template, request
import pandas as pd
import numpy as np
import re
import tensorflow as tf
from tensorflow.keras.layers import Dropout, Dense, GRU
import os
import joblib
app = Flask(__name__)
@app.route('/')
def home():
return render_template('home.html')
@app.route('/topTen', methods=['GET'])
def topTen():
source = []
number = []
date = "2019-07-30"
orderDate = "20190730"
if request.args.get("date") != None:
date = request.args.get("date")
orderDate = re.sub(r"-", '', request.args.get("date"))
data = pd.read_csv('dataset/result.csv')
orderTotalByDay = data.groupby('OrderDate', as_index=False).size()
#data['OrderDate'].astype(str)
#data['OrderDate'] = data['OrderDate'].apply(changeDate)
data['OrderDate'] = data.OrderDate.astype(str).astype(int)
data = data.loc[data['OrderDate'] == int(orderDate)]
dataCount = data.groupby('ItemName', as_index=False)['OrderDate'].count()
dataArray = dataCount.to_numpy()
dataArray = dataArray[np.argsort(dataArray[:,1])]
[rows, cols] = dataArray.shape
for i in range(10):
number.append(int(dataArray[rows-i-1,1]))
source.append(str(dataArray[rows-i-1,0]))
return render_template("topTen.html", source=source, number=number, date=date)
@app.route('/onedish', methods=['GET'])
def onedish():
dateStart = "20190730"
dateEnd = "20190720"
date1 = "2019-07-30"
date2 = "2019-07-20"
food = "Chicken Tikka Masala"
if request.args.get("date1") != None:
date1 = request.args.get("date1")
dateStart = re.sub(r"-", '', request.args.get("date1"))
date2 = request.args.get("date2")
dateEnd = re.sub(r"-", '', request.args.get("date2"))
food = request.args.get("food")
data = pd.read_csv('dataset/result.csv')
dataPrice = pd.read_csv('dataset/restaurant-1-products-price.csv')
data = data.loc[(data['OrderDate'] <= int(dateStart)) & (data['OrderDate'] >= int(dateEnd))]
orderTotalByDay = data.groupby('OrderDate', as_index=False).size()
dataCount = data.groupby(['OrderDate', 'ItemName'], as_index=False).size()
dataCount['ItemName'] = dataCount.ItemName.astype(str)
dataCount = dataCount.loc[dataCount['ItemName'] == food]
res = pd.merge(orderTotalByDay, dataCount, on=["OrderDate"], how="outer")
res = res['size_y'].fillna(0)
date = []
total = []
dish = []
dataArray = orderTotalByDay.to_numpy()
res = res.to_numpy()
[rows, cols] = dataArray.shape
for i in range(rows):
date.append(dataArray[i,0])
total.append(dataArray[i,1])
dish.append(res[i])
dataPrice = dataPrice.to_numpy()
[rows, cols] = dataPrice.shape
foodlist = []
for i in range(rows):
foodlist.append(dataPrice[i,0])
return render_template("onedish.html", date=date, total=total, dish=dish, date1=date1, date2=date2, food=food, foodlist=foodlist)
@app.route('/orders', methods=['GET'])
def orders():
def changeDate(date):
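# Convert an order timestamp such as '30/07/2019 18:05' to 'YYYYMMDD' (here '20190730')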
str = re.sub(r".\d\d:\d\d", '', date)
res = str[6:] + str[3:5] + str[0:2]
return res
def changeDate2(date):
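# Extract the time of day from an order timestamp such as '30/07/2019 18:05' as 'HHMM' (here '1805')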
str = re.sub(r"\d\d/\d\d/\d\d\d\d.", '', date)
res = str[0:2] + str[3:]
return res
date1 = "2019-07-30"
date = "20190730"
if request.args.get("date1") != None:
date1 = request.args.get("date1")
date = re.sub(r"-", '', request.args.get("date1"))
timeLabel = ["16:00", "16:30", "17:00", "17:30", "18:00", "18:30", "19:00", "19:30", "20:00", "20:30", "21:00", "21:30", "22:00", "22:30", "23:00", "23:30"]
time = [1600, 1630, 1700, 1730, 1800, 1830, 1900, 1930, 2000, 2030, 2100, 2130, 2200, 2230, 2300, 2330, 2400]
data = pd.read_csv('dataset/restaurant-1-orders.csv', usecols=['OrderDate'])
data['OrderDate'].astype(str)
data['OrderDay'] = data['OrderDate'].apply(changeDate)
data['OrderTime'] = data['OrderDate'].apply(changeDate2)
data['OrderDay'] = data.OrderDay.astype(str).astype(int)
data['OrderTime'] = data.OrderTime.astype(str).astype(int)
data = data.loc[data['OrderDay'] == int(date)]
ordersByTime = []
for i in range(len(time)-1):
tmp = data.loc[(data['OrderTime']>=time[i]) & (data['OrderTime']<=time[i+1])]['OrderTime'].count()
ordersByTime.append(tmp)
return render_template("orders.html", ordersByTime=ordersByTime, timeLabel=timeLabel, date1=date1)
@app.route('/stock', methods=['GET'])
def stock():
date = "2021-04-01"
stockDate = "20210401"
if request.args.get("date") != None:
date = request.args.get("date")
stockDate = re.sub(r"-", '', request.args.get("date"))
dataMenu = pd.read_csv('dataset/ingredients_menu.csv')
dataMenu = dataMenu.to_numpy()
menu = {}
[rows, cols] = dataMenu.shape
for i in range(rows):
if dataMenu[i, 0] in menu:
menu[dataMenu[i,0]][dataMenu[i,1]] = dataMenu[i, 2]
else:
menu[dataMenu[i,0]] = {}
menu[dataMenu[i,0]][dataMenu[i,1]] = dataMenu[i, 2]
menu[dataMenu[i,0]]["price"] = dataMenu[i, 3]
dataStock = pd.read_csv('dataset/myDataStock.csv')
def changeDate3(date):
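# Convert an ISO date such as '2021-04-01' to 'YYYYMMDD' ('20210401')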
res = date[0:4] + date[5:7] + date[8:10]
return res
dataStock['Stock'] = dataStock.Stock.astype(str).astype(int)
dataStock['Date'] = dataStock['Date'].apply(changeDate3)
dataStock = dataStock.loc[(dataStock['Date'] == stockDate) & (dataStock['Stock']>150)]
dataStock = dataStock.to_numpy()
dataStock = dataStock[np.argsort(dataStock[:,3])]
dataStock = np.flip(dataStock, 0)
stock = {}
stockData = {}
[rows, cols] = dataStock.shape
for i in range(rows):
stock[dataStock[i, 2]] = dataStock[i, 3]/300
stockData[dataStock[i, 2]] = dataStock[i, 3]
nodish = {'name': '', 'detail': 'no more dish to be recommended'}
score = [0, 0, 0]
dishes = [nodish, nodish, nodish]
def recommend(score, myscore):
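# Return the index of the first stored score that myscore exceeds, or -1 if it exceeds none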
for i in range(len(score)):
if myscore>score[i]:
return i
return -1
for key, value in menu.items():
myScore = 0
detail = ""
for key2, value2 in value.items():
if key2 == 'price':
myScore += (value2/40)
continue
if key2 in stock:
myScore += (stock[key2]*value2)
continue
if detail == "":
detail = detail + key2
else:
detail = detail + ", " + key2
index = recommend(score, myScore)
if index >= 0:
score[index] = myScore
if detail != "":
detail = "Need more " + detail + " to prepare this dish !"
dishes[index] = {'name': key, 'detail': detail}
return render_template("stock.html", stockData=stockData, dishes=dishes, date=date)
@app.route('/predict', methods=['GET'])
def predict():
sc = joblib.load('dataset/scaler.gz')
ordersDI = pd.read_csv("dataset/dinein.csv")
import numpy as np
import pandas as pd
import scipy.integrate
import matplotlib.pyplot as plt
import copy
import warnings
plt.rcParams['font.family'] = 'Arial'
plt.rcParams['font.size'] = 12
class TensileTest:
r'''Process tensile testing data.
Load a tensile test data and process it
in order to deliver the material
properties.
Warnings
--------
All values are meant to be in the SI
units. Since no unit conversion is made,
the input data has to be in the SI
units.
Attributes
----------
originalFile : str
Path to the file from which the data was read.
force : numpy.ndarray
Force data from the tensile test.
displacement : numpy.ndarray
Displacement data from the tensile test.
time : numpy.ndarray
Time instant data from the tensile test.
length : float
Gage length of the specimen.
diameter : float
Diameter of the specimen.
area : float
Cross section area of the specimen.
:math:`A = \dfrac{\pi \ D^2}{4}`
being :math:`D` the diameter of the
specimen.
strain : numpy.ndarray
Strain data of the tensile test.
:math:`\epsilon = \dfrac{l - l_0}{l_0} = \dfrac{d}{l_0}`
being :math:`l_0` the initial length.
stress : numpy.ndarray
Stress data of the tensile test.
:math:`\sigma = \dfrac{F}{A}`
being :math:`F` the force and
:math:`A` the cross section area.
realStrain : numpy.ndarray
Strain for the real curve.
:math:`\epsilon_r = ln(1 + \epsilon)`.
realStress : numpy.ndarray
Stress for the real curve.
:math:`\sigma_r = \sigma \ (1 + \epsilon)`.
proportionalityStrain, proportionalityStrength : float
Stress and strain values at the proportionality
limit point.
yieldStrain, yieldStrength : float
Stress and strain values at the yield point.
ultimateStrain, ultimateStrength : float
Stress and strain values at the ultimate point.
strengthCoefficient, strainHardeningExponent : float
Those are coefficients for the Hollomon's
equation during the plastic deformation. It
represents the hardening behavior of the
material.
Hollomon's equation:
:math:`\sigma = K \ \epsilon^{n}`
being :math:`K` the strength coefficient
and :math:`n` the strain hardening exponent.
elasticStrain, elasticStress : numpy.ndarray
Strain and stress data when the material
behaves elastically.
plasticStrain, plasticStress : numpy.ndarray
Strain and stress data when the material
behaves plastically.
neckingStrain, neckingStress : numpy.ndarray
Strain and stress data when the
necking starts at the material.
elasticModulus : float
Elastic modulus value.
resilienceModulus : float
Resilience modulus value. It is the energy
which the material absorbs per unit of volume
during its elastic deformation.
toughnessModulus : float
Toughness modulus value. It is the energy
which the material absorbs per unit of volume
until its failure.
See Also
--------
`Tensile testing wikipedia page <https://en.wikipedia.org/wiki/Tensile_testing>`_
`Stress-Strain curve wikipedia page <https://en.wikipedia.org/wiki/Stress%E2%80%93strain_curve>`_
Notes
-----
.. list-table:: Nomenclature
:widths: 5 25 15
:header-rows: 1
* - Symbol
- Description
- Definition
* - :math:`[F]`
- force
- input
* - :math:`[d]`
- displacement
- input
* - :math:`[t]`
- time
- input
* - :math:`l_0`
- specimen length
- input
* - :math:`D`
- specimen diameter
- input
* - :math:`A`
- specimen cross section area
- :math:`A = \dfrac{\pi \ D^2}{4}`
* - :math:`[\epsilon]`
- strain
- :math:`\epsilon = \dfrac{l - l_0}{l_0} = \dfrac{d}{l_0}`
* - :math:`[\sigma]`
- stress
- :math:`\sigma = \dfrac{F}{A}`
* - :math:`[\epsilon_r]`
- real strain
- :math:`\epsilon_r = ln(1 + \epsilon)`
* - :math:`[\sigma_r]`
- real stress
- :math:`\sigma_r = \sigma \ (1 + \epsilon)`
* - :math:`\epsilon_{pr},\sigma_{pr}`
- proportionality strain and strength
- algorithm defined
* - :math:`\epsilon_y,\sigma_y`
- yield strain and strength
- algorithm defined
* - :math:`\epsilon_u,\sigma_u`
- ultimate strain and strength
- algorithm defined
* - :math:`K`
- strength coefficient
- algorithm defined
* - :math:`n`
- strain hardening exponent
- algorithm defined
* - :math:`[\epsilon_e]`
- elastic strain
- :math:`[\epsilon][\epsilon < \epsilon_y]`
* - :math:`[\sigma_e]`
- elastic stress
- :math:`[\sigma][\epsilon < \epsilon_y]`
* - :math:`[\epsilon_p]`
- plastic strain
- :math:`[\epsilon][\epsilon_y < \epsilon < \epsilon_u]`
* - :math:`[\sigma_p]`
- plastic stress
- :math:`[\sigma][\epsilon_y < \epsilon < \epsilon_u]`
* - :math:`[\epsilon_n]`
- necking strain
- :math:`[\epsilon][\epsilon_u < \epsilon]`
* - :math:`[\sigma_n]`
- necking stress
- :math:`[\sigma][\epsilon_u < \epsilon]`
* - :math:`E`
- elastic modulus
- :math:`\sigma = E \ \epsilon`, curve fit
* - :math:`U_r`
- resilience modulus
- :math:`\displaystyle\int\limits_{[\epsilon_e]}\sigma \ \mathrm{d}\epsilon`
* - :math:`U_t`
- toughness modulus
- :math:`\displaystyle\int\limits_{[\epsilon]}\sigma \ \mathrm{d}\epsilon`
**Auto-find proportionality limit and elastic modulus**::
foreach l in range(10, len(strain)):
fit a one-degree polynomial to the data
store the linear coefficient
store the curve fit residual
select the proportionality limit point as the one with the smallest residual
select the elastic modulus as the linear coefficient of the polynomial
**Ultimate point**::
Select the ultimate point as the one
with the maximum stress
**Yield point**::
select the yield point as the intersection of the curves:
([strain], [stress])
([strain], elasticModulus * ([strain]-0.002))
if the point has strain larger than the ultimate point:
select the yield point as equal to the
proportionality limit point
**Hardening, strength coefficient and strain hardening exponent**::
Curve fit (Hollomon's equation):
f = K * strain**n
x = [plastic strain]
y = [plastic stress]
'''
def __init__(self, file, length, diameter):
'''Process tensile data.
Parameters
----------
file : str
Path to file containing the data.
The data from the file is not
checked in any way. The file must
be in the comma-separated-value
format.
length : float
Length :math:`l_0` of the specimen
in meters.
diameter : float
Diameter :math:`D` of the specimen
in meters.
Examples
--------
>>> import mechanical_testing as mect
>>> tensile = mect.TensileTest(
file = './test/data/tensile/tensile_steel_1045.csv',
length = 75.00E-3,
diameter = 10.00E-3,
)
>>> tensile.yieldStrength
7.6522E+8
'''
self._readFromFile(file)
self._defineDimensions(length, diameter)
self._defineEngineeringCurve()
self._defineRealCurve()
self._defineElasticModulusAndProportionalityLimit()
self._defineYieldStrength()
self._defineUltimateStrength()
self._correctYieldStrength()
self._defineElasticBehavior()
self._definePlasticBehavior()
self._defineNeckingBehavior()
self._defineResilienceModulus()
self._defineToughnessModulus()
self._defineHardening()
return
def _readFromFile(self, file):
df =
|
pd.read_csv(filepath_or_buffer=file)
|
pandas.read_csv
|
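The hardening step described in the TensileTest docstring fits Hollomon's equation :math:`\sigma = K \ \epsilon^{n}` to the plastic region. A minimal sketch of such a fit with `scipy.optimize.curve_fit`, run on synthetic data (the strain/stress values below are made up for illustration and are not part of the class):

```python
import numpy as np
from scipy.optimize import curve_fit

def hollomon(strain, K, n):
    # Hollomon's equation: sigma = K * strain**n
    return K * strain ** n

# Synthetic plastic-region data, for illustration only.
plastic_strain = np.linspace(0.002, 0.2, 50)
plastic_stress = 9.0e8 * plastic_strain ** 0.15

(K_fit, n_fit), _ = curve_fit(hollomon, plastic_strain, plastic_stress, p0=(1.0e9, 0.2))
print(f"K ~ {K_fit:.3e} Pa, n ~ {n_fit:.3f}")
```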
"""
Dataframe-like class to hold general energy-related timeseries; either volume ([MW] or
[MWh]), price ([Eur/MWh]) or both; in all cases there is a single timeseries for each.
"""
from __future__ import annotations
from . import single_helper
from .base import PfLine
from .. import changefreq
from typing import Dict, Iterable, Union
import pandas as pd
import numpy as np
class SinglePfLine(PfLine):
"""Portfolio line without children. Has a single dataframe; .children is the empty
dictionary.
Parameters
----------
data: Any
Generally: object with one or more attributes or items ``w``, ``q``, ``r``, ``p``;
all timeseries. Most commonly a ``pandas.DataFrame`` or a dictionary of
``pandas.Series``, but may also be e.g. another PfLine object.
Returns
-------
SinglePfLine
Notes
-----
* If the timeseries or values in ``data`` do not have a ``pint`` data type, the
standard units are assumed (MW, MWh, Eur, Eur/MWh).
* If the timeseries or values in ``data`` do have a ``pint`` data type, they are
converted into the standard units.
"""
def __new__(cls, data):
# Catch case where data is already a valid class instance.
if isinstance(data, SinglePfLine):
return data # TODO: return copy
# Otherwise, do normal thing.
return super().__new__(cls, data)
def __init__(self, data: Union[PfLine, Dict, pd.DataFrame, pd.Series]):
self._df = single_helper.make_dataframe(data)
# Implementation of ABC methods.
@property
def children(self) -> Dict:
return {}
@property
def index(self) -> pd.DatetimeIndex:
return self._df.index
@property
def w(self) -> pd.Series:
if self.kind == "p":
return pd.Series(np.nan, self.index, name="w", dtype="pint[MW]")
else:
return
|
pd.Series(self.q / self.index.duration, name="w")
|
pandas.Series
|
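In the `.w` property above, power is derived from energy by dividing by each delivery period's duration; `self.index.duration` is a pint-aware helper specific to this package. A plain-pandas sketch of the same idea, without units (the index and volumes are made up):

```python
import pandas as pd

# Hourly delivery periods with energy volumes in MWh (made-up values).
idx = pd.date_range("2024-01-01", periods=4, freq="60min")
q = pd.Series([10.0, 12.0, 8.0, 11.0], index=idx, name="q")  # MWh

# Duration of each period in hours; a regular hourly index gives 1.0 everywhere.
duration_h = pd.Series(1.0, index=idx)

w = (q / duration_h).rename("w")  # MW = MWh / h
print(w)
```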
#!/usr/bin/env python3
import sys
import argparse
import seaborn
from evalys import *
from evalys.jobset import *
from evalys.mstates import *
from evalys.pstates import *
from evalys.visu.legacy import *
import pandas as pd
import matplotlib.pyplot as plt
def main():
# Argument parsing
parser = argparse.ArgumentParser(description='Draws the states the machines are in over time')
parser.add_argument('--mstatesCSV', '-m', nargs='+',
help='The name of the CSV file which contains pstate information')
parser.add_argument('--jobsCSV', '-j', nargs='+',
help='The name of the CSV file which contains jobs information')
parser.add_argument('--pstatesCSV', '-p', nargs='+',
help='The name of the CSV file which contains pstate information')
parser.add_argument('--energyCSV', '-e', nargs='+',
help='The name of the CSV file which contains energy consumption information')
parser.add_argument('--llhCSV', '-l', nargs='+',
help='The name of the CSV file which contains LLH information')
parser.add_argument('--llh-bound',
type=float,
help='If set, draws an LLH horizontal line at this bound')
parser.add_argument('--priority-job-waiting-time-bound',
type=float,
help='If set, draws a horizontal line corresponding to this bound')
parser.add_argument('--time-window', nargs='+',
type=float,
help="If set, limits the time window of study. Example: 0 4200")
parser.add_argument('--force-right-adjust',
type=float,
help='If set, forces the right adjustment of the plot.')
parser.add_argument('--off', nargs='+',
help='The power states which correspond to OFF machine states')
parser.add_argument('--switchon', nargs='+',
help='The power states which correspond to a switching ON machine state')
parser.add_argument('--switchoff', nargs='+',
help='The power states which correspond to switching OFF machine state')
parser.add_argument('--names', nargs='+',
default=['Unnamed'],
help='When multiple instances must be plotted, their names must be given via this parameter.')
parser.add_argument('--output', '-o',
help='The output file (format depending on the given extension, pdf is RECOMMENDED). For example: figure.pdf')
parser.add_argument("--gantt", action='store_true',
help="If set, the gantt chart will be outputted. Requires jobs, pstates and probably machine values (--off, --switchon, --switchoff)")
parser.add_argument("--ru", action='store_true',
help="If set, the resource usage will be outputted. Requires machine states")
parser.add_argument("--power", action='store_true',
help="If set, the instantaneous power will be outputted. Requires energyCSV")
parser.add_argument("--energy", action='store_true',
help="If set, the cumulated energy consumption will be outputted. Requires energyCSV")
parser.add_argument('--llh', action='store_true',
help='If set, the LLH will be outputted. Requires llhCSV. Jobs are optional.')
parser.add_argument('--load-in-queue', action='store_true',
help='If set, the load in queue will be outputted. Requires llhCSV.')
parser.add_argument('--nb-jobs-in-queue', action='store_true',
help='If set, the number of jobs in queue will be outputted. Requires llhCSV.')
parser.add_argument('--priority-job-size', action='store_true',
help='If set, the size of the priority job will be outputted. Requires llhCSV.')
parser.add_argument('--priority-job-expected-waiting-time', action='store_true',
help='If set, the expected waiting time of the priority job will be outputted. Requires llhCSV.')
parser.add_argument('--priority-job-starting-expected-soon', action='store_true',
help='If set, whether the priority job is expected to start soon will be outputted. Requires llhCSV')
args = parser.parse_args()
###################
# Figure creation #
###################
nb_instances = None
nb_subplots = 0
left_adjust = 0.05
top_adjust = 0.95
bottom_adjust = 0.05
right_adjust = 0.95
if args.gantt:
assert(args.jobsCSV), "Jobs must be given to compute the gantt chart!"
nb_jobs_csv = len(args.jobsCSV)
if args.pstatesCSV:
nb_pstates_csv = len(args.pstatesCSV)
assert(nb_jobs_csv == nb_pstates_csv), "The number of jobs_csv ({}) should equal the number of pstates_csv ({})".format(nb_jobs_csv, nb_pstates_csv)
nb_gantt = nb_jobs_csv
nb_subplots += nb_gantt
nb_instances = nb_gantt
if args.ru:
assert(args.mstatesCSV), "Mstates must be given to compute the resource usage!"
right_adjust = min(right_adjust, 0.85)
nb_ru = len(args.mstatesCSV)
nb_subplots += nb_ru
if nb_instances is not None:
assert(nb_instances == nb_ru), 'Inconsistent number of instances (nb_ru={} but already got nb_instances={})'.format(nb_ru, nb_instances)
else:
nb_instances = nb_ru
if args.power:
assert(args.energyCSV), "EnergyCSV must be given to compute power!"
nb_subplots += 1
right_adjust = min(right_adjust, 0.85)
if args.energy:
assert(args.energyCSV), "EnergyCSV must be given to compute energy!"
nb_energy = 1
nb_subplots += nb_energy
right_adjust = min(right_adjust, 0.85)
if args.energyCSV:
nb_energy_csv = len(args.energyCSV)
if nb_instances is not None:
assert(nb_instances == nb_energy_csv), 'Inconsistent number of instances (nb_energy_csv={} but already got nb_instances={})'.format(nb_energy_csv, nb_instances)
else:
nb_instances = nb_energy_csv
if args.llh:
assert(args.llhCSV), "LLH_CSV must be given to compute llh!"
right_adjust = min(right_adjust, 0.85)
nb_subplots += 1
if args.load_in_queue:
assert(args.llhCSV), "LLH_CSV must be given to compute llh!"
nb_subplots += 1
if args.nb_jobs_in_queue:
assert(args.llhCSV), "LLH_CSV must be given to compute the queue!"
nb_subplots += 1
if args.priority_job_size:
assert(args.llhCSV), "LLH_CSV must be given to compute the priority job size!"
nb_subplots += 1
if args.priority_job_expected_waiting_time:
assert(args.llhCSV), "LLH_CSV must be given to compute the priority job size!"
nb_subplots += 1
if args.priority_job_starting_expected_soon:
assert(args.llhCSV), "LLH_CSV must be given to compute the priority job size!"
nb_subplots += 1
if args.llhCSV:
nb_llh_csv = len(args.llhCSV)
if nb_instances is not None:
assert(nb_instances == nb_llh_csv), 'Inconsistent number of instances (nb_llh_csv={} but already got nb_instances={})'.format(nb_llh_csv, nb_instances)
else:
nb_instances = nb_llh_csv
if nb_subplots == 0:
print('There is nothing to plot!')
sys.exit(0)
names = args.names
assert(nb_instances == len(names)), 'The number of names ({} in {}) should equal the number of instances ({})'.format(len(names), names, nb_instances)
if args.force_right_adjust:
right_adjust = args.force_right_adjust
fig, ax_list = plt.subplots(nb_subplots, sharex=True, sharey=False)
fig.subplots_adjust(bottom=bottom_adjust,
right=right_adjust,
top=top_adjust,
left=left_adjust)
#fig.tight_layout()
if nb_subplots < 2:
ax_list = [ax_list]
##########################################
# Create data structures from input args #
##########################################
time_min = None
time_max = None
if args.time_window:
time_min, time_max = [float(f) for f in args.time_window]
jobs = list()
if args.jobsCSV and (args.gantt or args.llhCSV):
for csv_filename in args.jobsCSV:
jobs.append(JobSet.from_csv(csv_filename))
pstates = list()
if args.pstatesCSV and args.gantt:
for csv_filename in args.pstatesCSV:
pstates.append(PowerStatesChanges(csv_filename))
machines = list()
if args.mstatesCSV and args.ru:
for csv_filename in args.mstatesCSV:
machines.append(MachineStatesChanges(csv_filename, time_min, time_max))
llh = list()
if args.llhCSV:
for csv_filename in args.llhCSV:
llh_data =
|
pd.read_csv(csv_filename)
|
pandas.read_csv
|
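The script above accepts `--time-window` to restrict the analysis to a time range. A minimal sketch of clipping a loaded CSV to such a window (the `time` column name and the values are assumptions, not taken from the evalys format):

```python
import pandas as pd

# Stand-in for a loaded CSV; the 'time' column name is hypothetical.
llh_data = pd.DataFrame({"time": [0, 100, 2500, 4100, 6000],
                         "llh": [0.0, 1.5, 3.2, 2.8, 4.0]})

time_min, time_max = 0.0, 4200.0  # e.g. from --time-window 0 4200
windowed = llh_data[(llh_data["time"] >= time_min) & (llh_data["time"] <= time_max)]
print(windowed)
```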
# Copyright (c) 2021 <NAME>. All rights reserved.
# This code is licensed under Apache 2.0 with Commons Clause license (see LICENSE.md for details)
"""Custom pandas accessors for signals data.
Methods can be accessed as follows:
* `SignalsSRAccessor` -> `pd.Series.vbt.signals.*`
* `SignalsDFAccessor` -> `pd.DataFrame.vbt.signals.*`
```python-repl
>>> import pandas as pd
>>> import vectorbt as vbt
>>> # vectorbt.signals.accessors.SignalsAccessor.pos_rank
>>> pd.Series([False, True, True, True, False]).vbt.signals.pos_rank()
0 0
1 1
2 2
3 3
4 0
dtype: int64
```
The accessors extend `vectorbt.generic.accessors`.
!!! note
The underlying Series/DataFrame should already be a signal series.
Input arrays should be `np.bool_`.
Grouping is only supported by the methods that accept the `group_by` argument.
Accessors do not utilize caching.
Run for the examples below:
```python-repl
>>> import vectorbt as vbt
>>> import numpy as np
>>> import pandas as pd
>>> from numba import njit
>>> from datetime import datetime
>>> mask = pd.DataFrame({
... 'a': [True, False, False, False, False],
... 'b': [True, False, True, False, True],
... 'c': [True, True, True, False, False]
... }, index=pd.Index([
... datetime(2020, 1, 1),
... datetime(2020, 1, 2),
... datetime(2020, 1, 3),
... datetime(2020, 1, 4),
... datetime(2020, 1, 5)
... ]))
>>> mask
a b c
2020-01-01 True True True
2020-01-02 False False True
2020-01-03 False True True
2020-01-04 False False False
2020-01-05 False True False
```
## Stats
!!! hint
See `vectorbt.generic.stats_builder.StatsBuilderMixin.stats` and `SignalsAccessor.metrics`.
```python-repl
>>> mask.vbt.signals.stats(column='a')
Start 2020-01-01 00:00:00
End 2020-01-05 00:00:00
Period 5 days 00:00:00
Total 1
Rate [%] 20
First Index 2020-01-01 00:00:00
Last Index 2020-01-01 00:00:00
Norm Avg Index [-1, 1] -1
Distance: Min NaT
Distance: Max NaT
Distance: Mean NaT
Distance: Std NaT
Total Partitions 1
Partition Rate [%] 100
Partition Length: Min 1 days 00:00:00
Partition Length: Max 1 days 00:00:00
Partition Length: Mean 1 days 00:00:00
Partition Length: Std NaT
Partition Distance: Min NaT
Partition Distance: Max NaT
Partition Distance: Mean NaT
Partition Distance: Std NaT
Name: a, dtype: object
```
We can pass another signal array to compare this array with:
```python-repl
>>> mask.vbt.signals.stats(column='a', settings=dict(other=mask['b']))
Start 2020-01-01 00:00:00
End 2020-01-05 00:00:00
Period 5 days 00:00:00
Total 1
Rate [%] 20
Total Overlapping 1
Overlapping Rate [%] 33.3333
First Index 2020-01-01 00:00:00
Last Index 2020-01-01 00:00:00
Norm Avg Index [-1, 1] -1
Distance -> Other: Min 0 days 00:00:00
Distance -> Other: Max 0 days 00:00:00
Distance -> Other: Mean 0 days 00:00:00
Distance -> Other: Std NaT
Total Partitions 1
Partition Rate [%] 100
Partition Length: Min 1 days 00:00:00
Partition Length: Max 1 days 00:00:00
Partition Length: Mean 1 days 00:00:00
Partition Length: Std NaT
Partition Distance: Min NaT
Partition Distance: Max NaT
Partition Distance: Mean NaT
Partition Distance: Std NaT
Name: a, dtype: object
```
We can also return duration as a floating number rather than a timedelta:
```python-repl
>>> mask.vbt.signals.stats(column='a', settings=dict(to_timedelta=False))
Start 2020-01-01 00:00:00
End 2020-01-05 00:00:00
Period 5
Total 1
Rate [%] 20
First Index 2020-01-01 00:00:00
Last Index 2020-01-01 00:00:00
Norm Avg Index [-1, 1] -1
Distance: Min NaN
Distance: Max NaN
Distance: Mean NaN
Distance: Std NaN
Total Partitions 1
Partition Rate [%] 100
Partition Length: Min 1
Partition Length: Max 1
Partition Length: Mean 1
Partition Length: Std NaN
Partition Distance: Min NaN
Partition Distance: Max NaN
Partition Distance: Mean NaN
Partition Distance: Std NaN
Name: a, dtype: object
```
`SignalsAccessor.stats` also supports (re-)grouping:
```python-repl
>>> mask.vbt.signals.stats(column=0, group_by=[0, 0, 1])
Start 2020-01-01 00:00:00
End 2020-01-05 00:00:00
Period 5 days 00:00:00
Total 4
Rate [%] 40
First Index 2020-01-01 00:00:00
Last Index 2020-01-05 00:00:00
Norm Avg Index [-1, 1] -0.25
Distance: Min 2 days 00:00:00
Distance: Max 2 days 00:00:00
Distance: Mean 2 days 00:00:00
Distance: Std 0 days 00:00:00
Total Partitions 4
Partition Rate [%] 100
Partition Length: Min 1 days 00:00:00
Partition Length: Max 1 days 00:00:00
Partition Length: Mean 1 days 00:00:00
Partition Length: Std 0 days 00:00:00
Partition Distance: Min 2 days 00:00:00
Partition Distance: Max 2 days 00:00:00
Partition Distance: Mean 2 days 00:00:00
Partition Distance: Std 0 days 00:00:00
Name: 0, dtype: object
```
## Plots
!!! hint
See `vectorbt.generic.plots_builder.PlotsBuilderMixin.plots` and `SignalsAccessor.subplots`.
This class inherits subplots from `vectorbt.generic.accessors.GenericAccessor`.
"""
import warnings
import numpy as np
import pandas as pd
from vectorbt import _typing as tp
from vectorbt.base import reshape_fns
from vectorbt.base.array_wrapper import ArrayWrapper
from vectorbt.generic import nb as generic_nb
from vectorbt.generic import plotting
from vectorbt.generic.accessors import GenericAccessor, GenericSRAccessor, GenericDFAccessor
from vectorbt.generic.ranges import Ranges
from vectorbt.records.mapped_array import MappedArray
from vectorbt.root_accessors import register_dataframe_vbt_accessor, register_series_vbt_accessor
from vectorbt.signals import nb
from vectorbt.utils import checks
from vectorbt.utils.colors import adjust_lightness
from vectorbt.utils.config import merge_dicts, Config
from vectorbt.utils.decorators import class_or_instancemethod
from vectorbt.utils.template import RepEval
__pdoc__ = {}
class SignalsAccessor(GenericAccessor):
"""Accessor on top of signal series. For both, Series and DataFrames.
Accessible through `pd.Series.vbt.signals` and `pd.DataFrame.vbt.signals`."""
def __init__(self, obj: tp.SeriesFrame, **kwargs) -> None:
checks.assert_dtype(obj, np.bool_)
GenericAccessor.__init__(self, obj, **kwargs)
@property
def sr_accessor_cls(self) -> tp.Type["SignalsSRAccessor"]:
"""Accessor class for `pd.Series`."""
return SignalsSRAccessor
@property
def df_accessor_cls(self) -> tp.Type["SignalsDFAccessor"]:
"""Accessor class for `pd.DataFrame`."""
return SignalsDFAccessor
# ############# Overriding ############# #
def bshift(self, *args, fill_value: bool = False, **kwargs) -> tp.SeriesFrame:
"""`vectorbt.generic.accessors.GenericAccessor.bshift` with `fill_value=False`."""
return GenericAccessor.bshift(self, *args, fill_value=fill_value, **kwargs)
def fshift(self, *args, fill_value: bool = False, **kwargs) -> tp.SeriesFrame:
"""`vectorbt.generic.accessors.GenericAccessor.fshift` with `fill_value=False`."""
return GenericAccessor.fshift(self, *args, fill_value=fill_value, **kwargs)
@classmethod
def empty(cls, *args, fill_value: bool = False, **kwargs) -> tp.SeriesFrame:
"""`vectorbt.base.accessors.BaseAccessor.empty` with `fill_value=False`."""
return GenericAccessor.empty(*args, fill_value=fill_value, dtype=np.bool_, **kwargs)
@classmethod
def empty_like(cls, *args, fill_value: bool = False, **kwargs) -> tp.SeriesFrame:
"""`vectorbt.base.accessors.BaseAccessor.empty_like` with `fill_value=False`."""
return GenericAccessor.empty_like(*args, fill_value=fill_value, dtype=np.bool_, **kwargs)
# ############# Generation ############# #
@classmethod
def generate(cls,
shape: tp.RelaxedShape,
choice_func_nb: tp.ChoiceFunc, *args,
pick_first: bool = False,
**kwargs) -> tp.SeriesFrame:
"""See `vectorbt.signals.nb.generate_nb`.
`**kwargs` will be passed to pandas constructor.
## Example
Generate random signals manually:
```python-repl
>>> @njit
... def choice_func_nb(from_i, to_i, col):
... return col + from_i
>>> pd.DataFrame.vbt.signals.generate((5, 3),
... choice_func_nb, index=mask.index, columns=mask.columns)
a b c
2020-01-01 True False False
2020-01-02 False True False
2020-01-03 False False True
2020-01-04 False False False
2020-01-05 False False False
```
"""
checks.assert_numba_func(choice_func_nb)
if not isinstance(shape, tuple):
shape = (shape, 1)
elif isinstance(shape, tuple) and len(shape) == 1:
shape = (shape[0], 1)
result = nb.generate_nb(shape, pick_first, choice_func_nb, *args)
if cls.is_series():
if shape[1] > 1:
raise ValueError("Use DataFrame accessor")
return pd.Series(result[:, 0], **kwargs)
return
|
pd.DataFrame(result, **kwargs)
|
pandas.DataFrame
|
"""Test solvent-accessible surface area methods."""
import logging
import json
from pathlib import Path
import pytest
import yaml
from scipy.stats import linregress
import numpy as np
import pandas as pd
from osmolytes.sasa import SolventAccessibleSurface, ReferenceModels
from osmolytes.pqr import parse_pqr_file, Atom, aggregate, count_residues
_LOGGER = logging.getLogger(__name__)
with open("tests/data/alkanes/alkanes.json", "rt") as json_file:
ATOM_AREAS = json.load(json_file)
PROTEIN_PATH = Path("tests/data/proteins")
@pytest.mark.parametrize("radius", [0.25, 0.5, 1.0, 2.0, 4.0])
def test_one_sphere_sasa(radius, tmp_path):
"""Test solvent-accessible surface areas for one sphere."""
atom = Atom()
atom.position = np.random.randn(3)
frac = np.random.rand(1)[0]
atom.radius = frac * radius
probe_radius = (1.0 - frac) * radius
xyz_path = Path(tmp_path) / "sphere.xyz"
sas = SolventAccessibleSurface(
[atom], probe_radius, 200, xyz_path=xyz_path
)
atom_sasa = sas.atom_surface_area(0)
ref_sasa = 4.0 * np.pi * radius * radius
_LOGGER.info(
f"Radius: {radius}, Test area: {atom_sasa}, Ref area: {ref_sasa}"
)
np.testing.assert_almost_equal(atom_sasa, ref_sasa)
def two_sphere_area(radius1, radius2, distance):
"""Area of two overlapping spheres.
:param float radius1: radius of sphere1
:param float radius2: radius of sphere2
:param float distance: distance between centers of spheres
:returns: exposed areas of spheres
:rtype: (float, float)
"""
distsq = distance * distance
rad1sq = radius1 * radius1
rad2sq = radius2 * radius2
full_area1 = 4 * np.pi * rad1sq
full_area2 = 4 * np.pi * rad2sq
if distance > (radius1 + radius2):
return (full_area1, full_area2)
elif distance <= np.absolute(radius1 - radius2):
if full_area1 > full_area2:
return (full_area1, 0)
if full_area1 < full_area2:
return (0, full_area2)
else:
return (0.5 * full_area1, 0.5 * full_area2)
else:
if radius1 > 0:
cos_theta1 = (rad1sq + distsq - rad2sq) / (2 * radius1 * distance)
cap1_area = 2 * np.pi * radius1 * radius1 * (1 - cos_theta1)
else:
cap1_area = 0
if radius2 > 0:
cos_theta2 = (rad2sq + distsq - rad1sq) / (2 * radius2 * distance)
cap2_area = 2 * np.pi * radius2 * radius2 * (1 - cos_theta2)
else:
cap2_area = 0
return (full_area1 - cap1_area, full_area2 - cap2_area)
@pytest.mark.parametrize("radius", [0.0, 1.1, 2.2, 4.4, 6.6, 8.8])
def test_two_sphere_sasa(radius, tmp_path):
"""Test solvent accessible surface areas for two spheres."""
atom_tolerance = 0.02
total_tolerance = 0.02
probe_radius = 0.0
big_atom = Atom()
big_atom.radius = radius
big_atom.position = np.array([0, 0, 0])
little_atom = Atom()
little_atom.radius = 1.0
test_atom_areas = []
test_total_areas = []
ref_atom_areas = []
ref_total_areas = []
distances = np.linspace(0, (big_atom.radius + little_atom.radius), num=20)
for distance in distances:
_LOGGER.debug("Distance = %g", distance)
little_atom.position = np.array(3 * [distance / np.sqrt(3)])
xyz_path = Path(tmp_path) / f"spheres-{distance}.xyz"
sas = SolventAccessibleSurface(
[big_atom, little_atom], probe_radius, 300, xyz_path=xyz_path
)
test = np.array([sas.atom_surface_area(0), sas.atom_surface_area(1)])
test_total_areas.append(test.sum())
test_atom_areas.append(test)
ref = np.array(
two_sphere_area(big_atom.radius, little_atom.radius, distance)
)
ref_total_areas.append(ref.sum())
ref_atom_areas.append(ref)
test_atom_areas = np.array(test_atom_areas)
test_total_areas = np.array(test_total_areas)
ref_atom_areas = np.array(ref_atom_areas)
ref_total_areas = np.array(ref_total_areas)
rel_difference = np.absolute(
np.divide(test_atom_areas - ref_atom_areas, np.sum(ref_atom_areas))
)
errors = []
if np.any(rel_difference > atom_tolerance):
ref_series = pd.Series(index=distances, data=ref_total_areas)
ref_series.index.name = "Dist"
ref_df = pd.DataFrame(index=distances, data=ref_atom_areas)
ref_df.index.name = "Dist"
test_df = pd.DataFrame(index=distances, data=test_atom_areas)
test_df.index.name = "Dist"
abs_diff_df = pd.DataFrame(
index=distances,
data=np.absolute(ref_atom_areas - test_atom_areas),
)
abs_diff_df.index.name = "Dist"
rel_diff_df =
|
pd.DataFrame(index=distances, data=rel_difference)
|
pandas.DataFrame
|
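As a quick standalone check of the geometry encoded in `two_sphere_area` above: two spheres separated by more than the sum of their radii keep their full surface areas. This sketch assumes the helper defined in the test module is in scope.

```python
import numpy as np

r1, r2, d = 2.0, 1.0, 5.0                  # d > r1 + r2, so the spheres do not overlap
full1 = 4.0 * np.pi * r1 ** 2
full2 = 4.0 * np.pi * r2 ** 2
area1, area2 = two_sphere_area(r1, r2, d)  # helper defined above
assert np.isclose(area1, full1) and np.isclose(area2, full2)
```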
import pandas as pd
import datetime
import numpy as np
df_1 = pd.read_csv('doc1.txt', encoding='ISO-8859-1', sep = ';')
df_2 = pd.read_csv("doc2.txt", encoding="ISO-8859-1", sep = ";")
df_3 = pd.read_csv('/content/drive/MyDrive/2022/Aplicação/TratarDados/ARQUIVOS TESTE/21055Z1 - 300.txt', encoding='ISO-8859-1', sep = ';')
df_4 = pd.read_csv('/content/drive/MyDrive/2022/Aplicação/TratarDados/ARQUIVOS TESTE/21058N1 - 400.txt', encoding='ISO-8859-1', sep = ';')
df_5 = pd.read_csv('/content/drive/MyDrive/2022/Aplicação/TratarDados/ARQUIVOS TESTE/53325Z2A.1- 500.txt', encoding='ISO-8859-1', sep = ';')
df_6 =
|
pd.read_csv('/content/drive/MyDrive/2022/Aplicação/TratarDados/ARQUIVOS TESTE/53555Z2.2 - 300.txt', encoding='ISO-8859-1', sep = ';')
|
pandas.read_csv
|
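Since every file above is read with the same encoding and separator, the repetition can be collapsed into a loop; a small sketch of that pattern (the path list is a placeholder):

```python
import pandas as pd

# Placeholder paths; all files share the same encoding and separator.
paths = ["doc1.txt", "doc2.txt"]
frames = [pd.read_csv(p, encoding="ISO-8859-1", sep=";") for p in paths]
```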
# coding=utf-8
# Author: <NAME>
# Date: Jan 13, 2020
#
# Description: Reads a gene-feature table (X) and computed machine learning models
#
#
import numpy as np
import pandas as pd
pd.set_option('display.max_rows', 100)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
#
from sklearn.manifold import TSNE
from sklearn.preprocessing import MinMaxScaler
from sklearn.feature_extraction import DictVectorizer
#
from utils import ensurePathExists
from collections import OrderedDict
import argparse
def get_module_dict_from_module_id(mid, modules):
for module in modules:
if module['id'] == mid:
return module
if __name__ == '__main__':
#
# Args
#
parser = argparse.ArgumentParser()
parser.add_argument("--celltype", default='spermatocyte', type=str, choices=['spermatocyte', 'enterocyte'], help="Cell type. Must be either 'spermatocyte' or 'enterocyte'. Defaults to spermatocyte")
parser.add_argument('--layer', default='DM', type=str, choices=['HS', 'MM', 'DM'], help="Layer/Species.")
args = parser.parse_args()
#
celltype = args.celltype # spermatocyte or enterocyte
layer = species = args.layer
network = 'thr' # 'thr'
threshold = 0.5
threshold_str = str(threshold).replace('.', 'p')
print('Load X-y features')
rMLFile = 'results/matrix-x-vector-y/ml-{celltype:s}-{layer:s}-X-y.csv.gz'.format(celltype=celltype, layer=layer)
df =
|
pd.read_csv(rMLFile, index_col=0)
|
pandas.read_csv
|
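The script above loads a gene-feature matrix and imports `MinMaxScaler` and `TSNE`; a minimal sketch of scaling features and embedding them in two dimensions, on a toy matrix (the data below is random and only stands in for the real X):

```python
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from sklearn.manifold import TSNE

# Toy stand-in for the gene-feature table X (random values).
X = pd.DataFrame(np.random.rand(20, 5), columns=[f"f{i}" for i in range(5)])

X_scaled = MinMaxScaler().fit_transform(X)
embedding = TSNE(n_components=2, perplexity=5.0, init="random").fit_transform(X_scaled)
print(embedding.shape)  # (20, 2)
```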
import pandas as pd
try:
from boolean1_neg import boolean1
except ImportError:
from contra_qa.text_generation.boolean1_neg import boolean1
try:
from boolean2_S_and import boolean2
except ImportError:
from contra_qa.text_generation.boolean2_S_and import boolean2
try:
from boolean3_NP_and import boolean3
except ImportError:
from contra_qa.text_generation.boolean3_NP_and import boolean3
try:
from boolean4_VP_and import boolean4
except ImportError:
from contra_qa.text_generation.boolean4_VP_and import boolean4
try:
from boolean5_AP_and import boolean5
except ImportError:
from contra_qa.text_generation.boolean5_AP_and import boolean5
try:
from boolean6_implicit_and import boolean6
except ImportError:
from contra_qa.text_generation.boolean6_implicit_and import boolean6
try:
from boolean7_S_or import boolean7
except ImportError:
from contra_qa.text_generation.boolean7_S_or import boolean7
try:
from boolean8_NP_or import boolean8
except ImportError:
from contra_qa.text_generation.boolean8_NP_or import boolean8
try:
from boolean9_VP_or import boolean9
except ImportError:
from contra_qa.text_generation.boolean9_VP_or import boolean9
try:
from boolean10_AP_or import boolean10
except ImportError:
from contra_qa.text_generation.boolean10_AP_or import boolean10
def create_all():
boolean1()
boolean2()
boolean3()
boolean4()
boolean5()
boolean6()
boolean7()
boolean8()
boolean9()
boolean10()
# creating the AND dataset
df2_tr = pd.read_csv("data/boolean2_train.csv")
df3_tr = pd.read_csv("data/boolean3_train.csv")
df4_tr = pd.read_csv("data/boolean4_train.csv")
df5_tr = pd.read_csv("data/boolean5_train.csv")
df6_tr = pd.read_csv("data/boolean6_train.csv")
df2_te = pd.read_csv("data/boolean2_test.csv")
df3_te = pd.read_csv("data/boolean3_test.csv")
df4_te = pd.read_csv("data/boolean4_test.csv")
df5_te = pd.read_csv("data/boolean5_test.csv")
df6_te = pd.read_csv("data/boolean6_test.csv")
train_and = [df2_tr, df3_tr, df4_tr, df5_tr, df6_tr]
test_and = [df2_te, df3_te, df4_te, df5_te, df6_te]
df_train_and = pd.concat(train_and)
df_test_and = pd.concat(test_and)
df_train_and = df_train_and.sample(frac=1).reset_index(drop=True)
df_test_and = df_test_and.sample(frac=1).reset_index(drop=True)
df_train_and = df_train_and.iloc[:10000]
df_test_and = df_test_and.iloc[:1000]
df_train_and.to_csv("data/boolean_AND_train.csv", index=False)
df_test_and.to_csv("data/boolean_AND_test.csv", index=False)
# creating the OR dataset
df7_tr = pd.read_csv("data/boolean7_train.csv")
df8_tr = pd.read_csv("data/boolean8_train.csv")
df9_tr = pd.read_csv("data/boolean9_train.csv")
df10_tr = pd.read_csv("data/boolean10_train.csv")
df7_te = pd.read_csv("data/boolean7_test.csv")
df8_te = pd.read_csv("data/boolean8_test.csv")
df9_te = pd.read_csv("data/boolean9_test.csv")
df10_te =
|
pd.read_csv("data/boolean10_test.csv")
|
pandas.read_csv
|
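The AND/OR dataset assembly above repeats the same concat, shuffle, truncate, and save steps for each group of files. A small helper capturing that pattern might look like the sketch below (the helper itself is not part of the original script; the commented call mirrors its file names):

```python
import pandas as pd

def build_combined_dataset(csv_paths, out_path, n_rows):
    """Concatenate CSVs, shuffle rows, keep the first n_rows, and write the result."""
    combined = pd.concat([pd.read_csv(p) for p in csv_paths])
    combined = combined.sample(frac=1).reset_index(drop=True)
    combined.iloc[:n_rows].to_csv(out_path, index=False)

# e.g. the OR training split:
# build_combined_dataset(
#     [f"data/boolean{i}_train.csv" for i in (7, 8, 9, 10)],
#     "data/boolean_OR_train.csv", 10000)
```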
# -*- coding: utf-8 -*-
# Copyright (c) 2018-2021, earthobservations developers.
# Distributed under the MIT License. See LICENSE for more info.
from datetime import datetime
import numpy as np
import pandas as pd
import pytest
import pytz
from freezegun import freeze_time
from pandas import Timestamp
from pandas._testing import assert_frame_equal
from wetterdienst.exceptions import StartDateEndDateError
from wetterdienst.metadata.period import Period
from wetterdienst.metadata.resolution import Resolution
from wetterdienst.metadata.timezone import Timezone
from wetterdienst.provider.dwd.observation import (
DwdObservationDataset,
DwdObservationPeriod,
DwdObservationResolution,
)
from wetterdienst.provider.dwd.observation.api import DwdObservationRequest
from wetterdienst.provider.dwd.observation.metadata.parameter import (
DwdObservationParameter,
)
from wetterdienst.settings import Settings
def test_dwd_observation_data_api():
request = DwdObservationRequest(
parameter=["precipitation_height"],
resolution="daily",
period=["recent", "historical"],
)
assert request == DwdObservationRequest(
parameter=[DwdObservationParameter.DAILY.PRECIPITATION_HEIGHT],
resolution=Resolution.DAILY,
period=[Period.HISTORICAL, Period.RECENT],
start_date=None,
end_date=None,
)
assert request.parameter == [
(
DwdObservationParameter.DAILY.CLIMATE_SUMMARY.PRECIPITATION_HEIGHT,
DwdObservationDataset.CLIMATE_SUMMARY,
)
]
@pytest.mark.remote
def test_dwd_observation_data_dataset():
"""Request a parameter set"""
expected = DwdObservationRequest(
parameter=["kl"],
resolution="daily",
period=["recent", "historical"],
).filter_by_station_id(station_id=(1,))
given = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
period=[DwdObservationPeriod.HISTORICAL, DwdObservationPeriod.RECENT],
start_date=None,
end_date=None,
).filter_by_station_id(
station_id=(1,),
)
assert given == expected
expected = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
period=[DwdObservationPeriod.HISTORICAL, DwdObservationPeriod.RECENT],
).filter_by_station_id(
station_id=(1,),
)
given = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
period=[DwdObservationPeriod.HISTORICAL, DwdObservationPeriod.RECENT],
start_date=None,
end_date=None,
).filter_by_station_id(
station_id=(1,),
)
assert expected == given
assert expected.parameter == [
(
DwdObservationDataset.CLIMATE_SUMMARY,
DwdObservationDataset.CLIMATE_SUMMARY,
)
]
def test_dwd_observation_data_parameter():
"""Test parameter given as single value without dataset"""
request = DwdObservationRequest(
parameter=["precipitation_height"],
resolution="daily",
period=["recent", "historical"],
)
assert request.parameter == [
(
DwdObservationParameter.DAILY.CLIMATE_SUMMARY.PRECIPITATION_HEIGHT,
DwdObservationDataset.CLIMATE_SUMMARY,
)
]
request = DwdObservationRequest(
parameter=["climate_summary"],
resolution="daily",
period=["recent", "historical"],
)
assert request.parameter == [(DwdObservationDataset.CLIMATE_SUMMARY, DwdObservationDataset.CLIMATE_SUMMARY)]
def test_dwd_observation_data_parameter_dataset_pairs():
"""Test parameters given as parameter - dataset pair"""
request = DwdObservationRequest(
parameter=[("climate_summary", "climate_summary")],
resolution="daily",
period=["recent", "historical"],
)
assert request.parameter == [(DwdObservationDataset.CLIMATE_SUMMARY, DwdObservationDataset.CLIMATE_SUMMARY)]
request = DwdObservationRequest(
parameter=[("precipitation_height", "precipitation_more")],
resolution="daily",
period=["recent", "historical"],
)
assert request.parameter == [
(
DwdObservationParameter.DAILY.PRECIPITATION_MORE.PRECIPITATION_HEIGHT,
DwdObservationDataset.PRECIPITATION_MORE,
)
]
@pytest.mark.remote
def test_dwd_observation_data_fails():
# station id
assert (
DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
period=[DwdObservationPeriod.HISTORICAL],
resolution=DwdObservationResolution.DAILY,
)
.filter_by_station_id(
station_id=["test"],
)
.df.empty
)
with pytest.raises(StartDateEndDateError):
DwdObservationRequest(
parameter=["abc"],
resolution=DwdObservationResolution.DAILY,
start_date="1971-01-01",
end_date="1951-01-01",
)
def test_dwd_observation_data_dates():
# time input
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date="1971-01-01",
).filter_by_station_id(
station_id=[1],
)
assert request == DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
period=[
DwdObservationPeriod.HISTORICAL,
],
start_date=datetime(1971, 1, 1),
end_date=datetime(1971, 1, 1),
).filter_by_station_id(
station_id=[1],
)
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
period=[DwdObservationPeriod.HISTORICAL],
end_date="1971-01-01",
).filter_by_station_id(
station_id=[1],
)
assert request == DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
period=[
DwdObservationPeriod.HISTORICAL,
],
start_date=datetime(1971, 1, 1),
end_date=datetime(1971, 1, 1),
).filter_by_station_id(
station_id=[1],
)
with pytest.raises(StartDateEndDateError):
DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date="1971-01-01",
end_date="1951-01-01",
)
def test_request_period_historical():
# Historical period expected
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date="1971-01-01",
)
assert request.period == [
Period.HISTORICAL,
]
def test_request_period_historical_recent():
# Historical and recent period expected
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date="1971-01-01",
end_date=pd.Timestamp(datetime.utcnow()) - pd.Timedelta(days=400),
)
assert request.period == [
Period.HISTORICAL,
Period.RECENT,
]
def test_request_period_historical_recent_now():
# Historical, recent and now period expected
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date="1971-01-01",
end_date=pd.Timestamp(datetime.utcnow()),
)
assert request.period == [
Period.HISTORICAL,
Period.RECENT,
Period.NOW,
]
@freeze_time(datetime(2022, 1, 29, 1, 30, tzinfo=pytz.timezone(Timezone.GERMANY.value)))
def test_request_period_recent_now():
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date=pd.Timestamp(datetime.utcnow()) - pd.Timedelta(hours=2),
)
assert request.period == [Period.RECENT, Period.NOW]
@freeze_time(datetime(2022, 1, 29, 2, 30, tzinfo=pytz.timezone(Timezone.GERMANY.value)))
def test_request_period_now():
# Now period
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date=pd.Timestamp(datetime.utcnow()) - pd.Timedelta(hours=2),
)
assert request.period == [Period.NOW]
@freeze_time("2021-03-28T18:38:00+02:00")
def test_request_period_now_fixeddate():
# Now period
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date=pd.Timestamp(datetime.utcnow()) - pd.Timedelta(hours=2),
)
assert Period.NOW in request.period
def test_request_period_empty():
# No period (for example in future)
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date=pd.Timestamp(datetime.utcnow()) + pd.Timedelta(days=720),
)
assert request.period == []
@pytest.mark.remote
def test_dwd_observation_data_result_missing_data():
"""Test for DataFrame having empty values for dates where the station should not
have values"""
Settings.tidy = True
Settings.humanize = True
Settings.si_units = True
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date="1933-12-27", # few days before official start
end_date="1934-01-04", # few days after official start,
).filter_by_station_id(
station_id=[1048],
)
# Leave only one column to potentially contain NaN which is VALUE
df = request.values.all().df.drop("quality", axis=1)
df_1933 = df[df["date"].dt.year == 1933]
df_1934 = df[df["date"].dt.year == 1934]
assert not df_1933.empty and df_1933.dropna().empty
assert not df_1934.empty and not df_1934.dropna().empty
request = DwdObservationRequest(
parameter=DwdObservationParameter.HOURLY.TEMPERATURE_AIR_MEAN_200,
resolution=DwdObservationResolution.HOURLY,
start_date="2020-06-09 12:00:00", # no data at this time (reason unknown)
end_date="2020-06-09 12:00:00",
).filter_by_station_id(
station_id=["03348"],
)
df = request.values.all().df
assert_frame_equal(
df,
pd.DataFrame(
{
"station_id": pd.Categorical(["03348"]),
"dataset": pd.Categorical(["temperature_air"]),
"parameter": pd.Categorical(["temperature_air_mean_200"]),
"date": [datetime(2020, 6, 9, 12, 0, 0, tzinfo=pytz.UTC)],
"value": pd.Series([pd.NA], dtype=pd.Float64Dtype()).astype(float),
"quality": pd.Series([pd.NA], dtype=pd.Float64Dtype()).astype(float),
}
),
check_categorical=False,
)
@pytest.mark.remote
def test_dwd_observation_data_result_tabular():
"""Test for actual values (tabular)"""
Settings.tidy = False
Settings.humanize = False
Settings.si_units = False
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date="1933-12-31", # few days before official start
end_date="1934-01-01", # few days after official start,
).filter_by_station_id(
station_id=[1048],
)
df = request.values.all().df
assert list(df.columns.values) == [
"station_id",
"dataset",
"date",
"qn_3",
"fx",
"fm",
"qn_4",
"rsk",
"rskf",
"sdk",
"shk_tag",
"nm",
"vpm",
"pm",
"tmk",
"upm",
"txk",
"tnk",
"tgk",
]
assert_frame_equal(
df,
pd.DataFrame(
{
"station_id": pd.Categorical(["01048"] * 2),
"dataset": pd.Categorical(["climate_summary"] * 2),
"date": [
datetime(1933, 12, 31, tzinfo=pytz.UTC),
datetime(1934, 1, 1, tzinfo=pytz.UTC),
],
"qn_3": pd.Series([pd.NA, pd.NA], dtype=pd.Int64Dtype()),
"fx": pd.to_numeric([pd.NA, pd.NA], errors="coerce"),
"fm": pd.to_numeric([pd.NA, pd.NA], errors="coerce"),
"qn_4": pd.Series([pd.NA, 1], dtype=pd.Int64Dtype()),
"rsk": pd.to_numeric([pd.NA, 0.2], errors="coerce"),
"rskf": pd.to_numeric([pd.NA, 8], errors="coerce"),
"sdk": pd.to_numeric([pd.NA, pd.NA], errors="coerce"),
"shk_tag": pd.Series([pd.NA, 0], dtype=pd.Int64Dtype()),
"nm": pd.to_numeric([pd.NA, 8.0], errors="coerce"),
"vpm": pd.to_numeric([pd.NA, 6.4], errors="coerce"),
"pm": pd.to_numeric([pd.NA, 1008.60], errors="coerce"),
"tmk": pd.to_numeric([pd.NA, 0.5], errors="coerce"),
"upm": pd.to_numeric([pd.NA, 97.00], errors="coerce"),
"txk": pd.to_numeric([pd.NA, 0.7], errors="coerce"),
"tnk": pd.to_numeric([pd.NA, 0.2], errors="coerce"),
"tgk": pd.to_numeric([pd.NA, pd.NA], errors="coerce"),
}
),
check_categorical=False,
)
@pytest.mark.remote
def test_dwd_observation_data_result_tabular_metric():
"""Test for actual values (tabular) in metric units"""
Settings.tidy = False
Settings.humanize = False
Settings.si_units = True
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date="1933-12-31", # few days before official start
end_date="1934-01-01", # few days after official start,
).filter_by_station_id(
station_id=[1048],
)
df = request.values.all().df
assert list(df.columns.values) == [
"station_id",
"dataset",
"date",
"qn_3",
"fx",
"fm",
"qn_4",
"rsk",
"rskf",
"sdk",
"shk_tag",
"nm",
"vpm",
"pm",
"tmk",
"upm",
"txk",
"tnk",
"tgk",
]
assert_frame_equal(
df,
pd.DataFrame(
{
"station_id": pd.Categorical(["01048"] * 2),
"dataset": pd.Categorical(["climate_summary"] * 2),
"date": [
datetime(1933, 12, 31, tzinfo=pytz.UTC),
datetime(1934, 1, 1, tzinfo=pytz.UTC),
],
"qn_3": pd.Series([pd.NA, pd.NA], dtype=pd.Int64Dtype()),
"fx": pd.to_numeric([pd.NA, pd.NA], errors="coerce"),
"fm": pd.to_numeric([pd.NA, pd.NA], errors="coerce"),
"qn_4": pd.Series([pd.NA, 1], dtype=pd.Int64Dtype()),
"rsk": pd.to_numeric([pd.NA, 0.2], errors="coerce"),
"rskf": pd.to_numeric([pd.NA, 8], errors="coerce"),
"sdk": pd.to_numeric([pd.NA, pd.NA], errors="coerce"),
"shk_tag": pd.Series([pd.NA, 0], dtype=pd.Int64Dtype()),
"nm": pd.to_numeric([pd.NA, 100.0], errors="coerce"),
"vpm": pd.to_numeric([pd.NA, 640.0], errors="coerce"),
"pm": pd.to_numeric([pd.NA, 100860.0], errors="coerce"),
"tmk": pd.to_numeric([pd.NA, 273.65], errors="coerce"),
"upm": pd.to_numeric([pd.NA, 97.00], errors="coerce"),
"txk": pd.to_numeric([pd.NA, 273.84999999999997], errors="coerce"),
"tnk": pd.to_numeric([pd.NA, 273.34999999999997], errors="coerce"),
"tgk": pd.to_numeric([pd.NA, pd.NA], errors="coerce"),
}
),
check_categorical=False,
)
@pytest.mark.remote
def test_dwd_observation_data_result_tidy_metric():
"""Test for actual values (tidy) in metric units"""
Settings.tidy = True
Settings.humanize = False
Settings.si_units = True
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date="1933-12-31", # few days before official start
end_date="1934-01-01", # few days after official start,
).filter_by_station_id(
station_id=(1048,),
)
df = request.values.all().df
assert list(df.columns.values) == [
"station_id",
"dataset",
"parameter",
"date",
"value",
"quality",
]
assert_frame_equal(
df,
pd.DataFrame(
{
"station_id": pd.Categorical(["01048"] * 28),
"dataset": pd.Categorical(["climate_summary"] * 28),
"parameter": pd.Categorical(
[
"fx",
"fx",
"fm",
"fm",
"rsk",
"rsk",
"rskf",
"rskf",
"sdk",
"sdk",
"shk_tag",
"shk_tag",
"nm",
"nm",
"vpm",
"vpm",
"pm",
"pm",
"tmk",
"tmk",
"upm",
"upm",
"txk",
"txk",
"tnk",
"tnk",
"tgk",
"tgk",
]
),
"date": [
datetime(1933, 12, 31, tzinfo=pytz.UTC),
datetime(1934, 1, 1, tzinfo=pytz.UTC),
datetime(1933, 12, 31, tzinfo=pytz.UTC),
datetime(1934, 1, 1, tzinfo=pytz.UTC),
datetime(1933, 12, 31, tzinfo=pytz.UTC),
datetime(1934, 1, 1, tzinfo=pytz.UTC),
datetime(1933, 12, 31, tzinfo=pytz.UTC),
datetime(1934, 1, 1, tzinfo=pytz.UTC),
datetime(1933, 12, 31, tzinfo=pytz.UTC),
datetime(1934, 1, 1, tzinfo=pytz.UTC),
datetime(1933, 12, 31, tzinfo=pytz.UTC),
datetime(1934, 1, 1, tzinfo=pytz.UTC),
datetime(1933, 12, 31, tzinfo=pytz.UTC),
datetime(1934, 1, 1, tzinfo=pytz.UTC),
datetime(1933, 12, 31, tzinfo=pytz.UTC),
datetime(1934, 1, 1, tzinfo=pytz.UTC),
datetime(1933, 12, 31, tzinfo=pytz.UTC),
datetime(1934, 1, 1, tzinfo=pytz.UTC),
datetime(1933, 12, 31, tzinfo=pytz.UTC),
datetime(1934, 1, 1, tzinfo=pytz.UTC),
datetime(1933, 12, 31, tzinfo=pytz.UTC),
datetime(1934, 1, 1, tzinfo=pytz.UTC),
datetime(1933, 12, 31, tzinfo=pytz.UTC),
datetime(1934, 1, 1, tzinfo=pytz.UTC),
datetime(1933, 12, 31, tzinfo=pytz.UTC),
datetime(1934, 1, 1, tzinfo=pytz.UTC),
datetime(1933, 12, 31, tzinfo=pytz.UTC),
datetime(1934, 1, 1, tzinfo=pytz.UTC),
],
"value": pd.to_numeric(
[
# FX
pd.NA,
pd.NA,
# FM
pd.NA,
pd.NA,
# RSK
pd.NA,
0.2,
# RSKF
pd.NA,
8,
# SDK
pd.NA,
pd.NA,
# SHK_TAG
pd.NA,
0,
# NM
pd.NA,
100.0,
# VPM
pd.NA,
640.0,
# PM
pd.NA,
100860.0,
# TMK
pd.NA,
273.65,
# UPM
pd.NA,
97.00,
# TXK
pd.NA,
273.84999999999997,
# TNK
pd.NA,
273.34999999999997,
# TGK
pd.NA,
pd.NA,
],
errors="coerce",
).astype(float),
"quality": pd.Series(
[
# FX
np.NaN,
np.NaN,
# FM
np.NaN,
np.NaN,
# RSK
np.NaN,
1,
# RSKF
np.NaN,
1,
# SDK
np.NaN,
np.NaN,
# SHK_TAG
np.NaN,
1,
# NM
np.NaN,
1,
# VPM
np.NaN,
1,
# PM
np.NaN,
1,
# TMK
np.NaN,
1,
# UPM
np.NaN,
1,
# TXK
np.NaN,
1,
# TNK
np.NaN,
1,
# TGK
np.NaN,
np.NaN,
],
dtype=float,
),
},
),
# Needed since pandas 1.2?
check_categorical=False,
)
@pytest.mark.remote
def test_dwd_observation_data_10_minutes_result_tidy():
"""Test for actual values (tidy) in metric units"""
Settings.tidy = True
Settings.humanize = False
Settings.si_units = False
request = DwdObservationRequest(
parameter=[DwdObservationParameter.MINUTE_10.TEMPERATURE_AIR.PRESSURE_AIR_SITE],
resolution=DwdObservationResolution.MINUTE_10,
start_date="1999-12-31 22:00",
end_date="1999-12-31 23:00",
).filter_by_station_id(
station_id=(1048,),
)
df = request.values.all().df
assert_frame_equal(
df,
pd.DataFrame(
{
"station_id": pd.Categorical(["01048"] * 7),
"dataset": pd.Categorical(["temperature_air"] * 7),
"parameter": pd.Categorical(["pp_10"] * 7),
"date": [
datetime(1999, 12, 31, 22, 00, tzinfo=pytz.UTC),
datetime(1999, 12, 31, 22, 10, tzinfo=pytz.UTC),
datetime(1999, 12, 31, 22, 20, tzinfo=pytz.UTC),
datetime(1999, 12, 31, 22, 30, tzinfo=pytz.UTC),
datetime(1999, 12, 31, 22, 40, tzinfo=pytz.UTC),
datetime(1999, 12, 31, 22, 50, tzinfo=pytz.UTC),
datetime(1999, 12, 31, 23, 00, tzinfo=pytz.UTC),
],
"value": pd.to_numeric(
[
996.1,
996.2,
996.2,
996.2,
996.3,
996.4,
pd.NA,
],
errors="coerce",
).astype(float),
"quality": pd.to_numeric([1, 1, 1, 1, 1, 1, pd.NA], errors="coerce").astype(float),
},
),
# Needed since pandas 1.2?
check_categorical=False,
)
@pytest.mark.remote
def test_dwd_observation_data_monthly_tidy():
"""Test for actual values (tidy) in metric units"""
Settings.tidy = True
Settings.humanize = True
Settings.si_units = True
request = DwdObservationRequest(
parameter=[DwdObservationParameter.MONTHLY.PRECIPITATION_HEIGHT],
resolution=DwdObservationResolution.MONTHLY,
start_date="2020-01-01",
end_date="2020-12-31",
).filter_by_station_id("00433")
values = request.values.all().df
expected_df = pd.DataFrame(
{
"station_id": pd.Categorical(["00433"] * 12),
"dataset": pd.Categorical(["climate_summary"] * 12),
"parameter":
|
pd.Categorical(["precipitation_height"] * 12)
|
pandas.Categorical
|
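The expected frames in these tests store repeated labels such as station ids and dataset names with `pd.Categorical`. A minimal standalone sketch of what that constructor produces:

```python
import pandas as pd

station = pd.Series(pd.Categorical(["01048"] * 3), name="station_id")
print(station.dtype)           # category
print(station.cat.categories)  # Index(['01048'], dtype='object')
```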
#!/usr/bin/env python3
# encoding: utf-8
import re, json, sys
import numpy as np
import pandas as pd
from models.exchange.coinbase_pro import AuthAPI as CBAuthAPI, PublicAPI as CBPublicAPI
def printHelp():
print("Create a config.json:")
print("* Add 1 or more portfolios", "\n")
print("{")
print(' "<portfolio_name>" : {')
print(' "api_key" : "<coinbase_pro_api_key>",')
print(' "api_secret" : "<coinbase_pro_api_secret>",')
print(' "api_pass" : "<coinbase_pro_api_passphrase>",')
print(' "config" : {')
print(' "base_currency" : "<base_symbol>",')
print(' "quote_currency" : "<quote_symbol>"')
print(' "}')
print(" },")
print(' "<portfolio_name>" : {')
print(' "api_key" : "<coinbase_pro_api_key>",')
print(' "api_secret" : "<coinbase_pro_api_secret>",')
print(' "api_pass" : "<coinbase_pro_api_passphrase>",')
print(' "config" : {')
print(' "base_currency" : "<base_symbol>",')
print(' "quote_currency" : "<quote_symbol>"')
print(' "}')
print(" }")
print("}", "\n")
print('<portfolio_name> - Coinbase Pro portfolio name E.g. "Default portfolio"')
print("<coinbase_pro_api_key> - Coinbase Pro API key for the portfolio")
print("<coinbase_pro_api_secret> - Coinbase Pro API secret for the portfolio")
print(
"<coinbase_pro_api_passphrase> - Coinbase Pro API passphrase for the portfolio"
)
print("<base_symbol> - Base currency E.g. BTC")
print("<quote_symbol> - Base currency E.g. GBP")
print("\n")
try:
with open("../.secrets/coinbaseprotracker/coinbaseprotracker-config.json") as config_file:
json_config = json.load(config_file)
if not isinstance(json_config, dict):
raise TypeError("config.json is invalid.")
if len(list(json_config)) < 1:
printHelp()
sys.exit()
df = pd.DataFrame()
for portfolio in list(json_config):
base_currency = ""
quote_currency = ""
market = ""
portfolio_config = json_config[portfolio]
if (
"api_key" in portfolio_config
and "api_secret" in portfolio_config
and "api_pass" in portfolio_config
and "config" in portfolio_config
):
api_key = portfolio_config["api_key"]
api_secret = portfolio_config["api_secret"]
api_pass = portfolio_config["api_pass"]
config = portfolio_config["config"]
if ("cryptoMarket" not in config and "base_currency" not in config) and (
"fiatMarket" not in config and "quote_currency" not in config
):
printHelp()
sys.exit()
if "cryptoMarket" in config:
base_currency = config["cryptoMarket"]
elif "base_currency" in config:
base_currency = config["base_currency"]
if "fiatMarket" in config:
quote_currency = config["fiatMarket"]
elif "base_currency" in config:
quote_currency = config["quote_currency"]
market = base_currency + "-" + quote_currency
api = CBAuthAPI(api_key, api_secret, api_pass)
orders = api.getOrders()
df = pd.concat([df, orders])
transfers = api.getTransfers()
df =
|
pd.concat([df, transfers])
|
pandas.concat
|
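The loop above grows `df` by calling `pd.concat` once per portfolio for both orders and transfers. Repeated concatenation in a loop re-copies the accumulated frame each time; collecting the pieces and concatenating once at the end is usually cheaper. A sketch of that pattern (placeholder frames):

```python
import pandas as pd

pieces = []
for part in (pd.DataFrame({"x": [1]}), pd.DataFrame({"x": [2]})):  # placeholders
    pieces.append(part)
df = pd.concat(pieces, ignore_index=True)
print(df)
```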
import timeboard as tb
from timeboard.interval import Interval, _VoidInterval
from timeboard.workshift import Workshift
from timeboard.exceptions import (OutOfBoundsError, PartialOutOfBoundsError,
VoidIntervalError)
from timeboard.timeboard import _Location, OOB_LEFT, OOB_RIGHT, LOC_WITHIN
import datetime
import pandas as pd
import numpy as np
import pytest
def tb_12_days():
return tb.Timeboard(base_unit_freq='D',
start='31 Dec 2016', end='12 Jan 2017',
layout=[0, 1, 0])
# 31 01 02 03 04 05 06 07 08 09 10 11 12
# 0 1 0 0 1 0 0 1 0 0 1 0 0
class TestIntervalLocatorFromReference(object):
def test_interval_locator_default(self):
clnd = tb_12_days()
assert clnd._get_interval_locs_from_reference(
None, False, False) == [_Location(0, LOC_WITHIN),
_Location(12, LOC_WITHIN)]
def test_interval_locator_with_two_ts(self):
clnd = tb_12_days()
assert clnd._get_interval_locs_from_reference(
('02 Jan 2017 15:00', '08 Jan 2017 15:00'), False, False) == [
_Location(2, LOC_WITHIN), _Location(8, LOC_WITHIN)]
# reverse is ok; it is taken care later in 'get_interval'
assert clnd._get_interval_locs_from_reference(
('08 Jan 2017 15:00', '02 Jan 2017 15:00'), False, False) == [
_Location(8, LOC_WITHIN), _Location(2, LOC_WITHIN)]
def test_interval_locator_with_with_excessive_item(self):
clnd = tb_12_days()
assert clnd._get_interval_locs_from_reference(
('02 Jan 2017 15:00','08 Jan 2017 15:00','something'), False,
False) == [_Location(2, LOC_WITHIN), _Location(8, LOC_WITHIN)]
def test_interval_locator_with_two_pd_ts(self):
clnd = tb_12_days()
assert clnd._get_interval_locs_from_reference(
(pd.Timestamp('02 Jan 2017 15:00'),
pd.Timestamp('08 Jan 2017 15:00')),
False, False) == [
_Location(2, LOC_WITHIN), _Location(8, LOC_WITHIN)]
def test_interval_locator_with_two_datettime_ts(self):
clnd = tb_12_days()
assert clnd._get_interval_locs_from_reference(
(datetime.datetime(2017, 1, 2, 15, 0, 0),
datetime.datetime(2017, 1, 8, 15, 0, 0)),
False, False) == [
_Location(2, LOC_WITHIN), _Location(8, LOC_WITHIN)]
def test_interval_locator_with_OOB_ts(self):
clnd = tb_12_days()
# only one end of the interval is OOB
assert clnd._get_interval_locs_from_reference(
('02 Jan 2017 15:00', '13 Jan 2017 15:00'), False, False) == [
_Location(2, LOC_WITHIN), _Location(None, OOB_RIGHT)]
assert clnd._get_interval_locs_from_reference(
('30 Dec 2016 15:00', '08 Jan 2017 15:00'), False, False) == [
_Location(None, OOB_LEFT), _Location(8, LOC_WITHIN)]
# the interval spans over the timeboard
assert clnd._get_interval_locs_from_reference(
('30 Dec 2016 15:00', '13 Jan 2017 15:00'), False, False) == [
_Location(None, OOB_LEFT), _Location(None, OOB_RIGHT)]
assert clnd._get_interval_locs_from_reference(
('13 Jan 2017 15:00', '30 Dec 2016 15:00'), False, False) == [
_Location(None, OOB_RIGHT), _Location(None, OOB_LEFT)]
# the interval is completely outside the timeboard
assert clnd._get_interval_locs_from_reference(
('25 Dec 2016 15:00', '30 Dec 2016 15:00'), False, False) == [
_Location(None, OOB_LEFT), _Location(None, OOB_LEFT)]
assert clnd._get_interval_locs_from_reference(
('30 Dec 2016 15:00', '25 Dec 2016 15:00'), False, False) == [
_Location(None, OOB_LEFT), _Location(None, OOB_LEFT)]
assert clnd._get_interval_locs_from_reference(
('13 Jan 2017 15:00', '15 Jan 2017 15:00'), False, False) == [
_Location(None, OOB_RIGHT), _Location(None, OOB_RIGHT)]
assert clnd._get_interval_locs_from_reference(
('15 Jan 2017 15:00', '13 Jan 2017 15:00'), False, False) == [
_Location(None, OOB_RIGHT), _Location(None, OOB_RIGHT)]
def test_interval_locator_from_pd_periods(self):
clnd = tb_12_days()
# if we could not directly Timestamp() a reference, we try to call its
# `to_timestamp` method which would return reference's start time
# First day of Jan is inside clnd
assert clnd._get_interval_locs_from_reference(
(pd.Period('02 Jan 2017', freq='M'), '11 Jan 2017 15:00'),
False, False) == [
_Location(1, LOC_WITHIN), _Location(11, LOC_WITHIN)]
# While 31 Dec is within clnd, the first day of Dec is outside
assert clnd._get_interval_locs_from_reference(
(pd.Period('31 Dec 2016', freq='M'), '11 Jan 2017 15:00'),
False, False) == [
_Location(None, OOB_LEFT), _Location(11, LOC_WITHIN)]
# freq=W begins weeks on Mon which is 02 Jan 2017
assert clnd._get_interval_locs_from_reference(
(pd.Period('05 Jan 2017', freq='W'), '11 Jan 2017 15:00'),
False, False) == [
_Location(2, LOC_WITHIN), _Location(11, LOC_WITHIN)]
# freq=W-MON ends weeks on Mondays, and 02 Jan is Monday,
# but this week begins on Tue 27 Dec 2016 which is outside the timeboard
assert clnd._get_interval_locs_from_reference(
(pd.Period('02 Jan 2017', freq='W-MON'), '11 Jan 2017 15:00'),
False, False) == [
_Location(None, OOB_LEFT), _Location(11, LOC_WITHIN)]
def test_interval_locator_with_bad_ts(self):
clnd = tb_12_days()
with pytest.raises(ValueError):
clnd._get_interval_locs_from_reference(
('bad_timestamp', '08 Jan 2017 15:00'), False, False)
with pytest.raises(ValueError):
clnd._get_interval_locs_from_reference(
('02 Jan 2017 15:00', 'bad_timestamp'), False, False)
def test_interval_locator_with_singletons(self):
clnd = tb_12_days()
with pytest.raises(TypeError):
clnd._get_interval_locs_from_reference(('08 Jan 2017 15:00',),
False, False)
with pytest.raises(TypeError):
clnd._get_interval_locs_from_reference('08 Jan 2017 15:00',
False, False)
with pytest.raises(TypeError):
clnd._get_interval_locs_from_reference(
pd.Timestamp('08 Jan 2017 15:00'), False, False)
class TestIntervalStripLocs(object):
def test_interval_strip_locs(self):
clnd = tb_12_days()
assert clnd._strip_interval_locs(
[_Location(2,'anything'),_Location(8, 'whatever')], False, False) \
== [_Location(2,'anything'),_Location(8, 'whatever')]
assert clnd._strip_interval_locs(
[_Location(2,'anything'),_Location(8, 'whatever')], True, False) \
== [_Location(3,'anything'),_Location(8, 'whatever')]
assert clnd._strip_interval_locs(
[_Location(2,'anything'),_Location(8, 'whatever')], False, True) \
== [_Location(2,'anything'),_Location(7, 'whatever')]
assert clnd._strip_interval_locs(
[_Location(2,'anything'),_Location(8, 'whatever')], True, True) \
== [_Location(3,'anything'),_Location(7, 'whatever')]
assert clnd._strip_interval_locs(
[_Location(2,'anything'),_Location(4, 'whatever')], True, True) \
== [_Location(3,'anything'),_Location(3, 'whatever')]
def test_interval_strip_locs_single_unit(self):
clnd = tb_12_days()
assert clnd._strip_interval_locs(
[_Location(2,'anything'),_Location(2, 'whatever')], False, False) \
== [_Location(2,'anything'),_Location(2, 'whatever')]
assert clnd._strip_interval_locs(
[_Location(2,'anything'),_Location(2, 'whatever')], True, False) \
== [_Location(3,'anything'),_Location(2, 'whatever')]
assert clnd._strip_interval_locs(
[_Location(2,'anything'),_Location(2, 'whatever')], False, True) \
== [_Location(2,'anything'),_Location(1, 'whatever')]
assert clnd._strip_interval_locs(
[_Location(2,'anything'),_Location(2, 'whatever')], True, True) \
== [_Location(3,'anything'),_Location(1, 'whatever')]
def test_interval_strip_locs_corner_cases(self):
clnd = tb_12_days()
assert clnd._strip_interval_locs(
[_Location(0, 'anything'), _Location(0, 'whatever')], True, True) \
== [_Location(1, 'anything'), _Location(-1, 'whatever')]
assert clnd._strip_interval_locs(
[_Location(-4, 'anything'), _Location(-2, 'whatever')], True, True) \
== [_Location(-3, 'anything'), _Location(-3, 'whatever')]
assert clnd._strip_interval_locs(
[_Location(None,'anything'),_Location(2, 'whatever')], False, False) \
== [_Location(None,'anything'),_Location(2, 'whatever')]
assert clnd._strip_interval_locs(
[_Location(None,'anything'),_Location(2, 'whatever')], True, False) \
== [_Location(None,'anything'),_Location(2, 'whatever')]
assert clnd._strip_interval_locs(
[_Location(None,'anything'),_Location(2, 'whatever')], False, True) \
== [_Location(None,'anything'),_Location(1, 'whatever')]
assert clnd._strip_interval_locs(
[_Location(2,'anything'),_Location(None, 'whatever')], True, True) \
== [_Location(3,'anything'),_Location(None, 'whatever')]
assert clnd._strip_interval_locs(
[_Location(None,'anything'),_Location(None, 'whatever')], True, True) \
== [_Location(None,'anything'),_Location(None, 'whatever')]
def test_interval_strip_locs_bad_locs(self):
# '_strip_interval_locs' does not validate the type or value of 'locs';
# other parts of 'get_interval' are responsible for that
assert True
def test_get_interval_with_bad_closed(self):
clnd = tb_12_days()
with pytest.raises(ValueError):
clnd.get_interval(closed='010')
with pytest.raises(ValueError):
clnd.get_interval(closed=True)
class TestIntervalConstructorWithTS(object):
def test_interval_constructor_with_two_ts(self):
clnd = tb_12_days()
ivl = clnd.get_interval(('02 Jan 2017 15:00', '08 Jan 2017 15:00'))
assert ivl.start_time == datetime.datetime(2017, 1, 2, 0, 0, 0)
assert ivl.end_time > datetime.datetime(2017, 1, 8, 23, 59, 59)
assert ivl.end_time < datetime.datetime(2017, 1, 9, 0, 0, 0)
assert ivl._loc == (2,8)
assert len(ivl) == 7
ivlx = clnd(('02 Jan 2017 15:00', '08 Jan 2017 15:00'))
assert ivlx._loc == ivl._loc
def test_interval_constructor_with_none_ts(self):
clnd = tb_12_days()
ivl = clnd.get_interval((None, '08 Jan 2017 15:00'))
assert ivl._loc == (0,8)
ivl = clnd.get_interval((np.nan, '08 Jan 2017 15:00'))
assert ivl._loc == (0,8)
ivlx = clnd((None, '08 Jan 2017 15:00'))
assert ivlx._loc == ivl._loc
ivl = clnd.get_interval(('02 Jan 2017 15:00', None))
assert ivl._loc == (2,12)
ivl = clnd.get_interval(('02 Jan 2017 15:00', pd.NaT))
assert ivl._loc == (2,12)
ivl = clnd(('02 Jan 2017 15:00', pd.NaT))
assert ivl._loc == (2,12)
ivl = clnd.get_interval((None, None))
assert ivl._loc == (0,12)
ivl = clnd.get_interval((np.nan, None))
assert ivl._loc == (0,12)
ivl = clnd((pd.NaT, np.nan))
assert ivl._loc == (0,12)
def test_interval_iterator(self):
clnd = tb_12_days()
ivl = clnd.get_interval(('02 Jan 2017 15:00', '08 Jan 2017 15:00'))
wslist1 = []
for ws in ivl:
wslist1.append(ws)
wslist2 = list(ivl)
assert len(wslist1) == 7
assert len(wslist2) == 7
for i in range(7):
assert isinstance(wslist1[i], Workshift)
assert isinstance(wslist2[i], Workshift)
assert wslist1[i]._loc == i+2
assert wslist1[i]._loc == i+2
def test_interval_constructor_with_two_ts_open_ended(self):
clnd = tb_12_days()
ivl = clnd.get_interval(('02 Jan 2017 15:00', '08 Jan 2017 15:00'),
closed='11')
assert ivl._loc == (2,8)
assert len(ivl) == 7
ivlx = clnd(('02 Jan 2017 15:00', '08 Jan 2017 15:00'), closed='11')
assert ivlx._loc == ivl._loc
ivl = clnd.get_interval(('02 Jan 2017 15:00', '08 Jan 2017 15:00'),
closed='01')
assert ivl._loc == (3,8)
assert len(ivl) == 6
ivl = clnd.get_interval(('02 Jan 2017 15:00', '08 Jan 2017 15:00'),
closed='10')
assert ivl._loc == (2,7)
assert len(ivl) == 6
ivl = clnd.get_interval(('02 Jan 2017 15:00', '08 Jan 2017 15:00'),
closed='00')
assert ivl._loc == (3,7)
assert len(ivl) == 5
ivl = clnd.get_interval(('02 Jan 2017 15:00', '03 Jan 2017 15:00'),
closed='01')
assert ivl._loc == (3,3)
assert len(ivl) == 1
ivl = clnd.get_interval(('02 Jan 2017 15:00', '03 Jan 2017 15:00'),
closed='10')
assert ivl._loc == (2,2)
assert len(ivl) == 1
def test_interval_constructor_with_closed_leads_to_void(self):
clnd = tb_12_days()
ivl = clnd.get_interval(('02 Jan 2017 15:00', '02 Jan 2017 15:00'))
assert ivl._loc == (2,2)
assert len(ivl) == 1
with pytest.raises(VoidIntervalError):
clnd.get_interval(('02 Jan 2017 15:00', '02 Jan 2017 15:00'),
closed='01')
with pytest.raises(VoidIntervalError):
clnd(('02 Jan 2017 15:00', '02 Jan 2017 15:00'), closed='01')
with pytest.raises(VoidIntervalError):
clnd.get_interval(('02 Jan 2017 15:00', '02 Jan 2017 15:00'),
closed='10')
with pytest.raises(VoidIntervalError):
clnd.get_interval(('02 Jan 2017 15:00', '02 Jan 2017 15:00'),
closed='00')
with pytest.raises(VoidIntervalError):
clnd.get_interval(('02 Jan 2017 15:00', '03 Jan 2017 15:00'),
closed='00')
def test_interval_constructor_with_OOB_ts(self):
clnd = tb_12_days()
# only one end of the interval is OOB
with pytest.raises(PartialOutOfBoundsError):
ivl = clnd.get_interval(('02 Jan 2017 15:00', '13 Jan 2017 15:00'))
with pytest.raises(PartialOutOfBoundsError):
clnd.get_interval(('02 Jan 2017 15:00', '13 Jan 2017 15:00'),
clip_period=False)
with pytest.raises(PartialOutOfBoundsError):
clnd(('02 Jan 2017 15:00', '13 Jan 2017 15:00'),
clip_period=False)
with pytest.raises(PartialOutOfBoundsError):
ivl = clnd.get_interval(('30 Dec 2016 15:00', '08 Jan 2017 15:00'))
with pytest.raises(PartialOutOfBoundsError):
clnd.get_interval(('30 Dec 2016 15:00', '08 Jan 2017 15:00'),
clip_period=False)
# the interval spans over the timeboard
with pytest.raises(PartialOutOfBoundsError):
ivl = clnd.get_interval(('30 Dec 2016 15:00', '13 Jan 2017 15:00'))
with pytest.raises(PartialOutOfBoundsError):
clnd.get_interval(('30 Dec 2016 15:00', '13 Jan 2017 15:00'),
clip_period=False)
with pytest.raises(VoidIntervalError):
clnd.get_interval(('13 Jan 2017 15:00', '30 Dec 2016 15:00'))
# the interval is completely outside the timeboard
with pytest.raises(OutOfBoundsError):
clnd.get_interval(('25 Dec 2016 15:00', '30 Dec 2016 15:00'))
# OOBError is ok, since we cannot clip a complete outsider anyway
with pytest.raises(OutOfBoundsError):
clnd.get_interval(('30 Dec 2016 15:00', '25 Dec 2016 15:00'))
with pytest.raises(OutOfBoundsError):
clnd.get_interval(('13 Jan 2017 15:00', '15 Jan 2017 15:00'))
# OOBError is ok, since we cannot clip a complete outsider anyway
with pytest.raises(OutOfBoundsError):
clnd.get_interval(('15 Jan 2017 15:00', '13 Jan 2017 15:00'))
def test_interval_constructor_with_same_ts(self):
clnd = tb_12_days()
ivl = clnd.get_interval(('02 Jan 2017 15:00', '02 Jan 2017 15:00'))
assert ivl.start_time == datetime.datetime(2017, 1, 2, 0, 0, 0)
assert ivl.end_time > datetime.datetime(2017, 1, 2, 23, 59, 59)
assert ivl.end_time < datetime.datetime(2017, 1, 3, 0, 0, 0)
assert ivl._loc == (2,2)
assert len(ivl) == 1
def test_interval_constructor_reverse_ts_to_same_BU(self):
clnd = tb_12_days()
ivl = clnd.get_interval(('02 Jan 2017 15:00', '02 Jan 2017 10:00'))
assert ivl.start_time == datetime.datetime(2017, 1, 2, 0, 0, 0)
assert ivl.end_time > datetime.datetime(2017, 1, 2, 23, 59, 59)
assert ivl.end_time < datetime.datetime(2017, 1, 3, 0, 0, 0)
assert ivl._loc == (2,2)
assert len(ivl) == 1
def test_interval_constructor_reverse_ts(self):
clnd = tb_12_days()
with pytest.raises(VoidIntervalError):
clnd.get_interval(('08 Jan 2017 15:00', '02 Jan 2017 15:00'))
with pytest.raises(VoidIntervalError):
clnd(('08 Jan 2017 15:00', '02 Jan 2017 15:00'))
def test_interval_constructor_two_pd_periods_as_ts(self):
clnd = tb.Timeboard(base_unit_freq='D',
start='31 Dec 2016', end='31 Mar 2017',
layout=[0, 1, 0])
ivl = clnd.get_interval((
|
pd.Period('05 Jan 2017 15:00', freq='M')
|
pandas.Period
|
import pathlib
import numpy as np
import pandas as pd
import pytest
import settings
from local import moex
from local.moex.iss_quotes_t2 import QuotesT2DataManager, t2_shift, log_returns_with_div
from web.labels import CLOSE_PRICE, VOLUME
def test_quotes_t2_manager(tmpdir, monkeypatch):
data_dir = pathlib.Path(tmpdir.mkdir("test_quotes_t2"))
monkeypatch.setattr(settings, 'DATA_PATH', data_dir)
manager = QuotesT2DataManager('UPRO')
assert isinstance(manager.value, pd.DataFrame)
assert len(manager.value.columns) == 2
assert manager.value.index.is_monotonic_increasing
assert manager.value.index.is_unique
assert manager.value.index[0] == pd.to_datetime('2014-06-09')
assert manager.value.iloc[1, 0] == pytest.approx(2.9281)
assert manager.value.iloc[2, 1] == 44868000
assert manager.value.shape[0] > 1000
assert manager.value.loc['2018-09-07', CLOSE_PRICE] == pytest.approx(2.633)
assert manager.value.loc['2018-09-10', VOLUME] == 9303000
def test_quotes_t2_manager_update():
manager = QuotesT2DataManager('MTSS')
last_row = manager.value.iloc[-1:, :]
df = manager.download_update()
assert isinstance(df, pd.DataFrame)
assert df.shape == (1, 2)
assert np.allclose(df, last_row)
def test_quotes_t2():
df = moex.quotes_t2('BANEP')
assert isinstance(df, pd.DataFrame)
assert len(df.columns) == 2
assert df.index.is_monotonic_increasing
assert df.index.is_unique
assert df.index[0] == pd.to_datetime('2014-06-09')
assert df.iloc[1, 0] == pytest.approx(1833.0)
assert df.iloc[2, 1] == 23164
assert df.shape[0] > 1000
assert df.loc['2018-09-07', CLOSE_PRICE] == pytest.approx(1721.5)
assert df.loc['2018-09-10', VOLUME] == 35287
def test_prices_t2():
df = moex.prices_t2(('MSTT', 'SBERP'))
assert isinstance(df, pd.DataFrame)
assert len(df.columns) == 2
assert df.index.is_monotonic_increasing
assert df.index.is_unique
assert df.index[0] == pd.to_datetime('2013-03-25')
assert df.loc['2013-03-26', 'SBERP'] == pytest.approx(72.30)
assert np.isnan(df.loc['2013-03-27', 'MSTT'])
assert df.loc['2014-06-09', 'MSTT'] == pytest.approx(110.48)
assert df.shape[0] > 1000
assert df.loc['2018-09-07', 'MSTT'] == pytest.approx(92.0)
assert df.loc['2018-09-10', 'SBERP'] == pytest.approx(148.36)
def test_volumes_t2():
df = moex.volumes_t2(('GMKN', 'AKRN'))
assert isinstance(df, pd.DataFrame)
assert len(df.columns) == 2
assert df.index.is_monotonic_increasing
assert df.index.is_unique
assert df.index[0] == pd.to_datetime('2014-06-09')
assert df.loc['2014-06-09', 'GMKN'] == 212826
assert df.loc['2014-06-10', 'AKRN'] == 16888
assert df.shape[0] > 1000
assert df.loc['2018-09-07', 'GMKN'] == 100714
assert df.loc['2018-09-10', 'AKRN'] == 4631
def test_t2_shift():
index = moex.prices_t2(('NLMK',)).loc[:pd.Timestamp('2018-10-08')].index
assert pd.Timestamp('2018-05-14') == t2_shift(pd.Timestamp('2018-05-15'), index)
assert pd.Timestamp('2018-07-05') == t2_shift(pd.Timestamp('2018-07-08'), index)
assert pd.Timestamp('2018-09-28') == t2_shift(pd.Timestamp('2018-10-01'), index)
assert pd.Timestamp('2018-10-09') == t2_shift(pd.Timestamp('2018-10-10'), index)
assert pd.Timestamp('2018-10-11') == t2_shift(pd.Timestamp('2018-10-12'), index)
assert pd.Timestamp('2018-10-11') == t2_shift(pd.Timestamp('2018-10-13'), index)
assert pd.Timestamp('2018-10-11') == t2_shift(pd.Timestamp('2018-10-14'), index)
assert pd.Timestamp('2018-10-12') == t2_shift(pd.Timestamp('2018-10-15'), index)
assert pd.Timestamp('2018-10-17') == t2_shift(pd.Timestamp('2018-10-18'), index)
def test_log_returns_with_div():
data = log_returns_with_div(('GMKN', 'RTKMP', 'MTSS'), pd.Timestamp('2018-10-06'))
assert isinstance(data, pd.DataFrame)
assert list(data.columns) == ['GMKN', 'RTKMP', 'MTSS']
assert data.index[-13] == pd.Timestamp('2017-10-06')
assert data.index[-1] == pd.Timestamp('2018-10-06')
assert data.loc['2018-10-06', 'MTSS'] == pytest.approx(np.log(((275.1 + 0) / 256.1)))
assert data.loc['2018-10-06', 'GMKN'] == pytest.approx(np.log(((11292 + 776.02) / 11206)))
assert data.loc['2018-08-06', 'RTKMP'] == pytest.approx(np.log(((61.81 + 0) / 62)))
assert data.loc['2018-07-06', 'RTKMP'] == pytest.approx(np.log(((62 + 5.045825249373) / 64)))
data = log_returns_with_div(('GMKN', 'RTKMP', 'MTSS'), pd.Timestamp('2018-10-07'))
assert data.loc['2018-10-07', 'MTSS'] == pytest.approx(np.log(((275.1 + 0) / 256)))
data = log_returns_with_div(('GMKN', 'RTKMP', 'MTSS'),
|
pd.Timestamp('2018-10-08')
|
pandas.Timestamp
|
from unittest import TestCase
import pandas as pd
import pandas.util.testing as tm
import numpy as np
import trtools.core.topper as topper
import imp
imp.reload(topper)
arr = np.random.randn(10000)
s = pd.Series(arr)
df = tm.makeDataFrame()
class TestTopper(TestCase):
def __init__(self, *args, **kwargs):
TestCase.__init__(self, *args, **kwargs)
def runTest(self):
pass
def setUp(self):
pass
def test_topn_largest(self):
# get the n largest
bn_res = topper.bn_topn(arr, 10)
assert bn_res[0] == max(arr) # sanity check
pd_res = s.order(ascending=False)[:10]
np.testing.assert_almost_equal(bn_res, pd_res)
# change result to biggest to smallest
bn_res = topper.bn_topn(arr, 10, ascending=True)
assert bn_res[-1] == max(arr) # sanity check
pd_res = s.order(ascending=True)[-10:] # grab from end since we reversed
np.testing.assert_almost_equal(bn_res, pd_res)
def test_topn_big_N(self):
"""
When calling topn where N is greater than the number of non-nan values.
This can happen if you're tracking a Frame of returns where not all series start at the same time.
It's possible that at the beginning or end, or anytime for that matter, you might not have enough
values. This screws up the logic.
"""
# test data
arr = np.random.randn(100)
arr[5:] = np.nan # only the first five are non-nan
s = pd.Series(arr)
# top
bn_res = topper.bn_topn(arr, 10)
assert bn_res[0] == max(arr) # sanity check
pd_res = s.order(ascending=False)[:10].dropna()
tm.assert_almost_equal(bn_res, pd_res.values)
# bottom
bn_res = topper.bn_topn(arr, -10)
assert bn_res[0] == min(arr) # sanity check
pd_res = s.order()[:10].dropna() # smallest 10, with NaNs dropped
tm.assert_almost_equal(bn_res, pd_res.values)
def test_top_smallest(self):
# get the nsmallest
bn_res = topper.bn_topn(arr, -10)
assert bn_res[0] == min(arr) # sanity check
pd_res = s.order()[:10]
|
tm.assert_almost_equal(bn_res, pd_res.values)
|
pandas.util.testing.assert_almost_equal
|
#!/usr/bin/env python
"""
BSD 2-Clause License
Copyright (c) 2021 (<EMAIL>)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from __future__ import division
import argparse
import itertools
import json
import operator
import os
import re
import sys
import pickle
import math
from distutils.util import strtobool
import numpy as np
import pysam
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from matplotlib.backends.backend_pdf import PdfPages
from polyleven import levenshtein
from Bio import SeqIO
import seaborn as sns
import pandas as pd
from scipy import stats
###### Usage
#python plot_identity_error_alignment_normUnal.py -i basecaller1/norm_unaligned_assembly_polished basecaller2/norm_unaligned_assembly_polished basecaller3/norm_unaligned_assembly_polished -l basecaller1 basecaller2 basecaller3 -o outfolder -p appendix_outputname
#
def safe_div(x, y):
if y == 0:
return None
return x / y
plt.rcParams["patch.force_edgecolor"] = False
def plot_error_identity(df, pdf=None):
sns.set(font_scale=1)
fig = plt.figure(figsize=(13,2))
fig.subplots_adjust(left=0, right=1, bottom=0, top=2, hspace=0.2, wspace=0.2)
ax = fig.add_subplot(1, 2, 1)
sns.barplot(x="basecaller", hue="genome", y="error", data=df, linewidth=0, ax=ax)
plt.xlabel("Basecallers")
plt.ylabel("Error")
plt.title("Error rate of aligned reads to reference genome")
ax.get_legend().remove()
ax = fig.add_subplot(1, 2, 2)
sns.barplot(x="basecaller", hue="genome", y="identity", data=df, linewidth=0, ax=ax)
plt.xlabel("Basecallers")
plt.ylabel("Identity")
plt.title("Identity rate of aligned reads to reference genome")
ax.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0., fontsize=10)
if pdf is not None:
pdf.savefig(fig, bbox_inches="tight")
plt.close("all")
def plot_match_mismatch_indels(df, pdf=None, stacked=True):
sns.set(font_scale=1)
fig = plt.figure(figsize=(10,5))
fig.subplots_adjust(left=0, right=1, bottom=0, top=2, hspace=0.2, wspace=0.2)
if stacked:
ax = fig.add_subplot(2, 2, 1)
ax2 = ax.twiny()
#sns.barplot(x="basecaller", hue="genome", y="match", data=df, linewidth=0, ax=ax)
#plt.xlabel("Basecallers")
#plt.ylabel("%Matches")
#plt.title("Matches")
df0 = df[['basecaller', 'genome', 'mismatch', 'deletion', 'insertion', 'unaligned']]
cols = df0.columns
u, idx = np.unique(df.basecaller.tolist(), return_index=True)
order = u[np.argsort(idx)] #[u[index] for index in sorted(idx)]
df0['basecaller'] = pd.Categorical(df0.basecaller, categories=order, ordered=True) # ['f', 'a', 'w', 'h'] # prevent sorting
df0.set_index(['basecaller', 'genome'], inplace=True)
colors = plt.cm.Paired.colors
df1 = df0.unstack(level=-1) # unstack the 'Context' column
(df1['mismatch']+df1['deletion']+df1['insertion']+df1['unaligned']).plot(kind='bar', color=[colors[1], colors[0]], rot=0, ax=ax, linewidth=0)
print(df1['mismatch']+df1['deletion']+df1['insertion']+df1['unaligned'])
(df1['deletion']+df1['insertion']+df1['unaligned']).plot(kind='bar', color=[colors[3], colors[2]], rot=0, ax=ax, linewidth=0)
(df1['insertion']+df1['unaligned']).plot(kind='bar', color=[colors[5], colors[4]], rot=0, ax=ax, linewidth=0)
df1['unaligned'].plot(kind='bar', color=[colors[7], colors[6]], rot=0, ax=ax, linewidth=0)
#legend_labels = [f'{val} ({context})' for val, context in df1.columns]
ticks = []
for r in range(df.shape[0]//2):
ticks.append(r - 0.25)
ticks.append(r + 0.05)
ax.set_xticks(ticks)
ax.set_xticklabels(['lambda', 'ecoli'] * (df.shape[0]//2), rotation=45, fontsize=8)
ax.grid(axis="x")
legend_labels = []
labels = ["mismatch", "", "deletion", "", "insertion", "", "unaligned", ""]
#for val in labels:
# if val in legend_labels:
# legend_labels.append("")
# else:
# legend_labels.append(val)
#legend_labels = [f'{val} ({context})' for val, context in df1.columns]3
ax.legend(labels, bbox_to_anchor=(-0.08, 1.2), loc=2, borderaxespad=0., ncol=4, fontsize=10) #(1.05, 1)
ax.set_ylabel("mean error in %")
ax.set_xlabel("species")
ax.set_yscale('log') #,base=20)
#ax.text(0.02, -0.2, ' '.join(order), transform=ax.transAxes, fontsize=11) #horizontalalignment='center', verticalalignment='center'
ax2.set_xlim(ax.get_xlim())
ax2.set_xticks([0.02, 1, 2, 3])
ax2.set_xticklabels(order, fontsize=10)
ax.xaxis.set_ticks_position('none')
#ax2.xaxis.set_ticks_position('none')
ax2.grid(axis="x")
#ax.legend(legend_labels, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.ylabel("Proportion of errors")
else:
ax = fig.add_subplot(2, 2, 1)
sns.barplot(x="basecaller", hue="genome", y="mismatch", data=df, linewidth=0, ax=ax)
plt.xlabel("Basecallers")
plt.ylabel("Mismatches")
plt.title("Mismatches")
ax.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.xticks(fontsize=8)
#ax._legend.remove()
ax = fig.add_subplot(2, 2, 3)
sns.barplot(x="basecaller", hue="genome", y="deletion", data=df, linewidth=0, ax=ax)
plt.xlabel("Basecallers")
plt.ylabel("Deletion")
plt.title("Deletion")
ax.get_legend().remove()
plt.xticks(fontsize=8)
#ax._legend.remove()
ax = fig.add_subplot(2, 2, 4)
sns.barplot(x="basecaller", hue="genome", y="insertion", data=df, linewidth=0, ax=ax)
plt.xlabel("Basecallers")
plt.ylabel("Insertion")
plt.title("Insertion")
ax.get_legend().remove()
plt.xticks(fontsize=8)
#ax._legend.remove()
if pdf is not None:
pdf.savefig(fig, bbox_inches="tight")
plt.close("all")
def plot_boxplot(data, labels, pdf=None, title="relative read length", ylabel="read length / reference length in %", reference=None):
sns.set(font_scale=1)
fig = plt.figure(figsize=(6,2))
fig.subplots_adjust(left=0, right=1, bottom=0, top=2, hspace=0.2, wspace=0.2)
ax = fig.add_subplot(1, 1, 1)
box = plt.boxplot(data, patch_artist=True)
ticks = np.arange(1, len(labels)+1)
plt.xticks(ticks, labels, rotation=45, ha="right")
plt.ylabel(ylabel)
plt.xlabel("Basecaller")
plt.title(title)
#plt.yscale('log') #,base=20)
if reference is not None:
plt.axhline(reference, c='r')
colors = len(labels[-3:]) * ['#EAEAF2'] + 3* ["#88888C"]
#colors2 = len(labels[-3:]) * ['#DD8655'] + 3* ["#181819"]
for patch, color in zip(box['boxes'], colors):
patch.set_facecolor(color)
#med.set_facecolor(color2)
if pdf is not None:
pdf.savefig(fig, bbox_inches="tight")
plt.close("all")
def make_argparser():
parser = argparse.ArgumentParser(description='Prints summary about alignment of basecalled reads.')
parser.add_argument('-i', '--fastq', nargs="*",
help='FASTA/Q files with basecalled reads.')
parser.add_argument('-l', '--labels', nargs="*",
help='list of labels, in the same order as the FASTQ/A files.')
parser.add_argument('-o', '--out',
help='out path.')
parser.add_argument('-p', '--prefix', default="basecalled",
help='prefix for output file names.')
parser.add_argument('--stacked', type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=True,
help='stack error rates in plot.')
return parser
def median_abs_dev(x):
return(stats.median_absolute_deviation(x))
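# Note (not part of the original script): scipy.stats.median_absolute_deviation has
# been deprecated and later removed in newer SciPy releases in favor of
# scipy.stats.median_abs_deviation; running against a recent SciPy likely requires
# swapping the call accordingly.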
def report_errors(argv):
parser = make_argparser()
args = parser.parse_args(argv[1:])
fastq = args.fastq
basecallers = args.labels
out = args.out
prefix = args.prefix
stacked = args.stacked
with PdfPages(out + "/{}_error_alignment_rates.pdf".format(prefix)) as pdf:
lambd = []
ecoli = []
df = pd.DataFrame(columns=['basecaller', 'genome', 'match', 'mismatch', 'deletion', 'insertion', 'unaligned', 'identity', 'error', 'mqual', 'relative read length', 'aligned \% of read'])
df_std = pd.DataFrame(columns=['basecaller', 'genome', 'match', 'mismatch', 'deletion', 'insertion', 'unaligned','identity', 'error', 'mqual', 'relative read length', 'aligned \% of read'])
df_all =
|
pd.DataFrame(columns=['basecaller', 'genome', 'match', 'mismatch', 'deletion', 'insertion', 'unaligned', 'identity', 'error', 'mqual', 'relative read length', 'aligned \% of read'])
|
pandas.DataFrame
|
import os
import sys
from collections import Counter
from functools import partial
import datetime
import networkx as nx
import numpy as np
import pandas as pd
from numba import jit
from sklearn.metrics import roc_auc_score, roc_curve
from linkprediction.database_connector import DatabaseConnector
from linkprediction.graph_import.original_graph_handler import \
build_original_graph
from linkprediction.graph_import.predicted_graph_handler import \
load_predicted_graph_from_db
from linkprediction.prediction_methods.preparation.preprocessing import \
preprocess_df
from linkprediction.prediction_methods.preparation.utility import \
assign_labels
from linkprediction.prediction_methods.preparation.sampling import (
find_all_missing_edges, sampling_by_percentage)
from linkprediction.prediction_methods.prediction.utility import (
get_prediction, get_dataframe)
from linkprediction.prediction_methods.prediction.classification import (
get_X_y)
from linkprediction.prediction_methods.predictor_factory import \
LinkPredictorFactory
from linkprediction.prediction_methods.prediction_monitor import \
PredictionMonitor
from linkprediction.prediction_methods.prediction.social_theory import \
get_attribute_threshold
from linkprediction.prediction_methods.evaluation.metrics import (
ROC, AUC, get_metrics_as_json)
from linkprediction.graph_import.predicted_graph_handler import (
save_predicted_graph_to_db, get_st_features_from_predicted_graph,
add_or_update_edge, hierarchical_to_flat, flat_to_hierarchical)
class PredictionWorker(object):
def __init__(self,
process_id,
project_id,
predictors,
validation,
preprocessing,
train_split,
test_split,
seed):
self.process_id = process_id
self.project_id = project_id
self.predictors = predictors
self.validation = validation
self.preprocessing = preprocessing
self.train_split = train_split
self.test_split = test_split
self.seed = seed
self.monitor = PredictionMonitor(self, self._get_tasks())
self.factory = LinkPredictorFactory()
def predict(self):
try:
self.monitor.pending()
# PIPELINE - PREPARATION
ground_truth_graph_h = build_original_graph('project_id', self.project_id, 'hierarchical')
ground_truth_graph_f = hierarchical_to_flat(ground_truth_graph_h)
train_graph_f, test_graph_f = self._sample_graphs(ground_truth_graph_f)
train_missing_edges, test_missing_edges = self._get_missing_edges(ground_truth_graph_f, train_graph_f, test_graph_f)
train_set, test_set = self._prepare_labels(ground_truth_graph_f, test_graph_f, train_missing_edges, test_missing_edges)
train_graph_h = flat_to_hierarchical(train_graph_f)
predicted_graph_train = train_graph_h.copy()
test_graph_h = None
predicted_graph_test = None
if self.validation:
test_graph_h = flat_to_hierarchical(test_graph_f)
predicted_graph_test = test_graph_h.copy()
# PIPELINE - TOPOLOGY
train_t_df, test_t_df = self._topology_pipeline(ground_truth_graph_f, train_graph_f, test_graph_f, train_set, test_set)
# PIPELINE - SOCIALTHEORY
train_st_df, test_st_df = self._socialtheory_pipeline(train_graph_h, test_graph_h, train_set, test_set, predicted_graph_train, predicted_graph_test)
# PIPELINE - CLASSIFICATION
train_features_df =
|
pd.concat([train_t_df, train_st_df], axis=1)
|
pandas.concat
|
from sales_analysis.data_pipeline import BASEPATH
from sales_analysis.data_pipeline._pipeline import SalesPipeline
import pytest
import os
import pandas as pd
# --------------------------------------------------------------------------
# Fixtures
@pytest.fixture
def pipeline():
FILEPATH = os.path.join(BASEPATH, "data")
DATA_FILES = [f for f in os.listdir(FILEPATH) if f.endswith('.csv')]
DATA = {f : pd.read_csv(os.path.join(FILEPATH, f)) for f in DATA_FILES}
return SalesPipeline(**DATA)
# --------------------------------------------------------------------------
# Data
data = {'customers': {pd.Timestamp('2019-08-01 00:00:00'): 9,
pd.Timestamp('2019-08-02 00:00:00'): 10,
pd.Timestamp('2019-08-03 00:00:00'): 10,
pd.Timestamp('2019-08-04 00:00:00'): 10,
pd.Timestamp('2019-08-05 00:00:00'): 9,
pd.Timestamp('2019-08-06 00:00:00'): 9,
pd.Timestamp('2019-08-07 00:00:00'): 10,
pd.Timestamp('2019-08-08 00:00:00'): 8,
pd.Timestamp('2019-08-09 00:00:00'): 5,
pd.Timestamp('2019-08-10 00:00:00'): 5,
pd.Timestamp('2019-08-11 00:00:00'): 10,
pd.Timestamp('2019-08-12 00:00:00'): 10,
pd.Timestamp('2019-08-13 00:00:00'): 6,
pd.Timestamp('2019-08-14 00:00:00'): 7,
pd.Timestamp('2019-08-15 00:00:00'): 10,
pd.Timestamp('2019-08-16 00:00:00'): 8,
pd.Timestamp('2019-08-17 00:00:00'): 7,
pd.Timestamp('2019-08-18 00:00:00'): 9,
pd.Timestamp('2019-08-19 00:00:00'): 5,
pd.Timestamp('2019-08-20 00:00:00'): 5},
'total_discount_amount': {pd.Timestamp('2019-08-01 00:00:00'): 15152814.736907512,
pd.Timestamp('2019-08-02 00:00:00'): 20061245.64408109,
pd.Timestamp('2019-08-03 00:00:00'): 26441693.751396574,
pd.Timestamp('2019-08-04 00:00:00'): 25783015.567048658,
pd.Timestamp('2019-08-05 00:00:00'): 16649773.993076814,
pd.Timestamp('2019-08-06 00:00:00'): 24744027.428384878,
pd.Timestamp('2019-08-07 00:00:00'): 21641181.771564845,
pd.Timestamp('2019-08-08 00:00:00'): 27012160.85245146,
pd.Timestamp('2019-08-09 00:00:00'): 13806814.237002019,
pd.Timestamp('2019-08-10 00:00:00'): 9722459.599448118,
pd.Timestamp('2019-08-11 00:00:00'): 20450260.26194652,
pd.Timestamp('2019-08-12 00:00:00'): 22125711.151501,
pd.Timestamp('2019-08-13 00:00:00'): 11444206.200090334,
pd.Timestamp('2019-08-14 00:00:00'): 17677326.65707852,
pd.Timestamp('2019-08-15 00:00:00'): 26968819.12338184,
pd.Timestamp('2019-08-16 00:00:00'): 22592246.991756547,
pd.Timestamp('2019-08-17 00:00:00'): 15997597.519811645,
pd.Timestamp('2019-08-18 00:00:00'): 17731498.506244037,
pd.Timestamp('2019-08-19 00:00:00'): 22127822.876592986,
pd.Timestamp('2019-08-20 00:00:00'): 5550506.789972418},
'items': {pd.Timestamp('2019-08-01 00:00:00'): 2895,
pd.Timestamp('2019-08-02 00:00:00'): 3082,
pd.Timestamp('2019-08-03 00:00:00'): 3559,
pd.Timestamp('2019-08-04 00:00:00'): 3582,
pd.Timestamp('2019-08-05 00:00:00'): 2768,
pd.Timestamp('2019-08-06 00:00:00'): 3431,
pd.Timestamp('2019-08-07 00:00:00'): 2767,
pd.Timestamp('2019-08-08 00:00:00'): 2643,
pd.Timestamp('2019-08-09 00:00:00'): 1506,
pd.Timestamp('2019-08-10 00:00:00'): 1443,
pd.Timestamp('2019-08-11 00:00:00'): 2466,
pd.Timestamp('2019-08-12 00:00:00'): 3482,
pd.Timestamp('2019-08-13 00:00:00'): 1940,
pd.Timestamp('2019-08-14 00:00:00'): 1921,
pd.Timestamp('2019-08-15 00:00:00'): 3479,
pd.Timestamp('2019-08-16 00:00:00'): 3053,
pd.Timestamp('2019-08-17 00:00:00'): 2519,
pd.Timestamp('2019-08-18 00:00:00'): 2865,
pd.Timestamp('2019-08-19 00:00:00'): 1735,
pd.Timestamp('2019-08-20 00:00:00'): 1250},
'order_total_avg': {pd.Timestamp('2019-08-01 00:00:00'): 1182286.0960463749,
pd.Timestamp('2019-08-02 00:00:00'): 1341449.559055637,
pd.Timestamp('2019-08-03 00:00:00'): 1270616.0372525519,
pd.Timestamp('2019-08-04 00:00:00'): 1069011.1516039693,
pd.Timestamp('2019-08-05 00:00:00'): 1355304.7342628485,
|
pd.Timestamp('2019-08-06 00:00:00')
|
pandas.Timestamp
|
#!/usr/bin/env python3
# Copyright 2021 <NAME>, <NAME>, <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
from tensorboardX import SummaryWriter
import utils.utils as utils
import pandas as pd
from abc import ABC, abstractmethod
class AbstractNetwork(ABC, nn.Module):
def __init__(self, n_in, n_hidden, n_out, activation='relu',
output_activation='linear', bias=True,
forward_requires_grad=False,
initialization='orthogonal',
save_df=False,
clip_grad_norm=-1):
nn.Module.__init__(self)
self._depth = len(n_hidden) + 1
self._layers = None
self._input = None
self._forward_requires_grad = forward_requires_grad
self._use_bias = bias
self._save_df = save_df
self._clip_grad_norm = clip_grad_norm
if save_df:
self.bp_angles = pd.DataFrame(
columns=[i for i in range(0, self._depth)])
self.nullspace_relative_norm = pd.DataFrame(
columns=[i for i in range(0, self._depth)])
self.gn_angles = pd.DataFrame(
columns=[i for i in range(0, self._depth)])
self.gnt_angles = pd.DataFrame(
columns=[i for i in range(0, self._depth)])
self.ndi_angles = pd.DataFrame(
columns=[i for i in range(0, self._depth)])
self.ndi_angles_network = pd.DataFrame(columns=[0])
self.jac_pinv_angles = pd.DataFrame(columns=[0])
self.jac_transpose_angles = pd.DataFrame(columns=[0])
self.jac_pinv_angles_init = pd.DataFrame(columns=[0])
self.jac_transpose_angles_init =
|
pd.DataFrame(columns=[0])
|
pandas.DataFrame
|
"""
This file is part of the accompanying code to our paper
<NAME>., <NAME>., <NAME>., & <NAME>. (2021). Uncovering flooding mecha-
nisms across the contiguous United States through interpretive deep learning on
representative catchments. Water Resources Research, 57, e2021WR030185.
https://doi.org/10.1029/2021WR030185.
Copyright (c) 2021 <NAME>. All rights reserved.
You should have received a copy of the MIT license along with the code. If not,
see <https://opensource.org/licenses/MIT>
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.pylab as mpl
import matplotlib.patches as mpatches
import matplotlib.dates as mdates
def plot_peaks(Q, peak_dates, plot_range=[None, None], linecolor="tab:brown", markercolor="tab:red", figsize=(7.5, 2.0)):
"""
Plot the identified flood peaks.
Parameters
----------
Q: pandas DataFrame of streamflow observations with a 'flow' column.
peak_dates: a sequence of flood peaks' occurrence dates.
plot_range: the date range of the plot; it can be a pair of date strings (default: [None, None]).
linecolor: the color of the line (default: 'tab:brown').
markercolor: the color of the marker (default: 'tab:red').
figsize: the width and height of the figure in inches (default: (7.5, 2.0)).
"""
fig, ax = plt.subplots(figsize=figsize)
fig.tight_layout()
plot_range[0] = Q.index[0] if plot_range[0] == None else plot_range[0]
plot_range[1] = Q.index[-1] if plot_range[1] == None else plot_range[1]
ax.plot(Q["flow"].loc[plot_range[0]:plot_range[1]], color=linecolor, lw=1.0)
ax.plot(
Q.loc[peak_dates, "flow"].loc[plot_range[0]:plot_range[1]],
"*",
c=markercolor,
markersize=8,
)
ax.set_title(f"Identified flood peaks from {plot_range[0]} to {plot_range[1]}")
ax.set_ylabel("flow(mm)")
plt.show()
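# A minimal usage sketch with hypothetical data (assumes daily observations in a
# 'flow' column, matching how Q is indexed inside plot_peaks; not part of the
# original module):
#
#   Q = pd.DataFrame({"flow": np.random.gamma(2.0, 1.0, 365)},
#                    index=pd.date_range("2000-01-01", periods=365, freq="D"))
#   peak_dates = Q["flow"].nlargest(5).index
#   plot_peaks(Q, peak_dates, plot_range=["2000-01-01", "2000-12-31"])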
def plot_eg_individual(dataset, peak_eg_dict, peak_eg_var_dict, peak_date, title_suffix=None, linewidth=1.5, figsize=(10, 3)):
eg_plot = dataset.loc[pd.date_range(end=peak_date, periods=list(peak_eg_dict.values())[0].shape[1]+1, freq='d')[:-1]]
eg_plot.loc[:, "prcp_eg"] = abs(peak_eg_dict[pd.to_datetime(peak_date)][0, :, 0])
eg_plot.loc[:, "temp_eg"] = abs(peak_eg_dict[pd.to_datetime(peak_date)][0, :, 1])
eg_plot.loc[:, "prcp_eg_val"] = abs(peak_eg_var_dict[pd.to_datetime(peak_date)][0, :, 0])
eg_plot.loc[:, "temp_eg_val"] = abs(peak_eg_var_dict[pd.to_datetime(peak_date)][0, :, 1])
fig = plt.figure(constrained_layout=False, figsize=figsize)
gs1 = fig.add_gridspec(nrows=2, ncols=1, hspace=0, left=0.00, right=0.45, height_ratios=[2.5, 1.5])
ax1 = fig.add_subplot(gs1[0, 0])
ax2 = fig.add_subplot(gs1[1, 0])
gs2 = fig.add_gridspec(nrows=2, ncols=1, hspace=0, left=0.55, right=1.00, height_ratios=[2.5, 1.5])
ax3 = fig.add_subplot(gs2[0, 0])
ax4 = fig.add_subplot(gs2[1, 0])
for ax in [ax1, ax3]:
ax.spines["bottom"].set_visible(False)
ax.axes.get_xaxis().set_visible(False)
for ax in [ax2, ax4]:
ax.set_ylabel(r'$\phi^{EG}_{i}$')
ax.spines["top"].set_visible(False)
ax.yaxis.tick_right()
ax.yaxis.set_label_position("right")
ax.set_ylim(bottom=np.min(peak_eg_dict[pd.to_datetime(peak_date)]),
top=np.max(peak_eg_dict[pd.to_datetime(peak_date)]))
ax.xaxis.set_major_formatter(mdates.DateFormatter('%m/%Y'))
ax.xaxis.set_major_locator(mdates.MonthLocator(interval=1))
ax1.plot(eg_plot['prcp'], color='k', lw=linewidth)
ax1.set_ylabel('P [mm]', ha='center', y=0.5)
ax2.plot(eg_plot['prcp_eg'], color='blue', lw=linewidth)
ax2.fill_between(eg_plot['prcp_eg'].index,
eg_plot['prcp_eg']-eg_plot.loc[:, "prcp_eg_val"],
eg_plot['prcp_eg']+eg_plot.loc[:, "prcp_eg_val"], color='blue', alpha=0.3)
ax2.yaxis.label.set_color('blue')
ax2.tick_params(axis='y', colors='blue')
ax3.plot(eg_plot['tmean'], color='k', lw=linewidth)
ax3.set_ylabel('T [\u2103]', ha='center', y=0.5)
ax4.plot(eg_plot['temp_eg'], color='red', lw=linewidth)
ax4.fill_between(eg_plot['temp_eg'].index,
eg_plot['temp_eg']-eg_plot.loc[:, "temp_eg_val"],
eg_plot['temp_eg']+eg_plot.loc[:, "temp_eg_val"], color='red', alpha=0.3)
ax4.yaxis.label.set_color('red')
ax4.tick_params(axis='y', colors='red')
ax1.set_title(f"Flood on {pd.to_datetime(peak_date).strftime('%d %B %Y')} {str(title_suffix)}",
fontweight='bold', loc='left')
plt.show()
def plot_arrow(a1, p1, a2, p2, coordsA='axes fraction', coordsB='axes fraction'):
con = mpatches.ConnectionPatch(xyA=p1, xyB=p2, coordsA=coordsA, coordsB=coordsB,
axesA=a1, axesB=a2, arrowstyle="-|>", facecolor='black')
a1.add_artist(con)
def plot_simple_arrow(a1, p1, a2, p2, coordsA='axes fraction', coordsB='axes fraction'):
con = mpatches.ConnectionPatch(xyA=p1, xyB=p2, coordsA=coordsA, coordsB=coordsB,
axesA=a1, axesB=a2, arrowstyle="->", facecolor='black')
a1.add_artist(con)
def plot_line(a1, p1, a2, p2, coordsA='axes fraction', coordsB='axes fraction'):
con = mpatches.ConnectionPatch(xyA=p1, xyB=p2, coordsA=coordsA, coordsB=coordsB,
axesA=a1, axesB=a2)
a1.add_artist(con)
def plot_decomp(dataset, decomp_dict, peak_date, title_suffix=None, linewidth=1.0, figsize=(10, 5)):
blue_colors = mpl.cm.Blues(np.linspace(0,1,16))
green_colors = mpl.cm.Greens(np.linspace(0,1,16))
red_colors = mpl.cm.Reds(np.linspace(0,1,16))
purple_colors = mpl.cm.Purples(np.linspace(0,1,16))
winter_colors = mpl.cm.winter(np.linspace(0,1,16))
autumn_colors = mpl.cm.autumn(np.linspace(0,1,16))
decomp_plot = dataset.loc[pd.date_range(end=peak_date, periods=list(decomp_dict.values())[0]['x'].shape[0]+1, freq='d')]
fig = plt.figure(constrained_layout=False, figsize=figsize)
gs1 = fig.add_gridspec(nrows=2, ncols=1, hspace=1.2, left=0.000, right=0.180, top=0.70, bottom=0.30)
gs2 = fig.add_gridspec(nrows=6, ncols=1, hspace=0.6, left=0.250, right=0.550)
gs3 = fig.add_gridspec(nrows=3, ncols=1, hspace=0.6, left=0.650, right=1.000, top=0.80, bottom=0.20)
ax1_1 = fig.add_subplot(gs1[0, 0])
ax1_2 = fig.add_subplot(gs1[1, 0])
ax2_1 = fig.add_subplot(gs2[0, 0])
ax2_2 = fig.add_subplot(gs2[1, 0])
ax2_3 = fig.add_subplot(gs2[2, 0])
ax2_4 = fig.add_subplot(gs2[3, 0])
ax2_5 = fig.add_subplot(gs2[4, 0])
ax2_6 = fig.add_subplot(gs2[5, 0])
ax3_1 = fig.add_subplot(gs3[0, 0])
ax3_2 = fig.add_subplot(gs3[1, 0])
ax3_3 = fig.add_subplot(gs3[2, 0])
ax1_1.plot(decomp_plot['prcp'].iloc[:-1], color='k', lw=linewidth)
ax1_2.plot(decomp_plot['tmean'].iloc[:-1], color='k', lw=linewidth)
for i in range(16):
ax2_1.plot(decomp_plot.index[:-1], decomp_dict[pd.to_datetime(peak_date)]['hi_arr'][:, i],
c=green_colors[i], alpha=0.60, lw=linewidth)
ax2_2.plot(decomp_plot.index[:-1], decomp_dict[pd.to_datetime(peak_date)]['hc_arr'][:, i],
c=blue_colors[i], alpha=0.60, lw=linewidth)
ax2_3.plot(decomp_plot.index[:-1], decomp_dict[pd.to_datetime(peak_date)]['hf_arr'][:, i],
c=red_colors[i], alpha=0.60, lw=linewidth)
ax2_4.plot(decomp_plot.index[:-1], decomp_dict[pd.to_datetime(peak_date)]['ho_arr'][:, i],
c=purple_colors[i], alpha=0.60, lw=linewidth)
ax2_5.plot(decomp_plot.index[:], decomp_dict[pd.to_datetime(peak_date)]['c_states'][:, i],
c=autumn_colors[i], alpha=0.60, lw=linewidth)
ax2_6.plot(decomp_plot.index[:], decomp_dict[pd.to_datetime(peak_date)]['h_states'][:, i],
c=winter_colors[i], alpha=0.60, lw=linewidth)
ax3_1.plot(decomp_plot.index[:-1], decomp_dict[
|
pd.to_datetime(peak_date)
|
pandas.to_datetime
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.finance as mpf
'''
Compute the return ratio.
Input: V (current portfolio value), C (initial capital)
Output: return ratio
'''
def returnRatio(V, C=100000.0):
return V/C-1.0
'''
Compute return ratios for an array of values.
Input: array of V, C (initial capital)
Output: array of return ratios
'''
def returnRatioArr(VArr, C=100000.0):
arr = []
for v in VArr: arr.append(v/C-1.0)
return arr
'''
Compute the number of effective investment days.
Input: buys df, sells df, simulated investment result perf (the DataFrames are indexed by date)
Output: number of days
'''
def validInvestDays(buys, sells, perf):
days = 0
for i in range(len(sells)):
days += (sells.index[i]-buys.index[i]).days
if len(buys)>len(sells):
days += (perf.index[-1]-buys.index[-1]).days
return days
'''
Compute the annualized return.
Input: array of per-period return ratios, T (holding period in trading days), D (trading days per year)
Output: annualized return
'''
def annualizedReturnRatio(returnRatioArr, T=250.0, D=250.0):
import math
tmp = 1
for r in returnRatioArr: tmp *= (r+1)
return math.pow(tmp, D/T)-1
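# In other words: annualized = (prod(1 + r_i)) ** (D / T) - 1, i.e. compound the
# per-period returns earned over a holding period of T trading days, then rescale
# to a D-day trading year. Illustrative check (not from the original code):
# annualizedReturnRatio([0.02], T=25.0, D=250.0) == 1.02 ** 10 - 1, roughly 0.219,
# so a 2% gain over 25 trading days annualizes to about 21.9%.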
'''
Compute moving averages (MA).
Input: close price series, short and long window lengths
Output: DataFrame
'''
def MA(closeSeries, shortWin=5, longWin=20):
shortMA = pd.rolling_mean(closeSeries, window=shortWin)
longMA = pd.rolling_mean(closeSeries, window=longWin)
return pd.DataFrame({'Close': closeSeries, str(shortWin)+'MA':shortMA, str(longWin)+'MA': longMA})
'''
Compute Bollinger Bands.
Input: close price series, window length
Output: DataFrame
'''
def BollingerBand(closeSeries, win=20):
MA = pd.rolling_mean(closeSeries, window=win)
std = pd.rolling_std(closeSeries, window=win)
up = MA+2*std
down = MA-2*std
return pd.DataFrame({'Close': closeSeries, 'MA':MA, 'Upper':up, 'Lower':down})
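# Note: pd.rolling_mean / pd.rolling_std (and pd.ewma used below) were removed from
# modern pandas; the replacements are the .rolling() and .ewm() accessors, e.g.
# pd.ewma(s, span=n) -> s.ewm(span=n).mean(). The function below is a hedged sketch,
# not part of the original module, showing the same computation on current pandas:
def BollingerBandModern(closeSeries, win=20):
    # equivalent of the legacy rolling_mean / rolling_std calls above
    MA = closeSeries.rolling(window=win).mean()
    std = closeSeries.rolling(window=win).std()
    return pd.DataFrame({'Close': closeSeries, 'MA': MA,
                         'Upper': MA + 2 * std, 'Lower': MA - 2 * std})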
'''
Compute EMA/EWMA (exponentially weighted moving averages).
Input: close price series, short and long spans
Output: DataFrame
'''
def EWMA(closeSeries, shortWin=12, longWin=26):
shortEWMA = pd.ewma(closeSeries, span=shortWin)
longEWMA = pd.ewma(closeSeries, span=longWin)
return pd.DataFrame({'Close': closeSeries, str(shortWin)+'EWMA':shortEWMA, str(longWin)+'EWMA': longEWMA})
'''
Compute MACD.
Input: close price series, short and long spans, DIF window
Output: DataFrame
'''
def MACD(closeSeries, shortWin=12, longWin=26, DIFWin=9):
shortEWMA = pd.ewma(closeSeries, span=shortWin)
longEWMA =
|
pd.ewma(closeSeries, span=longWin)
|
pandas.ewma
|
import numpy as np
import pandas as pd
from pandas import Categorical, DataFrame, Series, Timestamp, date_range
import pandas._testing as tm
class TestDataFrameDescribe:
def test_describe_bool_in_mixed_frame(self):
df = DataFrame(
{
"string_data": ["a", "b", "c", "d", "e"],
"bool_data": [True, True, False, False, False],
"int_data": [10, 20, 30, 40, 50],
}
)
# Integer data are included in .describe() output,
# Boolean and string data are not.
result = df.describe()
expected = DataFrame(
{"int_data": [5, 30, df.int_data.std(), 10, 20, 30, 40, 50]},
index=["count", "mean", "std", "min", "25%", "50%", "75%", "max"],
)
tm.assert_frame_equal(result, expected)
# Top value is a boolean value that is False
result = df.describe(include=["bool"])
expected = DataFrame(
{"bool_data": [5, 2, False, 3]}, index=["count", "unique", "top", "freq"]
)
tm.assert_frame_equal(result, expected)
def test_describe_empty_object(self):
# GH#27183
df = pd.DataFrame({"A": [None, None]}, dtype=object)
result = df.describe()
expected = pd.DataFrame(
{"A": [0, 0, np.nan, np.nan]},
dtype=object,
index=["count", "unique", "top", "freq"],
)
tm.assert_frame_equal(result, expected)
result = df.iloc[:0].describe()
tm.assert_frame_equal(result, expected)
def test_describe_bool_frame(self):
# GH#13891
df = pd.DataFrame(
{
"bool_data_1": [False, False, True, True],
"bool_data_2": [False, True, True, True],
}
)
result = df.describe()
expected = DataFrame(
{"bool_data_1": [4, 2, True, 2], "bool_data_2": [4, 2, True, 3]},
index=["count", "unique", "top", "freq"],
)
tm.assert_frame_equal(result, expected)
df = pd.DataFrame(
{
"bool_data": [False, False, True, True, False],
"int_data": [0, 1, 2, 3, 4],
}
)
result = df.describe()
expected = DataFrame(
{"int_data": [5, 2, df.int_data.std(), 0, 1, 2, 3, 4]},
index=["count", "mean", "std", "min", "25%", "50%", "75%", "max"],
)
tm.assert_frame_equal(result, expected)
df = pd.DataFrame(
{"bool_data": [False, False, True, True], "str_data": ["a", "b", "c", "a"]}
)
result = df.describe()
expected = DataFrame(
{"bool_data": [4, 2, True, 2], "str_data": [4, 3, "a", 2]},
index=["count", "unique", "top", "freq"],
)
tm.assert_frame_equal(result, expected)
def test_describe_categorical(self):
df = DataFrame({"value": np.random.randint(0, 10000, 100)})
labels = [f"{i} - {i + 499}" for i in range(0, 10000, 500)]
cat_labels = Categorical(labels, labels)
df = df.sort_values(by=["value"], ascending=True)
df["value_group"] = pd.cut(
df.value, range(0, 10500, 500), right=False, labels=cat_labels
)
cat = df
# Categoricals should not show up together with numerical columns
result = cat.describe()
assert len(result.columns) == 1
# In a frame, describe() for the cat should be the same as for string
# arrays (count, unique, top, freq)
cat = Categorical(
["a", "b", "b", "b"], categories=["a", "b", "c"], ordered=True
)
s = Series(cat)
result = s.describe()
expected = Series([4, 2, "b", 3], index=["count", "unique", "top", "freq"])
tm.assert_series_equal(result, expected)
cat = Series(Categorical(["a", "b", "c", "c"]))
df3 = DataFrame({"cat": cat, "s": ["a", "b", "c", "c"]})
result = df3.describe()
tm.assert_numpy_array_equal(result["cat"].values, result["s"].values)
def test_describe_empty_categorical_column(self):
# GH#26397
# Ensure the index of an empty categorical DataFrame column
# also contains (count, unique, top, freq)
df = pd.DataFrame({"empty_col": Categorical([])})
result = df.describe()
expected = DataFrame(
{"empty_col": [0, 0, np.nan, np.nan]},
index=["count", "unique", "top", "freq"],
dtype="object",
)
tm.assert_frame_equal(result, expected)
# ensure NaN, not None
assert np.isnan(result.iloc[2, 0])
assert np.isnan(result.iloc[3, 0])
def test_describe_categorical_columns(self):
# GH#11558
columns = pd.CategoricalIndex(["int1", "int2", "obj"], ordered=True, name="XXX")
df = DataFrame(
{
"int1": [10, 20, 30, 40, 50],
"int2": [10, 20, 30, 40, 50],
"obj": ["A", 0, None, "X", 1],
},
columns=columns,
)
result = df.describe()
exp_columns = pd.CategoricalIndex(
["int1", "int2"],
categories=["int1", "int2", "obj"],
ordered=True,
name="XXX",
)
expected = DataFrame(
{
"int1": [5, 30, df.int1.std(), 10, 20, 30, 40, 50],
"int2": [5, 30, df.int2.std(), 10, 20, 30, 40, 50],
},
index=["count", "mean", "std", "min", "25%", "50%", "75%", "max"],
columns=exp_columns,
)
tm.assert_frame_equal(result, expected)
tm.assert_categorical_equal(result.columns.values, expected.columns.values)
def test_describe_datetime_columns(self):
columns = pd.DatetimeIndex(
["2011-01-01", "2011-02-01", "2011-03-01"],
freq="MS",
tz="US/Eastern",
name="XXX",
)
df = DataFrame(
{
0: [10, 20, 30, 40, 50],
1: [10, 20, 30, 40, 50],
2: ["A", 0, None, "X", 1],
}
)
df.columns = columns
result = df.describe()
exp_columns = pd.DatetimeIndex(
["2011-01-01", "2011-02-01"], freq="MS", tz="US/Eastern", name="XXX"
)
expected = DataFrame(
{
0: [5, 30, df.iloc[:, 0].std(), 10, 20, 30, 40, 50],
1: [5, 30, df.iloc[:, 1].std(), 10, 20, 30, 40, 50],
},
index=["count", "mean", "std", "min", "25%", "50%", "75%", "max"],
)
expected.columns = exp_columns
tm.assert_frame_equal(result, expected)
assert result.columns.freq == "MS"
assert result.columns.tz == expected.columns.tz
def test_describe_timedelta_values(self):
# GH#6145
t1 = pd.timedelta_range("1 days", freq="D", periods=5)
t2 = pd.timedelta_range("1 hours", freq="H", periods=5)
df = pd.DataFrame({"t1": t1, "t2": t2})
expected = DataFrame(
{
"t1": [
5,
pd.Timedelta("3 days"),
df.iloc[:, 0].std(),
pd.Timedelta("1 days"),
pd.Timedelta("2 days"),
pd.Timedelta("3 days"),
pd.Timedelta("4 days"),
pd.Timedelta("5 days"),
],
"t2": [
5,
pd.Timedelta("3 hours"),
df.iloc[:, 1].std(),
pd.Timedelta("1 hours"),
pd.Timedelta("2 hours"),
pd.Timedelta("3 hours"),
pd.Timedelta("4 hours"),
pd.Timedelta("5 hours"),
],
},
index=["count", "mean", "std", "min", "25%", "50%", "75%", "max"],
)
result = df.describe()
tm.assert_frame_equal(result, expected)
exp_repr = (
" t1 t2\n"
"count 5 5\n"
"mean 3 days 00:00:00 0 days 03:00:00\n"
"std 1 days 13:56:50.394919 0 days 01:34:52.099788\n"
"min 1 days 00:00:00 0 days 01:00:00\n"
"25% 2 days 00:00:00 0 days 02:00:00\n"
"50% 3 days 00:00:00 0 days 03:00:00\n"
"75% 4 days 00:00:00 0 days 04:00:00\n"
"max 5 days 00:00:00 0 days 05:00:00"
)
assert repr(result) == exp_repr
def test_describe_tz_values(self, tz_naive_fixture):
# GH#21332
tz = tz_naive_fixture
s1 = Series(range(5))
start = Timestamp(2018, 1, 1)
end = Timestamp(2018, 1, 5)
s2 = Series(
|
date_range(start, end, tz=tz)
|
pandas.date_range
|
import pandas as pd
import sys
job_df = pd.read_csv(sys.argv[1])
my_index = pd.MultiIndex(levels = [[],[]], codes=[[],[]], names=[u'labels', u'path_idx'])
df_upd = pd.DataFrame(index=my_index)
#print(df)
for idx in job_df.index:
print(idx)
reactant_idx = job_df.loc[idx, 'reactant']
r_idx = job_df.loc[idx, 'r_idx']
letter = job_df.loc[idx, 'letter']
#print(reactant_idx, r_idx, letter)
pkl_file = str(reactant_idx)+'/'+str(reactant_idx)+'_'+str(r_idx)+'_'+letter+'.pkl'
df =
|
pd.read_pickle(pkl_file)
|
pandas.read_pickle
|
import pandas as pd
from matplotlib import rcParams
import matplotlib.pyplot as plt
import pickle
import util.plot
import util.data
import numpy as np
from pandas.plotting import scatter_matrix
np.random.seed(123)
data =
|
pd.read_csv('data/training_set_VU_DM_clean.csv', sep=';',nrows=10000)
|
pandas.read_csv
|
import numpy as np
import tensorflow as tf
import keras
import run_expert
import tf_util
import gym
import load_policy
import math
import matplotlib.pyplot as plt
#%matplotlib inline
import pandas as pd
import seaborn as sns
import tqdm
class Config(object):
n_feature= 11
n_classes = 3
dropout=0.5
hidden_size1= 128
hidden_size2= 256
hidden_size3 = 64
batch_size = 256
n_epochs = 10
lr = 0.0005
itera = 20
train_itera = 20
envname = 'Hopper-v1'
max_steps = 1000
class Model(object):
def __init__(self,config):
self.config= config
self.build()
def add_placerholders(self):
self.input_placeholder =tf.placeholder(tf.float32,shape=(None,self.config.n_feature))
self.labels_placeholder = tf.placeholder(tf.float32,shape=(None,self.config.n_classes))
self.training_placeholder = tf.placeholder(tf.bool)
self.dropout_placeholder = tf.placeholder(tf.float32)
def create_feed_dict(self,input_batch,labels_batch=None,dropout=1,is_training=False):
feed_dict ={self.input_placeholder:input_batch,
self.dropout_placeholder:dropout,
self.training_placeholder:is_training}
if labels_batch is not None:
feed_dict[self.labels_placeholder]=labels_batch
return feed_dict
def prediction_op(self):
x=self.input_placeholder
layer1 = tf.layers.dense(x,self.config.hidden_size1,activation=tf.nn.relu)
#layer2 = tf.nn.dropout(layer1,keep_prob=self.dropout_placeholder) no need dropout
layer3 = tf.layers.dense(layer1,self.config.hidden_size2,activation=tf.nn.relu)
#layer4 =tf.nn.dropout(layer3,keep_prob=self.dropout_placeholder)
layer5 = tf.layers.dense(layer3,self.config.hidden_size3,activation=tf.nn.relu)
layer6 = tf.layers.dense(layer5,self.config.n_classes)
return layer6
def loss_op(self,pred):
loss = tf.losses.mean_squared_error(labels=self.labels_placeholder,predictions=pred)
#loss = loss
return loss
def training_op(self,loss):
train_op = tf.train.AdamOptimizer().minimize(loss)
return train_op
def train_on_batch(self,sess,input_batch,labels_batch):
feed= self.create_feed_dict(input_batch,labels_batch,self.config.dropout,True)
_, loss = sess.run([self.train_op, self.loss], feed_dict=feed)
#train_writer.add_summary(rs, i)
return loss
def build(self):
self.add_placerholders()
self.pred=self.prediction_op()
self.loss=self.loss_op(self.pred)
self.train_op=self.training_op(self.loss)
def fit(self,sess,train_x,train_y):
self.train_on_batch(sess,train_x,train_y)
def get_pred(self,sess,input_batch):
feed = self.create_feed_dict(input_batch,None,1,0)
pred=sess.run(self.pred,feed_dict=feed)
return pred
def load_data(filename):
tmp=np.load(filename)
train_X=tmp['X']
train_y=tmp['Y']
return train_X,train_y
def dagger(sess,model):
policy_name ='experts/Hopper-v1'
policy_fn = load_policy.load_policy(policy_name+'.pkl')
#print(policy_fn)
env = gym.make(Config.envname)
rollouts = 20
observations = []
actions = []
for _ in range(rollouts):
obs = env.reset()
#print(obs.shape)
done = False
steps = 0
while not done:
action = model.get_pred(sess, obs[None, :])
action_new = policy_fn(obs[None, :])
obs, r, done, _ = env.step(action)
#print(obs.shape)
observations.append(obs)
actions.append(action_new)
steps += 1
if steps >= Config.max_steps:
break
return np.array(observations),np.array(actions)
def main():
file='10result.npz'
trainx,trainy=load_data(file)
config=Config()
trainy=trainy.reshape(-1,config.n_classes)
model = Model(config)
init=tf.global_variables_initializer()
shuffle_batch_x, shuffle_batch_y = tf.train.shuffle_batch(
[trainx, trainy], batch_size=config.batch_size, capacity=10000,min_after_dequeue=5000, enqueue_many=True)
with tf.Session() as sess:
train_log_path='log'
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess, coord)
losses=[]
means=[]
stds=[]
sess.run(init)
for i in tqdm.tqdm(range(config.itera)):
try:
for j in range(7820):#
batchx,batchy=sess.run([shuffle_batch_x,shuffle_batch_y])
loss =model.train_on_batch(sess,batchx,batchy)
if j % 1000 == 0:
new_x,new_y = dagger(sess,model)
new_y = new_y.reshape(-1,Config.n_classes)
trainx= tf.concat([trainx,new_x],axis=0)
trainy= tf.concat([trainy,new_y],axis=0)
print("step:", j, "loss:", loss)
# saver.save(sess, "model/model_ckpt")
if j%100==0:
losses.append([j,loss])
except tf.errors.OutOfRangeError:
print("")
finally:
coord.request_stop()
coord.join(threads)
policy_name ='experts/Hopper-v1'
#envname ='Ant-v1'
policy_fn = load_policy.load_policy(policy_name+'.pkl')
env = gym.make(config.envname)
rollouts = 20
returns = []
observations = []
actions = []
for _ in range(rollouts):
obs = env.reset()
done = False
totalr = 0.
steps = 0
while not done:
action = model.get_pred(sess, obs[None, :])
obs, r, done, _ = env.step(action)
totalr += r
steps += 1
# if args.render:
#env.render()
if steps >= config.max_steps:
break
returns.append(totalr)
means.append(np.mean(returns))
stds.append(np.std(returns))
print('mean of returns',np.mean(returns))
print('std of returns ',np.std(returns))
df =
|
pd.DataFrame(losses,columns=['j','loss'])
|
pandas.DataFrame
|
from typing import Tuple, List, Optional
import pandas as pd
from algo_battle.domain.wettkampf import Wettkampf
class EventStatistiken:
def __init__(self):
self._daten =
|
pd.DataFrame()
|
pandas.DataFrame
|
# Import pyVPLM packages
from pyvplm.core.definition import PositiveParameter, PositiveParameterSet
from pyvplm.addon import variablepowerlaw as vpl
from pyvplm.addon import pixdoe as doe
from pint import UnitRegistry
import save_load as sl
import pi_format as pif
import csv_export as csv
import constraint_format as csf
import round_minmax as rmm
import constant_pi as cpi
import number_of_coeff as noc
import dependency_plot as dpp
import save_plots as spl
import save_py_func as spf
# Import external libs
import copy
import os
import pandas as pd
from pandas.plotting import scatter_matrix
import plotly.graph_objects as go
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.pylab as plb
import webbrowser
import ipyfilechooser as ipf
import time
from datetime import datetime
import ipywidgets as widgets
import ipyvuetify as v
from IPython.display import display, clear_output
import warnings
import seaborn as sns
from win32gui import GetWindowRect, GetForegroundWindow
# ------------Constants------------------------
from text_list import TEXT_LIST as TL
FORBIDDEN_CHARACTERS = [' ', '|', '*', '/', '-', '+', ',', "#", "!", "$", "£", "%", "^", "&", "?", ";", "ù", "é",
"@", "¤", "µ", "è", "°", "\\", '"', "'"]
FORBIDDEN_CHARACTERS_DESC = ['|', '"', "'", "#"]
FORBIDDEN_PARAMS = ['I', 'gamma', 'beta', 're', 'ln', 'log', 'sqrt', 'arg']
DOE_MULTIPLIER = 10
# ------------Global variables-----------------
WORKDIR = os.path.abspath(os.getcwd())
OUTPUTS = 0
PHYSICAL_PARAMS = None
OLD_PHYSICAL_PARAMS = None
CHOSEN_PI_SET = None
PI_SETS = [None, None, []]
CHOSEN_PI_LIST = []
PI_LISTS = [[], [], []]
DOE_PI_LIST = []
DOE = []
TOTAL_DOE = pd.DataFrame()
FIG = plt.Figure()
AX = FIG.add_subplot(111)
RESULT_DF = pd.DataFrame()
OLD_RESULT = pd.DataFrame()
OLD_PI_SET = []
RESULT_PI = np.array([])
DEPENDENCY_CHECK_STATE = []
OLD_DEPENDENCY_CHECK_STATE = []
REGRESSION_PI_LIST = []
MODELS = {}
REGRESSIONS = []
PI0_PI_LIST = []
"""
This is the code for GUI widgets and their associated functions. The first part contains all functions,
the second part (~line 2600) contains the widgets. These two parts are subdivided by tab name.
"""
# -----------Functions--------------------------------------------------------------------------------------------------
# First Physical Parameters Tab, some Buckingham tab and all Toolbar functions as well as some general helper functions
def check_name(name):
"""
Parameters
----------
name String in name TextField
Returns Boolean : True if the name is valid
-------
"""
if name == '':
name_entry.error_messages = TL[0]
return False
for for_char in FORBIDDEN_CHARACTERS:
if for_char in name:
name_entry.error_messages = f"{TL[1]}: {for_char}"
return False
for for_param in FORBIDDEN_PARAMS:
if name == for_param:
name_entry.error_messages = f"{TL[51]}: {for_param}"
return False
for item in sheet.items:
if item['name'] == name or item['name'].lower() == name:
name_entry.error_messages = TL[2]
return False
return True
def check_desc(desc):
"""
Parameters
----------
desc String in description TextField
Returns Boolean : True if the description is valid
-------
"""
for for_char in FORBIDDEN_CHARACTERS_DESC:
if for_char in desc:
desc_entry.error_messages = f"{TL[3]} : {for_char}"
return False
return True
def check_unit(unit):
"""
Parameters
----------
unit String in unit TextField
Returns Boolean : True if the unit is recognized by pint
-------
"""
if unit == '':
unit_entry.error_messages = TL[4]
return False
base_registry = UnitRegistry()
try:
if unit not in base_registry:
contains_upper = False
for u in unit:
if u.isupper():
contains_upper = True
break
if contains_upper:
unit_entry.error_messages = "Unit not recognized, try in lowercase"
else:
unit_entry.error_messages = TL[5]
return False
except Exception:
unit_entry.error_messages = "Invalid characters"
return False
return True
def check_bounds():
"""
Returns Boolean : True if the bounds in the lower bound and upper bound TextFields are valid
-------
"""
lb = lb_entry.v_model
ub = ub_entry.v_model
lbool = lb is None or lb == ""
ubool = ub is None or ub == ""
if ubool:
ub_entry.error_messages = TL[6]
return False
err_mess = TL[7]
if lbool:
try:
float(ub)
return True
except ValueError:
ub_entry.error_messages = err_mess
return False
else:
brk = False
try:
ub = float(ub)
except ValueError:
ub_entry.error_messages = err_mess
brk = True
try:
lb = float(lb)
except ValueError:
lb_entry.error_messages = err_mess
brk = True
if brk:
return False
if 0 < lb < ub:
return True
else:
neg = False
err_mess = TL[8]
if lb <= 0:
neg = True
lb_entry.error_messages = err_mess
if ub <= 0:
neg = True
ub_entry.error_messages = err_mess
if neg:
return False
else:
err_mess = TL[9]
lb_entry.error_messages = err_mess
ub_entry.error_messages = err_mess
return False
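# Illustrative note (editor addition): with the rules above, an empty lower bound with upper bound
# "5" is accepted (apparently treated as a constant elsewhere in the code), lower/upper bounds
# "2"/"10" are accepted, "0"/"10" is rejected because bounds must be strictly positive, and
# "10"/"2" is rejected because the lower bound must be below the upper bound.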
def add_item(widget, event, data):
"""
Returns Adds parameter specified by the user in the sheet DataTable, if one of the attributes is invalid, shows the
user an error under the TextField
-------
"""
name_entry.error_messages = ''
desc_entry.error_messages = ''
unit_entry.error_messages = ''
lb_entry.error_messages = ''
ub_entry.error_messages = ''
if check_name(name_entry.v_model) and check_desc(desc_entry.v_model) and check_unit(
unit_entry.v_model) and check_bounds():
name = name_entry.v_model
description = desc_entry.v_model
unit = unit_entry.v_model
lb = lb_entry.v_model
if lb:
lower_bound = float(lb_entry.v_model)
else:
lower_bound = None
name = name.upper()
upper_bound = float(ub_entry.v_model)
name_entry.v_model = ''
desc_entry.v_model = ''
unit_entry.v_model = ''
lb_entry.v_model = None
ub_entry.v_model = None
sheet.items = sheet.items + [{"name": name,
"description": description,
"unit": unit,
"lower bound": lower_bound,
"upper bound": upper_bound,
"in/out": "Input"}]
def order_items():
"""
Leaves output physical parameters at the end of the set (least priority to be repetitive)
Returns ordered physical parameters
-------
"""
data = sheet.items
inputs = []
outputs = []
for item in data:
if item["in/out"] == TL[10]:
outputs.append(item)
else:
inputs.append(item)
return inputs + outputs
def gen_parameter_set():
"""
Returns Generates a PositiveParameterSet from the physical parameters in the sheet DataTable, if there are none,
returns None
-------
"""
data = order_items()
if len(data) > 0:
first = True
param_set = {}
for item in data:
if item['lower bound'] is None or item['lower bound'] == "":
bounds = [item['upper bound']]
item['name'] = item['name'].upper()
else:
bounds = [item['lower bound'], item['upper bound']]
param = PositiveParameter(item['name'], bounds, item['unit'], item['description'])
param_set[item['name']] = param
if first:
param_set = PositiveParameterSet(param)
first = False
return param_set
return None
def get_outputs():
"""
Returns int : The number of output parameters specified
-------
"""
global OUTPUTS
n = 0
for item in sheet.items:
if item['in/out'] == TL[10]:
n += 1
OUTPUTS = n
def buckingham():
"""
Returns Shows the set in buck_area and modifies current_set
-------
"""
global PHYSICAL_PARAMS, PI_LISTS, PI_SETS
if PHYSICAL_PARAMS is not None:
# noinspection PyTypeChecker
PI_SETS[0], PI_LISTS[0] = vpl.buckingham_theorem(PHYSICAL_PARAMS, True)
pi_set_str = str(PI_SETS[0])
formatted_pi_set = pif.format_pi_set(pi_set_str)
buck_area.v_model = formatted_pi_set
if force_area.v_model is None or force_area.v_model == "":
force_area.v_model = formatted_pi_set
if check1.v_model:
global CHOSEN_PI_SET, CHOSEN_PI_LIST
CHOSEN_PI_SET = PI_SETS[0]
CHOSEN_PI_LIST = PI_LISTS[0]
update_current_set()
if PI_LISTS[0]:
return True
return False
def force_buckingham(widget, event, data):
"""
Parameters
----------
widget force_buck_btn : button to check pi set
Returns Enables selection of the specified pi set if it is valid
-------
"""
widget.disabled = True
widget.loading = True
if force_buck_btn.children == [TL[11]]:
param_set = gen_parameter_set()
global OUTPUTS
out_n = OUTPUTS
try:
global PI_LISTS
PI_LISTS[1] = pif.format_force_area(force_area.v_model)
global PI_SETS
PI_SETS[1] = vpl.force_buckingham(param_set, *PI_LISTS[1])
if pif.check_outputs(PI_LISTS[1], param_set, out_n):
raise ValueError(TL[12])
force_area.error_messages = ""
force_area.success_messages = TL[13]
check2.disabled = False
force_area.readonly = True
force_area.clearable = False
if ' | ' in force_area.v_model:
force_area.v_model = force_area.v_model.replace(' | ', '\n')
force_area.background_color = "grey lighten-3"
force_eq.disabled = True
force_eq.v_model = ""
force_eq.background_color = "grey lighten-3"
add_pi_btn.disabled = True
force_copy_btn.disabled = True
force_buck_btn.children = [TL[14]]
except Exception as e:
force_area.success_messages = ""
force_area.error_messages = TL[15] + str(e)
else:
force_area.success_messages = ""
global CHOSEN_PI_SET, CHOSEN_PI_LIST
if check2.v_model:
CHOSEN_PI_SET = None
CHOSEN_PI_LIST = []
update_current_set()
check2.disabled = True
check2.v_model = False
force_area.readonly = False
force_area.clearable = True
force_area.background_color = "white"
force_eq.disabled = False
force_eq.background_color = "white"
add_pi_btn.disabled = False
if auto_buck_table.v_model:
force_copy_btn.disabled = False
force_area.messages = ""
force_buck_btn.children = [TL[11]]
widget.loading = False
widget.disabled = False
def automatic_buckingham(widget, event, data):
"""
Parameters
----------
widget auto_buck_btn : button to perform automatic Buckingham analysis
Returns Fills auto_buck_table with the resulting pi sets
-------
"""
widget.disabled = True
widget.loading = True
param_set = gen_parameter_set()
combinator_pi_set, alternative_set_dict = vpl.automatic_buckingham(param_set, True)
global PI_SETS, PI_LISTS, PHYSICAL_PARAMS, OUTPUTS
for n in combinator_pi_set:
PI_SETS[2].append(combinator_pi_set[n][0])
PI_LISTS[2].append(list(combinator_pi_set[n][1]))
items = []
i = 0
j = 1
del_index = []
for exp in alternative_set_dict:
if not pif.check_outputs(PI_LISTS[2][i], PHYSICAL_PARAMS, OUTPUTS):
items.append({"pi set number": j, "expressions": exp})
j += 1
else:
del_index.append(i)
i += 1
del_index.reverse()
for i in del_index:
PI_SETS[2].pop(i)
PI_LISTS[2].pop(i)
auto_buck_table.items = items
if force_buck_btn.children == [TL[11]]:
force_copy_btn.disabled = False
check3.disabled = False
widget.loading = False
widget.disabled = False
def force_copy(widget, event, data):
"""
Returns Copies the selected pi set from auto_buck_table or buck area to force_area
-------
"""
l = len(auto_buck_table.items)
if auto_buck_table.v_model and auto_buck_table.v_model[0]['pi set number']:
pi_set_nb = auto_buck_table.v_model[0]['pi set number']
for i in range(0, l):
if auto_buck_table.items[i]['pi set number'] == pi_set_nb:
force_area.v_model = pif.format_auto_pi_set(auto_buck_table.v_model[0]['expressions'])
break
elif check1.v_model:
force_area.v_model = buck_area.v_model
def check1_change(widget, event, data):
"""
Parameters
----------
event Boolean : state of the checkbox
Returns Modifies current_set with the pi set in buck_area
-------
"""
global CHOSEN_PI_SET, CHOSEN_PI_LIST
if data:
check2.v_model = False
check3.v_model = False
CHOSEN_PI_SET = PI_SETS[0]
CHOSEN_PI_LIST = PI_LISTS[0]
update_current_set()
else:
CHOSEN_PI_SET = None
CHOSEN_PI_LIST = []
update_current_set()
def check2_change(widget, event, data):
"""
Parameters
----------
event Boolean : state of the checkbox
Returns Modifies current_set with the pi set in force_area
-------
"""
global CHOSEN_PI_SET, CHOSEN_PI_LIST
if data:
check1.v_model = False
check3.v_model = False
CHOSEN_PI_SET = PI_SETS[1]
CHOSEN_PI_LIST = PI_LISTS[1]
update_current_set()
else:
CHOSEN_PI_SET = None
CHOSEN_PI_LIST = []
update_current_set()
def check3_change(widget, event, data):
"""
Parameters
----------
event Boolean : state of the checkbox
Returns Modifies current_set with the selected pi set in auto_buck_table
-------
"""
global CHOSEN_PI_SET, CHOSEN_PI_LIST
if data:
check1.v_model = False
check2.v_model = False
l = len(auto_buck_table.items)
if auto_buck_table.v_model:
if auto_buck_table.v_model[0]['pi set number'] is None:
CHOSEN_PI_SET = None
CHOSEN_PI_LIST = []
update_current_set()
else:
pi_set_nb = auto_buck_table.v_model[0]['pi set number']
CHOSEN_PI_SET = PI_SETS[2][pi_set_nb - 1]
CHOSEN_PI_LIST = PI_LISTS[2][pi_set_nb - 1]
for i in range(0, l):
if auto_buck_table.items[i]['pi set number'] == pi_set_nb:
update_current_set()
break
else:
CHOSEN_PI_SET = None
CHOSEN_PI_LIST = []
update_current_set()
def select_auto_pi_set(widget, event, data):
"""
Parameters
----------
data dict: Contains the pi set number of the selected pi set in the automatic buckingham data table
Returns Modifies current set accordingly
-------
"""
global CHOSEN_PI_SET, CHOSEN_PI_LIST
if check3.v_model:
if data['value']:
pi_set_nb = data['item']['pi set number']
CHOSEN_PI_SET = PI_SETS[2][pi_set_nb - 1]
CHOSEN_PI_LIST = PI_LISTS[2][pi_set_nb - 1]
update_current_set()
else:
CHOSEN_PI_SET = None
CHOSEN_PI_LIST = []
update_current_set()
def pi_set_html(pi_set, math=True):
"""
Parameters
----------
pi_set: Pi set in a string form (with " | " separators between pi numbers)
math: display expression as Latex math (default True)
Returns A list of v.HTML widgets that are to be used as children of a v.CardText
-------
"""
if not math:
pi_set = pi_set.replace("**", "°°")
pi_set = pi_set.replace("*", " * ")
pi_set = pi_set.replace("°°", "**")
spt_pi_set = pi_set.split("| ")
card_text_children = []
for pi in spt_pi_set:
card_text_children.append(v.Html(tag='div', children=[pi]))
return card_text_children
else:
pi_set = pi_set.replace("**", "^{")
spt_pi_set = pi_set.split("| ")
for i in range(len(spt_pi_set)):
pi_expr = spt_pi_set[i]
pi_expr = pi_expr.replace(f"pi", f"\pi_", 1)
pi = list(pi_expr)
open_bracket = False
for j in range(len(pi)):
if pi[j] == "{":
open_bracket = True
if pi[j] == "*" and open_bracket:
pi[j] = "}"
open_bracket = False
pi_expr = "".join(pi)
pi_expr = pi_expr.replace("}", "}\\ \cdot \\ ")
pi_expr = pi_expr.replace("*", "\\ \cdot \\ ")
if open_bracket:
pi_expr += "}"
pi_expr = pi_expr.replace("=", "\\ = \\")
spt_pi_set[i] = pi_expr
card_text_children = []
str_latex = r"$"
for pi in spt_pi_set:
str_latex += pi + r"\\"
card_text_children.append(widgets.HTMLMath(str_latex + "$"))
return card_text_children
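# Illustrative note (editor addition): for a single pi expression such as "pi1 = x**2*y",
# pi_set_html returns a list with one HTMLMath widget whose content is roughly
# "$\pi_1 \ = \ x^{2}\ \cdot \ y\\$"; with math=False it instead returns v.Html divs holding the
# plain-text expressions.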
def update_current_set():
"""
Returns Shows the current selected pi set to the user in current_set Card
-------
"""
global CHOSEN_PI_LIST
out_set = pif.pi_list_to_str(CHOSEN_PI_LIST)
if out_set:
current_set.children[0].children = [TL[52]]
current_set.color = "green lighten-3"
else:
current_set.children[0].children = [TL[53]]
current_set.color = "grey lighten-3"
current_set.children[1].children = pi_set_html(out_set)
def del_item(widget, event, data):
"""
Returns Deletes the selected parameter from the sheet data table
-------
"""
if sheet.v_model:
item_name = sheet.v_model[0]['name']
for i in range(len(sheet.items)):
if sheet.items[i]['name'] == item_name:
if i == len(sheet.items):
sheet.items = sheet.items[:-1]
else:
sheet.items = sheet.items[0:i] + sheet.items[i + 1:]
break
def del_all(widget, event, data):
"""
Returns Deletes all parameters from the sheet data table
-------
"""
sheet.items = []
def up_item(widget, event, data):
"""
Returns Moves up the selected parameter in the sheet data table
-------
"""
l = len(sheet.items)
if l >= 2 and sheet.v_model:
item_name = sheet.v_model[0]['name']
for i in range(1, l):
if sheet.items[i]['name'] == item_name:
if i == l:
sheet.items = sheet.items[0:i - 1] + [sheet.items[i]] + [sheet.items[i - 1]]
else:
sheet.items = sheet.items[0:i - 1] + [sheet.items[i]] + [sheet.items[i - 1]] + sheet.items[i + 1:]
break
def down_item(widget, event, data):
"""
Returns Moves down the selected parameter in the sheet data table
-------
"""
l = len(sheet.items)
if l >= 2 and sheet.v_model:
item_name = sheet.v_model[0]['name']
for i in range(0, l - 1):
if sheet.items[i]['name'] == item_name:
if i == l - 1:
sheet.items = sheet.items[0:i] + [sheet.items[i + 1]] + [sheet.items[i]]
else:
sheet.items = sheet.items[0:i] + [sheet.items[i + 1]] + [sheet.items[i]] + sheet.items[i + 2:]
break
def set_as_out(widget, event, data):
"""
Returns Sets the selected parameter as output in the sheet data table
-------
"""
l = len(sheet.items)
if l > 0 and sheet.v_model:
item_name = sheet.v_model[0]['name']
for i in range(0, l):
if sheet.items[i]['name'] == item_name:
if sheet.items[i]['in/out'] == 'Input':
if sheet.items[i]['lower bound'] is None or sheet.items[i]['lower bound'] == "":
const_alert.value = True
else:
sheet.items = sheet.items[0:i] + [{"name": sheet.items[i]["name"],
"description": sheet.items[i]["description"],
"unit": sheet.items[i]["unit"],
"upper bound": sheet.items[i]["upper bound"],
"lower bound": sheet.items[i]["lower bound"],
'in/out': 'Output'}] + sheet.items[i + 1:]
else:
sheet.items = sheet.items[0:i] + [{"name": sheet.items[i]["name"],
"description": sheet.items[i]["description"],
"unit": sheet.items[i]["unit"],
"upper bound": sheet.items[i]["upper bound"],
"lower bound": sheet.items[i]["lower bound"],
'in/out': 'Input'}] + sheet.items[i + 1:]
break
def error_end(widget, event, data):
"""
Parameters
----------
widget Current widget
Returns Hides the error messages on the current widget
-------
"""
widget.error_messages = ""
def pint_link(widget, event, data):
"""
Returns Opens browser to a page with all pint base units
-------
"""
webbrowser.open_new(r"https://raw.githubusercontent.com/hgrecco/pint/master/pint/default_en.txt")
def new_log(log, success: bool):
"""
Parameters
----------
log The string to be shown in the logs field
success If true, the log will be displayed in green (in red if False)
Returns Replaces previous log with current log in the logs field
-------
"""
if success:
logs_card.class_ = logs_card.class_ + "; green--text"
logs_card.children = [v.Html(tag='div', children=[log], class_="text-left py-2 px-2")]
else:
logs_card.class_ = logs_card.class_ + "; red--text"
logs_card.children = [v.Html(tag='div', children=[log], class_="text-left py-2 px-2")]
def choose_dir(widget, event, data):
"""
Returns Opens the dialog_dir dialog box and initializes it
-------
"""
global WORKDIR
dialog_dir.children[0].children[1].children = ["Current work directory: " + WORKDIR]
dialog_dir.v_model = True
def hide_dir(chooser):
"""
Returns Effectively changes the current work directory (WORKDIR) and closes the dialog_dir dialog box
-------
"""
global WORKDIR
old_workdir = WORKDIR
spl.add_temp(old_workdir)
WORKDIR = fc_dir.selected
spl.move_temp(old_workdir, WORKDIR)
dialog_dir.v_model = False
new_log(f"Work directory: {WORKDIR}", True)
dir_btn.color = "green"
time.sleep(0.5)
dir_btn.color = "default"
def save(widget, event, data):
"""
Parameters
----------
widget The save button in the toolbar
Returns Creates a new pyVPLM save in the work directory with a default name containing date and time
-------
"""
widget.disabled = True
global WORKDIR
now = datetime.now()
dt_string = now.strftime("%d-%m-%y_%H-%M-%S")
file_path = WORKDIR + "\\pyVPLM_" + dt_string + ".txt"
widget.disabled = True
widget.loading = True
if auto_buck_table.v_model and auto_buck_table.v_model[0]['pi set number'] is not None:
pi_set_nb = auto_buck_table.v_model[0]['pi set number']
else:
pi_set_nb = 0
force_state = force_buck_btn.children == [TL[11]]
tab2_state = [check1.v_model, check2.v_model, check3.v_model, force_state, pi_set_nb]
result = [[header["text"] for header in result_data.headers], result_data.items]
doe_params = [select_DOE.v_model, select_log.v_model, anticipated_mo_entry.v_model]
reg_state = [select_pi0.v_model, select_reg_criteria.v_model, model_order_entry.v_model, select_reg_type.v_model,
nb_terms_slider.v_model]
sl.save(file_path, sheet.items, buck_area.v_model, force_area.v_model, auto_buck_table.items, tab2_state,
PHYSICAL_PARAMS, PI_SETS, CHOSEN_PI_SET, PI_LISTS, CHOSEN_PI_LIST, phy_const_area.v_model,
pi_const_area.v_model, doe_params, DOE, result, threshold_slider.v_model, DEPENDENCY_CHECK_STATE,
REGRESSION_PI_LIST, reg_state, MODELS)
widget.loading = False
widget.disabled = False
new_log(f"Saved at: {file_path}", True)
widget.color = "green"
time.sleep(0.5)
widget.color = "default"
def save_as(widget, event, data):
"""
Returns Shows the save dialog
-------
"""
global WORKDIR
dialog.children[0].children[1].children = ["Current work directory: " + WORKDIR]
dialog.v_model = True
def hide_save_as(widget, event, data):
"""
Parameters
----------
widget The OK button in the save dialog
Returns Saves a .txt file with all current user input to the specified path and hides the save dialog
-------
"""
global WORKDIR
save_as_tf.error_messages = ""
if save_as_tf.v_model.strip():
file_path = WORKDIR + "\\" + save_as_tf.v_model + ".txt"
widget.disabled = True
widget.loading = True
if auto_buck_table.v_model and auto_buck_table.v_model[0]['pi set number'] is not None:
pi_set_nb = auto_buck_table.v_model[0]['pi set number']
else:
pi_set_nb = 0
force_state = force_buck_btn.children == [TL[11]]
tab2_state = [check1.v_model, check2.v_model, check3.v_model, force_state, pi_set_nb]
result = [[header["text"] for header in result_data.headers], result_data.items]
doe_params = [select_DOE.v_model, select_log.v_model, anticipated_mo_entry.v_model]
reg_state = [select_pi0.v_model, select_reg_criteria.v_model, model_order_entry.v_model,
select_reg_type.v_model, nb_terms_slider.v_model]
sl.save(file_path, sheet.items, buck_area.v_model, force_area.v_model, auto_buck_table.items, tab2_state,
PHYSICAL_PARAMS, PI_SETS, CHOSEN_PI_SET, PI_LISTS, CHOSEN_PI_LIST, phy_const_area.v_model,
pi_const_area.v_model, doe_params, DOE, result, threshold_slider.v_model, DEPENDENCY_CHECK_STATE,
REGRESSION_PI_LIST, reg_state, MODELS)
dialog.v_model = False
widget.disabled = False
widget.loading = False
new_log(f"Saved at: {file_path}", True)
save_as_btn.color = "green"
time.sleep(0.5)
save_as_btn.color = "default"
else:
save_as_tf.error_messages = "please specify a file name"
def save_plots(widget, event, data):
"""
Parameters
----------
widget The save all plots button from the toolbar
Returns Saves all the plots from the temp directory into the work directory, with default names containing date and time
-------
"""
try:
spl.save_all_plots(WORKDIR)
new_log(f"All plots saved at: {WORKDIR}", True)
widget.color = "green"
time.sleep(1)
widget.color = "default"
except FileNotFoundError:
new_log(f"No plots to save", False)
widget.color = "red"
time.sleep(1)
widget.color = "default"
def load(widget, event, data):
"""
Returns Shows the load dialog
-------
"""
global WORKDIR
fc_load.default_path = WORKDIR
dialog2.v_model = True
def hide_ld(chooser):
"""
Parameters
----------
chooser The file chooser (fc_load) in the load dialog
Returns Loads a .txt file and modifies the state of all widgets accordingly, hides the load dialog
-------
"""
file_path = fc_load.selected
if file_path:
global OLD_PHYSICAL_PARAMS, PHYSICAL_PARAMS, OUTPUTS, PI_SETS, CHOSEN_PI_SET, PI_LISTS, CHOSEN_PI_LIST,\
RESULT_DF, RESULT_PI, DEPENDENCY_CHECK_STATE, REGRESSION_PI_LIST, MODELS
try:
load_tuple = sl.load(file_path)
except FileNotFoundError:
fc_load.reset()
dialog2.v_model = False
new_log(f"Failed to load, file does not exist", False)
load_btn.color = "red"
time.sleep(0.5)
load_btn.color = "default"
return -1
if len(load_tuple) != 20:
fc_load.reset()
dialog2.v_model = False
new_log(f"Failed to load, invalid file", False)
load_btn.color = "red"
time.sleep(0.5)
load_btn.color = "default"
return -1
dialog2.v_model = False
fc_load.reset()
load_btn.color = "green"
new_log(f"Loaded: {file_path}", True)
sheet.items = load_tuple[0]
buck_area.v_model = load_tuple[1]
force_area.v_model = load_tuple[2]
auto_buck_table.items = load_tuple[3]
tab2_state = load_tuple[4]
PHYSICAL_PARAMS = load_tuple[5]
OLD_PHYSICAL_PARAMS = load_tuple[5]
OUTPUTS = load_tuple[6]
PI_SETS = load_tuple[7]
CHOSEN_PI_SET = load_tuple[8]
PI_LISTS = load_tuple[9]
CHOSEN_PI_LIST = load_tuple[10]
update_current_set()
check1.v_model = tab2_state[0]
check2.v_model = tab2_state[1]
check3.v_model = tab2_state[2]
if tab2_state[3]:
force_area.error_messages = ""
force_area.success_messages = ""
check2.disabled = True
check2.v_model = False
force_area.readonly = False
force_area.clearable = True
force_area.background_color = "white"
force_eq.disabled = False
force_eq.background_color = "white"
add_pi_btn.disabled = False
if auto_buck_table.v_model:
force_copy_btn.disabled = False
force_buck_btn.children = [TL[11]]
else:
force_area.error_messages = ""
force_area.success_messages = TL[18]
check2.disabled = False
force_area.readonly = True
force_area.clearable = False
force_area.background_color = "grey lighten-3"
force_eq.disabled = True
force_eq.v_model = ""
force_eq.background_color = "grey lighten-3"
add_pi_btn.disabled = True
force_copy_btn.disabled = True
force_buck_btn.children = [TL[14]]
if tab2_state[4] == 0:
check3.disabled = True
else:
check3.disabled = False
setattr(auto_buck_table, 'v_model', [auto_buck_table.items[tab2_state[4] - 1]])
anticipated_mo_entry.v_model = load_tuple[12][2]
change_tab_3()
phy_const_area.v_model = load_tuple[11][0]
pi_const_area.v_model = load_tuple[11][1]
select_DOE.v_model = load_tuple[12][0]
select_log.v_model = load_tuple[12][1]
does = load_tuple[13]
if does:
doeX, doePi, doePi_all, doePi_nearest, doePi_all_obj, doePI_active = does
reduced_parameter_set, reduced_pi_set = PHYSICAL_PARAMS, CHOSEN_PI_SET
for out in list(PHYSICAL_PARAMS.dictionary.keys())[-OUTPUTS:]:
reduced_parameter_set, reduced_pi_set = vpl.reduce_parameter_set(reduced_parameter_set,
reduced_pi_set,
elected_output=out)
init_doe_plots(doeX, reduced_parameter_set, doePi, doePi_all, doePi_nearest, doePi_all_obj, doePI_active,
reduced_pi_set)
if len(doe_box.children) == 3:
doe_box.children = list(doe_box.children) + [exp_panel_doe]
result_headers, result_items = load_tuple[14]
result_data.headers = csv.format_headers(result_headers)
result_data.items = result_items
if result_items:
RESULT_DF = pd.DataFrame(result_items)
func_x_to_pi = vpl.declare_func_x_to_pi(PHYSICAL_PARAMS, CHOSEN_PI_SET)
ordered_columns = []
for key in PHYSICAL_PARAMS.dictionary:
ordered_columns.append(f"{key} [{PHYSICAL_PARAMS.dictionary[key].defined_units}]")
re_ordered_result = RESULT_DF[ordered_columns]
RESULT_PI = func_x_to_pi(re_ordered_result.to_numpy(dtype=float))
threshold_slider.v_model = load_tuple[15]
DEPENDENCY_CHECK_STATE = load_tuple[16]
REGRESSION_PI_LIST = load_tuple[17]
reg_state = load_tuple[18]
if reg_state:
select_pi0.v_model = reg_state[0]
select_reg_criteria.v_model = reg_state[1]
model_order_entry.v_model = int(reg_state[2])
select_reg_type.v_model = reg_state[3]
MODELS = load_tuple[19]
if MODELS:
regression_models(models_btn, 0, 0, slider_state=int(reg_state[4]))
if tabs.v_model == 5:
change_tab_5()
if tabs.v_model == 6:
change_tab_6()
time.sleep(0.5)
load_btn.color = "default"
else:
dialog2.v_model = False
# --------- Buckingham Tab Functions -----------------------------------------------------------------------------------
def add_pi(widget, event, data):
"""
Returns Adds the pi number specified in force_eq to force_area
-------
"""
index = pif.get_pi_index(force_area.v_model)
if force_eq.v_model is None or force_eq.v_model == "":
force_eq.error_messages = TL[21]
else:
exp = pif.format_input(force_eq.v_model, index)
if force_area.v_model is not None:
force_area.v_model += exp + "\n"
else:
force_area.v_model = exp + "\n"
force_eq.v_model = ""
def tab2_reload():
"""
Returns Reloads Buckingham Theorem Tab
-------
"""
global CHOSEN_PI_SET, CHOSEN_PI_LIST, PI_SETS, PI_LISTS
CHOSEN_PI_SET = None
PI_SETS = [None, None, []]
CHOSEN_PI_LIST = []
PI_LISTS = [[], [], []]
update_current_set()
buck_area.v_model = ""
check1.v_model = True
force_buck_btn.disabled = False
force_buck_btn.children = [TL[11]]
force_eq.v_model = ""
force_eq.error_messages = ""
force_area.v_model = ""
force_area.success_messages = ""
force_area.error_messages = ""
force_area.readonly = False
force_area.clearable = True
add_pi_btn.disabled = False
force_copy_btn.disabled = False
check2.disabled = True
check2.v_model = False
auto_buck_btn.disabled = False
auto_buck_table.items = []
check3.disabled = True
check3.v_model = False
def tab2_disable():
"""
Returns Disables Buckingham Theorem Tab
-------
"""
force_buck_btn.disabled = True
auto_buck_btn.disabled = True
check1.disabled = True
check1.v_model = False
def tab2_enable():
"""
Returns Enables Buckingham Theorem Tab
-------
"""
force_buck_btn.disabled = False
auto_buck_btn.disabled = False
check1.disabled = False
# -----DOE Tab functions------------------------------------------------------------------------------------------------
def add_phy_const(widget, event, data):
"""
Returns Adds a physical constraint from the text field to the text area
-------
"""
phy_const_entry.error_messages = ""
if phy_const_entry.v_model is None or phy_const_entry.v_model == "":
phy_const_entry.error_messages = TL[21]
else:
exp = phy_const_entry.v_model
if phy_const_area.v_model is not None:
phy_const_area.v_model += exp + "\n"
else:
phy_const_area.v_model = exp + "\n"
phy_const_entry.v_model = ""
def add_pi_const(widget, event, data):
"""
Returns Adds a pi constraint from the text field to the text area
-------
"""
pi_const_entry.error_messages = ""
if pi_const_entry.v_model is None or pi_const_entry.v_model == "":
pi_const_entry.error_messages = TL[21]
else:
exp = pi_const_entry.v_model
if pi_const_area.v_model is not None:
pi_const_area.v_model += exp + "\n"
else:
pi_const_area.v_model = exp + "\n"
pi_const_entry.v_model = ""
def nb_of_terms():
"""
Returns The maximum number of terms for the given model order and the amount of input pi numbers
-------
"""
n = int(anticipated_mo_entry.v_model)
p = len(CHOSEN_PI_LIST) - OUTPUTS
return noc.coefficient_nb(n, p, approx=(p >= 2*n and n > 10))
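# Illustrative note (editor addition): noc.coefficient_nb is project-specific, but for a full
# polynomial model of order n in p variables the coefficient count is typically comb(n + p, p),
# e.g. n=2 and p=3 gives comb(5, 3) = 10 terms; the approx flag presumably switches to a cheaper
# approximation when n and p are both large.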
def mo_to_size(widget, event, data):
"""
Parameters
----------
widget Anticipated model order field
Returns Sets the default wished size to 10x max number of terms
-------
"""
nb_terms = nb_of_terms()
wished_size_entry.v_model = DOE_MULTIPLIER * nb_terms
model_order_entry.v_model = widget.v_model
widget.messages = ""
wished_size_entry.messages = ""
def check_size(widget, event, data):
"""
Returns Checks if the wished size is not too low or too high compared to the default wished size and shows warnings
-------
"""
expected = DOE_MULTIPLIER * nb_of_terms()
if int(wished_size_entry.v_model) > int(2*expected) or\
int(0.5 * expected) > int(wished_size_entry.v_model) >= int(expected/DOE_MULTIPLIER):
wished_size_entry.messages = "Warning: size not advised for model order"
anticipated_mo_entry.messages = "Warning: size not advised for model order"
elif int(wished_size_entry.v_model) < int(expected/DOE_MULTIPLIER):
wished_size_entry.messages = "Warning: size too low for model order, model computation will fail"
anticipated_mo_entry.messages = "Warning: size too low for model order, model computation will fail"
else:
wished_size_entry.messages = ""
anticipated_mo_entry.messages = ""
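# Illustrative note (editor addition): if nb_of_terms() returns 10, the default wished size is
# DOE_MULTIPLIER * 10 = 100; a wished size of 250 or 40 triggers the "not advised" warning,
# a wished size below 10 triggers the "too low" warning, and 100 passes with no message.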
def gen_doe(widget, event, data):
"""
Returns Displays the generate DOE dialog box and initializes it
-------
"""
global WORKDIR
dialog3.v_model = True
dialog3.children[0].children[1].children = ["Current work directory: " + WORKDIR]
now = datetime.now()
dt_string = now.strftime("%d-%m-%y_%H-%M-%S")
doe_tf.v_model = "pyVPLM_" + dt_string
def customize_2d_plot(widget, event, data):
"""
Parameters
----------
widget The current range slider or one of the two selection fields (for the axis)
Returns Redraws the customizable 2D plot according to the selected axes and slider ranges
-------
"""
global AX, TOTAL_DOE
widget.loading = True
new_df = TOTAL_DOE
i = 0
for col in new_df:
[col_min, col_max] = range_sliders.children[2*i + 1].v_model
new_df = new_df[(new_df[col] >= col_min) & (new_df[col] <= col_max)]
i += 1
with customizable_2d_plot_output:
clear_output(wait=True)
AX.clear()
AX.set_xlabel(select_2d_x.v_model)
AX.set_ylabel(select_2d_y.v_model)
AX.plot(new_df[select_2d_x.v_model], new_df[select_2d_y.v_model], 'o')
display(AX.figure)
widget.loading = False
def init_doe_plots(doeX, parameter_set, doePi, doePi_all, doePi_nearest, doePi_all_obj, doePI_active, pi_set, log=True):
"""
Parameters
----------
doeX numpy array with the DOE of physical parameters
parameter_set PositiveParameterSet with all input physical parameters
doePi numpy array with the DOE of pi numbers (elected points)
doePi_all numpy array with the DOE of pi numbers (all points)
doePi_nearest numpy array with the DOE of pi numbers (3 nearest from objective points)
doePi_all_obj numpy array with the DOE of pi numbers (all objective points)
doePI_active numpy array with the DOE of pi numbers (active objective points)
pi_set PositiveParameterSet with all input pi numbers
log Toggles display in log space for all plots
Returns Initializes all DOE plots
-------
"""
spl.add_temp(WORKDIR)
_, _, ww, _ = GetWindowRect(GetForegroundWindow())
error = False
if log:
doeX = np.log10(doeX)
doePi = np.log10(doePi)
doePi_all = np.log10(doePi_all)
doePi_nearest = np.log10(doePi_nearest)
doePi_all_obj = np.log10(doePi_all_obj)
doePI_active = np.log10(doePI_active)
columns = []
constants = []
for key in parameter_set.dictionary:
if log:
column_name = f"log10({key}) [{parameter_set.dictionary[key].defined_units}]"
else:
column_name = f"{key} [{parameter_set.dictionary[key].defined_units}]"
columns.append(column_name)
if len(parameter_set.dictionary[key].defined_bounds) == 0:
constants.append(column_name)
df = pd.DataFrame(data=doeX, columns=columns)
df = df.drop(labels=constants, axis=1)
phy_scatter_matrix_output.clear_output()
with phy_scatter_matrix_output:
try:
plt.rcParams['axes.labelsize'] = 14
sm1 = scatter_matrix(df, figsize=(30*ww/1928, 30*ww/1928), alpha=0.9, diagonal="kde")
for i in range(np.shape(sm1)[0]):
for j in range(np.shape(sm1)[1]):
if i < j:
sm1[i, j].set_visible(False)
elif i == j:
x_ = sm1[i, j].lines[0].get_xdata()
y_ = sm1[i, j].lines[0].get_ydata()
sm1[i, j].fill_between(x_, y_, alpha=0.54)  # little reference
try:
plt.savefig(WORKDIR + "\\temp\\phy_scatter_matrix.pdf")
except Exception:
new_log("Failed to save phy_scatter_matrix.pdf in \\temp", False)
plt.show()
except ValueError:
error = True
columns_2 = []
for key in pi_set.dictionary:
if log:
columns_2.append("log10(" + key + ")")
else:
columns_2.append(key)
df_2 =
|
pd.DataFrame(data=doePi, columns=columns_2)
|
pandas.DataFrame
|
#!/usr/local/bin/python
# coding: utf-8
import sys
import re
import csv
import pandas as pd
import bs4 as bs
import numpy as np
from bs4 import BeautifulSoup
from bs4.element import Tag
from pandas import DataFrame
__author__ = "<NAME>"
__version__ = "0.1-dev"
__email__ = "<EMAIL>"
__license__ = "GPLv3"
# regular expression to get the id of the corresponding page
page_id_regex = r"facs_([\d]*)_|$"
# Chars that will be removed from entities
bad_chars = '[]'
bad_chars_trans = str.maketrans("", "", bad_chars)
def load_tei_file(filename: str) -> BeautifulSoup:
"""Loads TEI File and returns BeatufiulSoup
Args:
filename (str): Name of the file to load
Returns:
BeautifulSoup Object
Raises:
Exception
"""
with open(filename, "r") as f:
content = f.read()
return bs.BeautifulSoup(content, 'lxml')
def clean_string(text: str) -> str:
"""Removes bad characters from given string
Args:
text (str): String to remove bad characters from
Returns:
str
"""
return text.translate(bad_chars_trans)
def clean_price(price_string: str) -> float:
"""Cleans prices and converts them to floats or NaN if it fails
Args:
price_string (str): String containing price that will be converted to float
Returns:
float
"""
price = np.nan
try:
price_string = price_string.replace(",", ".")
price = float(price_string)
except Exception:
pass
return price
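# Illustrative note (editor addition): clean_price("1,50") returns 1.5 (the comma decimal
# separator is converted), while an unparsable string such as "n/a" silently falls back to np.nan.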
def get_entry_id(entry: Tag) -> str:
"""Extracts the ID of a TEI elemnt
Args:
entry (Tag): Element
Returns:
str: ID
"""
try:
if entry.parent.name != 'l':
return None
return entry.parent['facs'].replace("#", "")
except Exception:
return None
def get_page_id(entry_id: str) -> str:
"""Get the number of the page of the element
Args:
entry_id (str): ID of the element
Returns:
str: Number of the page
"""
if isinstance(entry_id, str):
page_id_matches = re.findall(page_id_regex, entry_id)
page_id = page_id_matches[0] if len(page_id_matches) > 0 else None
return page_id
return None
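# Illustrative note (editor addition): for a hypothetical ID like "facs_0013_r2", page_id_regex
# captures "0013", so get_page_id("facs_0013_r2") returns "0013"; non-string input returns None.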
def get_tags(soup: BeautifulSoup, tag_name: str) -> DataFrame:
"""Process soup and extract all elements of type tag_name
Args:
soup (BeautifulSoup): content of the TEI file
tag_name (str): Name of the element
Returns:
DataFrame: Frame containing a row per element
"""
df = pd.DataFrame(columns=['page_id', 'value'])
query = soup.find_all(tag_name)
for tag in query:
entry_id = get_entry_id(tag)
page_id = get_page_id(entry_id)
value = tag.text
df = df.append({'page_id': page_id, 'value': value}, ignore_index=True)
return df
def process(soup: BeautifulSoup) -> None:
"""Process content of TEI file and extract data
Args:
soup (BeautifulSoup): Soup of the TEI file
Returns:
None
Raises:
Exception
"""
# init dataframes
items = pd.DataFrame(columns=['description'])
entries = pd.DataFrame(columns=['page_id', 'item_id', 'price', 'quantity'])
zones =
|
pd.DataFrame(columns=['entry_id', 'x1', 'y1', 'x2', 'y2'])
|
pandas.DataFrame
|
#!/usr/bin/env python
# coding: utf-8
# # Data manipulation - II
# ## *DataFrames*
#
# The *DataFrame* is the second fundamental structure of *pandas*. A *DataFrame*:
# - is a table, i.e. it is two-dimensional;
# - has each column formed as a *pandas* *Series*;
# - may hold *Series* containing different data types.
# In[1]:
import numpy as np
import pandas as pd
# ## Creating a *DataFrame*
# The standard way to create a *DataFrame* is through the function of the same name.
#
# ```python
# df_exemplo = pd.DataFrame(dados_de_interesse, index = indice_de_interesse,
#                            columns = colunas_de_interesse)
# ```
# When creating a *DataFrame*, we can provide
# - `index`: labels for the rows (the *index* attribute of the *Series*).
# - `columns`: labels for the columns (the *name* attribute of the *Series*).
# In the _template_, `dados_de_interesse` can be
#
# * a dictionary of:
#   * one-dimensional *numpy* *arrays*;
#   * lists;
#   * dictionaries;
#   * *pandas* *Series*.
# * a two-dimensional *numpy* *array*;
# * a *pandas* *Series*;
# * another *DataFrame*.
# ### *DataFrame* from dictionaries of *Series*
#
# With this creation method, the *Series* in the dictionary do not need to have the same number of elements. The *index* of the *DataFrame* will be given by the **union** of the *index* of all the *Series* contained in the dictionary.
# Example:
# In[2]:
serie_Idade = pd.Series({'Ana':20, 'João': 19, 'Maria': 21, 'Pedro': 22}, name="Idade")
# In[3]:
serie_Peso = pd.Series({'Ana':55, 'João': 80, 'Maria': 62, 'Pedro': 67, 'Túlio': 73}, name="Peso")
# In[4]:
serie_Altura = pd.Series({'Ana':162, 'João': 178, 'Maria': 162, 'Pedro': 165, 'Túlio': 171}, name="Altura")
# In[5]:
dicionario_series_exemplo = {'Idade': serie_Idade, 'Peso': serie_Peso, 'Altura': serie_Altura}
# In[6]:
df_dict_series = pd.DataFrame(dicionario_series_exemplo)
# In[7]:
df_dict_series
# > Compare this result with building a spreadsheet by the usual means. Note how much flexibility there is to create or modify a table.
#
# Let us look at examples of how to access ranges of data in the table.
# In[8]:
pd.DataFrame(dicionario_series_exemplo, index=['João','Ana','Maria'])
# In[9]:
pd.DataFrame(dicionario_series_exemplo, index=['Ana','Maria'], columns=['Altura','Peso'])
# In this example, we add the `IMC` (BMI) column, still without computed values.
# In[10]:
pd.DataFrame(dicionario_series_exemplo, index=['Ana','Maria','Paula'],
columns=['Peso','Altura','IMC'])
# In[11]:
df_exemplo_IMC = pd.DataFrame(dicionario_series_exemplo,
columns=['Peso','Altura','IMC'])
# Now we show how the IMC values can be computed directly through vectorized computation over the *Series*.
# In[12]:
df_exemplo_IMC['IMC'] = round(df_exemplo_IMC['Peso']/(df_exemplo_IMC['Altura']/100)**2,2)
# In[13]:
df_exemplo_IMC
# ### *DataFrame* from dictionaries of lists or *numpy* *arrays*
#
# With this creation method, the *arrays* or lists **must** have the same length. If the *index* is not provided, it will be assigned in the same way as for a *Series* object.
# Example with a dictionary of lists:
# In[14]:
dicionario_lista_exemplo = {'Idade': [20,19,21,22,20],
'Peso': [55,80,62,67,73],
'Altura': [162,178,162,165,171]}
# In[15]:
pd.DataFrame(dicionario_lista_exemplo)
# More examples:
# In[16]:
pd.DataFrame(dicionario_lista_exemplo, index=['Ana','João','Maria','Pedro','Túlio'])
# Examples with a dictionary of *numpy* *arrays*:
# In[17]:
dicionario_array_exemplo = {'Idade': np.array([20,19,21,22,20]),
'Peso': np.array([55,80,62,67,73]),
'Altura': np.array([162,178,162,165,171])}
# In[18]:
pd.DataFrame(dicionario_array_exemplo)
# More examples:
# In[19]:
pd.DataFrame(dicionario_array_exemplo, index=['Ana','João','Maria','Pedro','Túlio'])
# ### *DataFrame* from a *pandas* *Series*
#
# In this case, the *DataFrame* will have the same *index* as the *pandas* *Series* and a single column.
# In[20]:
series_exemplo =
|
pd.Series({'Ana':20, 'João': 19, 'Maria': 21, 'Pedro': 22, 'Túlio': 20})
|
pandas.Series
|
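# Editor's note: a minimal, self-contained sketch (not part of the original notebook) completing
# the point above -- a DataFrame built from a single named Series keeps the Series index and has
# exactly one column, named after the Series.
import pandas as pd
serie_nomeada = pd.Series({'Ana': 20, 'João': 19, 'Maria': 21, 'Pedro': 22, 'Túlio': 20}, name='Idade')
df_uma_coluna = pd.DataFrame(serie_nomeada)
print(df_uma_coluna)  # index: Ana, João, Maria, Pedro, Túlio; single column: 'Idade'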
"""
Script to make boxplots for regions with vs without uces
<NAME>
Feb 2018
Copyright 2018 Harvard University, Wu Lab
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
In:
primary - uce file
secondary - file with list of domain filenames (give the size of the regions with/without uces)
tertiary - genes (optional - if included will print the number of elements in this file per domain with/without uces)
quinary - mouse uces in original uce coordinates (optional - if included will add another box for regions that are only part of this subset)
Out:
pdf file with each of the domains in a separate subplot, and all of them combined in the final subplot
To Do:
make the gene boxplots not explicit, but an argument to supply
"""
import argparse
import pandas as pd
import pybedtools as pbt
import matplotlib.gridspec as gridspec
import matplotlib.patches as patches
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from itertools import cycle
import matplotlib
import numpy as np
from scipy import stats
import math
from numpy import nan as Nan
# set args
def get_args():
parser = argparse.ArgumentParser(description="Description")
parser.add_argument("file",type=str,help='the primary element file') # UCEs
parser.add_argument("-s","--secondaryfeatures",required=True,type=argparse.FileType('rU'),help="a file with a list of file names with the secondary features to query") # Domains
parser.add_argument("-t","--tertiaryfeature",required=False,type=str,help="the tertiary features file")# Genes
parser.add_argument("-q","--quinaryfeature",required=False,type=str,help="the quinary elements file - a subset of the primary features")# Mouse UCEs
parser.add_argument('-n',"--stringname",type=str,help='string to add to the outfile name')
return parser.parse_args()
# get bt features
def get_bedtools_features(strFileName):
return pbt.BedTool(strFileName)
# intersect a file by how many times a feature on b is in the interval
def intersect_bedfiles_c_true(afile,bfile):
return afile.intersect(bfile,c=True)
# convert bedtool to panda
def convert_bedtools_to_panda(btfeature):
return pd.read_table(btfeature.fn,header=None)
# coordinate labels and size
def label_coordinate_columns(pdfeature):
pdfeature['size'] = pdfeature.loc[:,2].astype(int)-pdfeature.loc[:,1].astype(int)
pdfeature.columns.values[0]='chr'
pdfeature.columns.values[1]='start'
pdfeature.columns.values[2]='end'
return pdfeature
# create panda for overlap count datasets
def count_overlap_df(secondary,file,label):
pdintersect = intersect_bedfiles_c_true(secondary,file)
pdfeatures = convert_bedtools_to_panda(pdintersect)
pdcoordinates = label_coordinate_columns(pdfeatures)
pdcoordinates.columns.values[3]='intersect_{0}'.format(label)
return pdcoordinates
# run primary, tertiary, quinary overlaps
def run_overlaps_for_ptq_against_s(secondary,pfile,tfile,qfile):
pdprimary = count_overlap_df(secondary,pfile,'primary') # make the pandas data sets for the count overlaps
concat = pdprimary # rename to preserve
if tfile: # if optional arguments, add to panda
pdtertiary = count_overlap_df(secondary,tfile,'tertiary')
concat = concat.merge(pdtertiary,how='left',on=['chr','start','end','size'])
concat['density_tertiary'] = concat['intersect_tertiary']/concat['size']
if qfile: # if optional arguments, add to panda
pdquinary = count_overlap_df(secondary,qfile,'quinary')
concat = concat.merge(pdquinary,how='left',on=['chr','start','end','size'])
concat['size'] /= 1000.
return concat
# move elements without any overlaps
def remove_rows_with_no_overlaps(overlaps,column):
return overlaps[overlaps[column]!=0]
# chunk data into number of graphs per page
def chunks(l, n):
for i in range(0, len(l), n):
yield l[i:i + n]
# format the with/without regions for graphing
def format_with_without_data_for_boxplot(pdfeatures,column,pfile,qfile):
dropzero = (pdfeatures.loc[pdfeatures['intersect_primary'] > 0])
sumdrop = dropzero[column].reset_index(drop=False)
sumdrop['region'] = 'With UCEs'
keepzero = (pdfeatures.loc[pdfeatures['intersect_primary'] < 1])
sumkeep = keepzero[column].reset_index(drop=False)
sumkeep['region'] = 'Without UCEs'
tertiarysum = pd.concat([sumdrop,sumkeep])
if qfile:
keepquin = (pdfeatures.loc[pdfeatures['intersect_quinary'] > 0])
sumquin = keepquin[column].reset_index(drop=False)
sumquin['region'] = 'Mouse UCEs'
tertiarysum = pd.concat([tertiarysum,sumquin])
tertiarysum.drop(columns=['index'],inplace=True)
return tertiarysum
# set the number of lines to darken on the boxplot
def set_num_lines_to_color(qfile):
if qfile:
numboxes,numlines = 3,9
else:
numboxes,numlines = 2,8
return numboxes,numlines
# convert panda to bedtool
def panda_to_bedtool(panda):
arArFeatures = panda.values.tolist()
btFeatures = get_bedtools_features(arArFeatures)
return btFeatures
# get standard deviation, from ruth's random region script
def getPopSD(arObservedOverlaps):
floatLen = float(len(arObservedOverlaps))
floatMean = float(sum(arObservedOverlaps))/len(arObservedOverlaps)
dSumOfSquares = sum([((float(number) - floatMean) ** 2) for number in arObservedOverlaps])
dVariance = float(dSumOfSquares) / floatLen
return math.sqrt(dVariance)
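# Illustrative note (editor addition): getPopSD([2, 4, 4, 4, 5, 5, 7, 9]) returns 2.0
# (population variance 4.0), i.e. the population standard deviation rather than the
# sample (n-1) estimate.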
# ks test from ruth's random region script
def KSTest(aOverlapBP):
mean = float(sum(aOverlapBP)) / len(aOverlapBP)
sd = getPopSD(aOverlapBP)
rvNormMatched = stats.norm.rvs(loc=mean, scale=sd, size=len(aOverlapBP))
npArOverlapBP = np.array(aOverlapBP)
ksStat, KsPval = stats.ks_2samp(npArOverlapBP, rvNormMatched)
if KsPval <= 0.05:
strKSresult = "No"
else:
strKSresult = "Yes"
return ksStat, KsPval, strKSresult
# save panda to file with mode 'a' for appending
def save_panda(pdData,strFilename):
pdData.to_csv(strFilename,sep='\t',index=True,mode='a')
# run ks test for normal distribution and choose appropriate stats test
def run_appropriate_test(pdgroup,yvalue,qfile,sfile,statsname):
withuces = pdgroup[yvalue].loc[pdgroup['region']=='With UCEs']
withoutuces = pdgroup[yvalue].loc[pdgroup['region']=='Without UCEs']
withucestat = withuces.describe()
withoutucestat = withoutuces.describe()
pdstat = pd.concat([withucestat,withoutucestat],axis=1)
pdstat.columns = ['withuce_{0}'.format(sfile),'withoutuce_{0}'.format(sfile)]
labels = pdstat.index.tolist()
labels.extend(['coef','pvalue'])
# ksStat,KsPval,strKSresult = KSTest(withuces)
# if strKSresult == 'Yes':
# statcoef,statpval = stats.ttest_ind(withuces,withoutuces)# or ttest_rel()
# stattest = 'TT'
# formatpval = '{:.01e}'.format(statpval)
# else:
# statcoef,statpval = stats.mannwhitneyu(withuces,withoutuces)
# stattest = 'MW'
# formatpval = '{:.01e}'.format(statpval)
statcoef,statpval = stats.mannwhitneyu(withuces,withoutuces)
if qfile:
mouseuces = pdgroup[yvalue].loc[pdgroup['region']=='Mouse UCEs']
mstatcoef,mstatpval = stats.mannwhitneyu(withuces,mouseuces)
mformatpval = '{:.02e}'.format(mstatpval)
mouseucestat = mouseuces.describe()
pdstat = pd.concat([pdstat,mouseucestat],axis=1)
pdstat.columns = ['withuce_{0}'.format(sfile),'withoutuce_{0}'.format(sfile),'mouseuce_{0}'.format(sfile)]
coef = pd.Series([Nan,Nan,mstatcoef],index=['withuce_{0}'.format(sfile),'withoutuce_{0}'.format(sfile),'mouseuce_{0}'.format(sfile)])
pval = pd.Series([Nan,Nan,mformatpval],index=['withuce_{0}'.format(sfile),'withoutuce_{0}'.format(sfile),'mouseuce_{0}'.format(sfile)])
pdstat = pdstat.append(coef,ignore_index=True)
pdstat = pdstat.append(pval,ignore_index=True)
else:
mformatpval=None
empty = pd.Series([Nan,Nan],index=['withuce_{0}'.format(sfile),'withoutuce_{0}'.format(sfile)])
pdstat = pdstat.append(empty,ignore_index=True)
pdstat = pdstat.append(empty,ignore_index=True)
stattest = 'Mann Whitney U p-value'
formatpval = '{:.02e}'.format(statpval)
pdstat['labels'] = labels
pdstat.set_index('labels',inplace=True,drop=True)
pdstat.loc['coef','withoutuce_{0}'.format(sfile)] = statcoef
pdstat.loc['pvalue','withoutuce_{0}'.format(sfile)] = formatpval
save_panda(pdstat,'{0}.txt'.format(statsname))
return formatpval,stattest,mformatpval
# get the location where to add the p value annotation
def set_pval_label_location(pdgroup,yvalue):
if yvalue == 'size':
addvalue = 1000
elif yvalue == 'intersect_tertiary':
addvalue = 13
else:
addvalue = .000000005
return addvalue
def set_pval_label_location_mouse(pdgroup,yvalue):
if yvalue == 'size':
addvalue = 500
elif yvalue == 'intersect_tertiary':
addvalue = 0
else:
addvalue = .000055
return addvalue
# darken the lines around the boxplot to black
def darkend_boxplot_lines(axes,numboxes,numlines,boxcolor):
#https://stackoverflow.com/questions/36874697/how-to-edit-properties-of-whiskers-fliers-caps-etc-in-seaborn-boxplot
for t,artist in enumerate(axes.artists):
artist.set_edgecolor(boxcolor)
for s in range(t*numboxes,t*numboxes+numlines):
line = axes.lines[s]
line.set_color(boxcolor)
line.set_mfc(boxcolor)
line.set_mec(boxcolor)
# tile the boxplots
def run_tiled_subplots_per_boxplot_dataset(pddata,yvalue,ylabeltext,names,filename,pfile,qfile,statsname):
sns.set_style('ticks')
pp = PdfPages(filename)
plt.figure(figsize=(10,10))
# plt.rcParams['axes.formatter.limits'] = (-3, 3)
sns.set_palette("Blues")
datasetcounter = 0
fig,ax_array = plt.subplots(3,2)
intnum = len(names)
numboxes,numlines = set_num_lines_to_color(qfile)
for data_chunk,name_chunk in zip(chunks(pddata,6),chunks(names,6)):
intPlotCounter = -1
for i,ax_row in enumerate(ax_array):
for j,axes in enumerate(ax_row):
axes.cla()
boxcolor = '#000000'
intPlotCounter += 1
if datasetcounter < len(names):
pdgroup = format_with_without_data_for_boxplot(data_chunk[intPlotCounter],yvalue,pfile,qfile)
sns.boxplot(data=pdgroup,x='region',y=yvalue,showfliers=False,ax=axes,linewidth=.75)
axes.set_ylabel(ylabeltext,size=10)
axes.set_xlabel('Domain Type',size=10)
darkend_boxplot_lines(axes,numboxes,numlines,boxcolor)
axes.set_title(name_chunk[intPlotCounter].split('.',1)[0],size=8)
axes.set_xticklabels(axes.get_xticklabels(),fontsize=8)
plt.setp(axes.xaxis.get_majorticklabels())
formatpval,stattest,mformatpval = run_appropriate_test(pdgroup,yvalue,qfile,name_chunk[intPlotCounter],statsname)
addvalue = set_pval_label_location(pdgroup,yvalue)
ylabelmax = pdgroup[yvalue].quantile(q=.97)
axes.plot([0,0,1,1],[ylabelmax+addvalue,ylabelmax+addvalue,ylabelmax+addvalue,ylabelmax+addvalue],lw=.75,c=boxcolor)
axes.text((0+1)*.5,ylabelmax+addvalue,'{0}: {1}'.format(stattest,formatpval),ha='center',va='bottom',color=boxcolor,size=6,clip_on=False)
if qfile:
maddvalue = set_pval_label_location_mouse(pdgroup,yvalue)
mylabelmax = pdgroup[yvalue].quantile(q=.97)
axes.plot([0,0,2,2],[mylabelmax+maddvalue,mylabelmax+maddvalue,mylabelmax+maddvalue,mylabelmax+maddvalue],lw=.75,c=boxcolor)
axes.text((0+2)*.5,mylabelmax+maddvalue,'{0}: {1}'.format(stattest,mformatpval),ha='center',va='bottom',color=boxcolor,size=6,clip_on=False)
datasetcounter += 1
else:
axes.remove()
pass
plt.tight_layout()
sns.despine()
plt.savefig(pp, format='pdf')
plt.clf()
pp.close()
def main():
args = get_args()
stringname = args.stringname
pfile = args.file
secondaryfiles = [line.strip() for line in args.secondaryfeatures]
if args.tertiaryfeature:
tfile = args.tertiaryfeature
else:
tfile = None
if args.quinaryfeature:
qfile = args.quinaryfeature
else:
qfile = None
lumpsecondary = []
lumpsecondarycoords = []
for sfile in secondaryfiles:
secondary = get_bedtools_features(sfile)
concat = run_overlaps_for_ptq_against_s(secondary,pfile,tfile,qfile)
coords = concat[['chr','start','end']]
lumpsecondarycoords.append(coords)
lumpsecondary.append(concat)
concatsecondary =
|
pd.concat(lumpsecondary)
|
pandas.concat
|
from logging import getLogger
import numpy as np
import pandas as pd
from omnium import Analyser
logger = getLogger('cosar.spn')
def _normalize_feature_matrix(settings, X_filtered):
"""Apply the normalization. Both mag(nitude) and rot(ation) are normalized. Up to caller
to decide if just mag both magrot normalization needed. Additionally, apply
lower troposphere favouring if option is selected."""
logger.debug('normalizing data')
mag = np.sqrt(X_filtered[:, :settings.NUM_PRESSURE_LEVELS] ** 2 +
X_filtered[:, settings.NUM_PRESSURE_LEVELS:] ** 2)
rot = np.arctan2(X_filtered[:, :settings.NUM_PRESSURE_LEVELS],
X_filtered[:, settings.NUM_PRESSURE_LEVELS:])
# Normalize the profiles by the maximum magnitude at each level.
max_mag = mag.max(axis=0)
if settings.FAVOUR_LOWER_TROP:
# This is done by modifying max_mag, which means it's easy to undo by performing
# reverse using max_mag.
# N.B. increasing max_mag will decrease the normalized values.
# Because the values are laid out from highest altitude (lowest pressure) to lowest,
# this will affect the upper trop.
max_mag[:settings.FAVOUR_INDEX] *= settings.FAVOUR_FACTOR
logger.debug('max_mag = {}'.format(max_mag))
norm_mag = mag / max_mag[None, :]
u_norm_mag = norm_mag * np.cos(rot)
v_norm_mag = norm_mag * np.sin(rot)
# Normalize the profiles by the rotation at 850 hPa.
rot_at_level = rot[:, settings.INDEX_850HPA]
norm_rot = rot - rot_at_level[:, None]
index_850hPa = settings.INDEX_850HPA
logger.debug('# prof with mag<1 at 850 hPa: {}'.format((mag[:, index_850hPa] < 1).sum()))
logger.debug('% prof with mag<1 at 850 hPa: {}'.format((mag[:, index_850hPa] < 1).sum() /
mag[:, index_850hPa].size * 100))
u_norm_mag_rot = norm_mag * np.cos(norm_rot)
v_norm_mag_rot = norm_mag * np.sin(norm_rot)
Xu_mag = u_norm_mag
Xv_mag = v_norm_mag
# Add the two matrices together to get feature set.
X_mag = np.concatenate((Xu_mag, Xv_mag), axis=1)
Xu_magrot = u_norm_mag_rot
Xv_magrot = v_norm_mag_rot
# Add the two matrices together to get feature set.
X_magrot = np.concatenate((Xu_magrot, Xv_magrot), axis=1)
return X_mag, X_magrot, max_mag, rot_at_level
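# Illustrative note (editor addition): the magnitude normalization above divides each level by the
# maximum magnitude observed at that level, so normalized magnitudes lie in [0, 1]. With
# FAVOUR_LOWER_TROP, max_mag for the upper-tropospheric levels (indices below FAVOUR_INDEX) is
# inflated by FAVOUR_FACTOR, e.g. a level maximum of 30 m/s and a factor of 4 would mean those
# values are divided by 120 instead of 30, reducing their weight in the later PCA/KMeans steps.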
class ShearProfileNormalize(Analyser):
"""Normalize profiles by rotating and normalizing on magnitude.
Profiles are normalized w.r.t. rotation by picking a height level, in this case 850 hPa and
rotating the profiles so that the wind vectors at 850 hPa are aligned.
They are normalized w.r.t. to magnitude by calculating the max. magnitude at each height
level (sqrt(u**2 + v**2)) and normalizing by dividing by this.
Additionally, if FAVOUR_LOWER_TROP is set then the max_mag array is multiplied by
FAVOUR_FACTOR above the level defined by FAVOUR_INDEX. This effectively *reduces* their
weighting when PCA/KMeans are applied by decreasing their normalized values.
Reads in the filtered profiles and outputs normalized profiles and max_mag array (to same HDF5
file).
"""
analysis_name = 'shear_profile_normalize'
single_file = True
input_dir = 'omnium_output/{version_dir}/{expt}'
input_filename = '{input_dir}/profiles_filtered.hdf'
output_dir = 'omnium_output/{version_dir}/{expt}'
output_filenames = ['{output_dir}/profiles_normalized.hdf']
norm = 'magrot'
def load(self):
logger.debug('override load')
self.df_filtered =
|
pd.read_hdf(self.task.filenames[0])
|
pandas.read_hdf
|
import pandas as pd
import numpy as np
from sklearn.linear_model import ElasticNet, ElasticNetCV
from sklearn.model_selection import RepeatedKFold, GridSearchCV
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from scripts.python.routines.betas import betas_drop_na
import pickle
import random
import plotly.express as px
import copy
import statsmodels.formula.api as smf
from sklearn.metrics import mean_squared_error, mean_absolute_error
from scripts.python.pheno.datasets.filter import filter_pheno
from scripts.python.pheno.datasets.features import get_column_name, get_status_dict, get_sex_dict
from scripts.python.routines.plot.scatter import add_scatter_trace
from scipy.stats import mannwhitneyu
import plotly.graph_objects as go
import pathlib
from scripts.python.routines.manifest import get_manifest
from scripts.python.routines.plot.save import save_figure
from scripts.python.routines.plot.layout import add_layout, get_axis
from scripts.python.routines.plot.p_value import add_p_value_annotation
from statsmodels.stats.multitest import multipletests
dataset = "GSEUNN"
path = f"E:/YandexDisk/Work/pydnameth/datasets"
datasets_info = pd.read_excel(f"{path}/datasets.xlsx", index_col='dataset')
platform = datasets_info.loc[dataset, 'platform']
manifest = get_manifest(platform)
status_col = get_column_name(dataset, 'Status').replace(' ','_')
age_col = get_column_name(dataset, 'Age').replace(' ','_')
sex_col = get_column_name(dataset, 'Sex').replace(' ','_')
status_dict = get_status_dict(dataset)
status_passed_fields = status_dict['Control'] + status_dict['Case']
sex_dict = get_sex_dict(dataset)
continuous_vars = {}
categorical_vars = {status_col: [x.column for x in status_passed_fields], sex_col: list(sex_dict.values())}
pheno = pd.read_pickle(f"{path}/{platform}/{dataset}/pheno_xtd.pkl")
pheno = filter_pheno(dataset, pheno, continuous_vars, categorical_vars)
betas = pd.read_pickle(f"{path}/{platform}/{dataset}/betas.pkl")
betas = betas_drop_na(betas)
df = pd.merge(pheno, betas, left_index=True, right_index=True)
df.set_index('ID', inplace=True)
df_ctrl = df.loc[(df[status_col] == 'Control'), :]
df_case = df.loc[(df[status_col] == 'ESRD'), :]
path_save = f"{path}/{platform}/{dataset}/special/020_agena"
pathlib.Path(f"{path_save}/figs/cpgs").mkdir(parents=True, exist_ok=True)
pathlib.Path(f"{path_save}/figs/subjects").mkdir(parents=True, exist_ok=True)
agena = pd.read_excel(f"{path}/{platform}/{dataset}/data/agena/proc.xlsx", index_col='feature')
agena = agena.T
agena.index.name = "subject_id"
agena_cpgs = list(set(agena.columns.values) - set(['Group']))
agena.loc[:, agena_cpgs] *= 0.01
subjects_common = sorted(list(set(agena.index.values).intersection(set(df_ctrl.index.values))))
subjects_agena_only = set(agena.index.values) - set(df_ctrl.index.values)
cpgs_common = sorted(list(set(agena_cpgs).intersection(set(betas.columns.values))))
rel_diff_df = pd.DataFrame(index=subjects_common, columns=cpgs_common+['Group'])
for subject in subjects_common:
agena_i = agena.loc[subject, agena_cpgs]
    agena_i = agena_i.dropna(how='all')
cpgs_i = sorted(list(set(agena_i.index.values).intersection(set(betas.columns.values))))
df_i = df_ctrl.loc[subject, cpgs_i]
rel_diff_df.at[subject, 'Group'] = agena.at[subject, 'Group']
fig = go.Figure()
for cpg_id, cpg in enumerate(cpgs_i):
distrib_i = df_ctrl.loc[:, cpg].values
fig.add_trace(
go.Violin(
x=[cpg] * len(distrib_i),
y=distrib_i,
box_visible=True,
meanline_visible=True,
line_color='grey',
showlegend=False,
opacity=1.0
)
)
showlegend = False
if cpg_id == 0:
showlegend = True
meth_epic = df_ctrl.at[subject, cpg]
meth_agena = agena_i.at[cpg]
tmp = (meth_agena - meth_epic) / meth_epic * 100.0
rel_diff_df.at[subject, cpg] = tmp
fig.add_trace(
go.Scatter(
x=[cpg],
y=[meth_epic],
showlegend=showlegend,
name="850K",
mode="markers",
marker=dict(
size=15,
opacity=0.7,
line=dict(
width=1
),
color='red'
),
)
)
fig.add_trace(
go.Scatter(
x=[cpg],
y=[meth_agena],
showlegend=showlegend,
name="Agena",
mode="markers",
marker=dict(
size=12,
opacity=0.7,
line=dict(
width=1
),
color='blue'
),
)
)
add_layout(fig, "", 'Methylation level', f"")
fig.update_xaxes(tickangle=270)
fig.update_xaxes(tickfont_size=15)
fig.update_layout(margin=go.layout.Margin(
l=80,
r=20,
b=120,
t=50,
pad=0
))
save_figure(fig, f"{path_save}/figs/subjects/{subject}")
colors = px.colors.qualitative.Set1
groups = sorted(rel_diff_df['Group'].unique())
rel_diff_df.to_excel(f"{path_save}/rel_diff.xlsx", index=True)
fig = go.Figure()
for cpg_id, cpg in enumerate(cpgs_common):
series_i = rel_diff_df.loc[subjects_common, cpg].dropna()
series_i = series_i.astype('float64')
distrib_i = series_i.values
showlegend = False
if cpg_id == 0:
showlegend = True
fig.add_trace(
go.Violin(
x=[cpg] * len(distrib_i),
y=distrib_i,
showlegend=False,
box_visible=True,
meanline_visible=True,
line_color='black',
line=dict(width=0.35),
fillcolor='grey',
marker=dict(color='grey', line=dict(color='black', width=0.3), opacity=0.8),
points=False,
bandwidth=np.ptp(distrib_i) / 25,
opacity=0.8
)
)
for g_id, g in enumerate(groups):
series_i = rel_diff_df.loc[rel_diff_df['Group'] == g, cpg].dropna()
series_i = series_i.astype('float64')
distrib_i = series_i.values
fig.add_trace(
go.Box(
x=[cpg] * len(distrib_i),
name=g,
y=distrib_i,
boxpoints='all',
fillcolor='rgba(255,255,255,0)',
            hoveron='points',
            line={'color': 'rgba(255,255,255,0)'},
            pointpos=-2,
            showlegend=showlegend,
marker=dict(size=4, color=colors[g_id], line=dict(color='black', width=0.3), opacity=0.6),
)
)
add_layout(fig, "", "Relative difference, %", f"")
fig.update_xaxes(tickangle=270)
fig.update_xaxes(tickfont_size=15)
fig.update_layout(margin=go.layout.Margin(
l=120,
r=20,
b=120,
t=50,
pad=0
))
fig.update_layout(title_xref='paper')
fig.update_layout(legend={'itemsizing': 'constant'})
fig.update_layout(legend_font_size=20)
fig.update_layout(
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="center",
x=0.5
)
)
save_figure(fig, f"{path_save}/figs/rel_diff")
pvals = []
values_dict = {'ID': subjects_common}
for cpg_id, cpg in enumerate(cpgs_common):
values_dict[f"{cpg}_850K"] = []
values_dict[f"{cpg}_agena"] = []
epic_data = []
agena_data = []
for subject in subjects_common:
meth_epic = df_ctrl.at[subject, cpg]
epic_data.append(meth_epic)
meth_agena = agena.at[subject, cpg]
agena_data.append(meth_agena)
values_dict[f"{cpg}_850K"].append(meth_epic)
values_dict[f"{cpg}_agena"].append(meth_agena)
stat, pval = mannwhitneyu(epic_data, agena_data, alternative='two-sided')
pvals.append(pval)
values_df =
|
pd.DataFrame(values_dict)
|
pandas.DataFrame
|
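# Editor's illustrative sketch (not part of the original script): a plausible next
# step for the Mann-Whitney p-values collected above is multiple-testing correction
# with the `multipletests` function already imported at the top of this snippet.
# The output file name is hypothetical.
_, pvals_fdr, _, _ = multipletests(pvals, alpha=0.05, method='fdr_bh')
mw_df = pd.DataFrame({'CpG': cpgs_common, 'pval': pvals, 'pval_fdr_bh': pvals_fdr})
mw_df.to_excel(f"{path_save}/mw_pvals.xlsx", index=False)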
import copy
import re
import warnings
import numpy as np
import pandas as pd
import xarray
import scipy.stats as st
import numba
try:
import arviz as az
import arviz.plots.plot_utils
except:
warnings.warn(
"Could not import ArviZ. Perhaps it is not installed."
" Some functionality in the viz submodule will not be available."
)
import scipy.ndimage
import matplotlib._contour
from matplotlib.pyplot import get_cmap as mpl_get_cmap
import bokeh.models
import bokeh.palettes
import bokeh.plotting
import colorcet
try:
import holoviews as hv
import holoviews.operation.datashader
hv.extension("bokeh")
except ImportError as e:
warnings.warn(
f"""DataShader import failed with error "{e}".
Features requiring DataShader will not work and you will get exceptions."""
)
from . import utils
from . import image
try:
from . import stan
except:
warnings.warn(
"Could not import `stan` submodule. Perhaps pystan or cmdstanpy is not properly installed."
)
def confints(
summaries, p=None, marker_kwargs={}, line_kwargs={}, palette=None, **kwargs
):
"""Make a horizontal plot of centers/conf ints with error bars.
Parameters
----------
summaries : list of dicts
Each entry in `summaries` is a dictionary containing minimally
keys 'estimate', 'conf_int', and 'label'. The 'estimate' value
is the point estimate, a single scalar. The 'conf_int' value is
a two-tuple, two-list, or two-numpy array containing the low and
high end of the confidence interval for the estimate. The
'label' value is the name of the variable. This gives the label
of the y-ticks.
p : bokeh.plotting.Figure instance or None, default None
If not None, a figure to be populated with confidence interval
plot. If specified, it is important that `p.y_range` be set to
contain all of the values in the labels provided in the
`summaries` input. If `p` is None, then a new figure is created.
marker_kwargs : dict, default {}
Kwargs to be passed to p.circle() for plotting estimates.
line_kwargs : dict, default {}
        Kwargs passed to p.line() to plot the confidence interval.
palette : list, str, or None
        If None, default colors (or those given in `marker_kwargs` and
        `line_kwargs`) are used. If a str, all glyphs are colored
        accordingly, e.g., 'black'. Otherwise a list of colors is used.
kwargs : dict
Any additional kwargs are passed to bokeh.plotting.figure().
Returns
-------
output : Bokeh figure
Plot of error bars.
"""
n = len(summaries)
labels = [summary["label"] for summary in summaries]
estimates = [summary["estimate"] for summary in summaries]
conf_intervals = [summary["conf_int"] for summary in summaries]
if palette is None:
use_palette = False
else:
if (
"color" in marker_kwargs
or "line_color" in marker_kwargs
or "fill_color" in marker_kwargs
or "color" in line_kwargs
or "line_color" in line_kwargs
):
raise RuntimeError(
"`palette` must be None if color is specified in "
"`marker_kwargs` or `line_kwargs`"
)
if type(palette) == str:
marker_kwargs["color"] = palette
line_kwargs["color"] = palette
use_palette = False
elif type(palette) == list or type(palette) == tuple:
            palette = palette[:n][::-1]
use_palette = True
line_width = kwargs.pop("line_width", 3)
size = marker_kwargs.pop("size", 5)
if p is None:
if "plot_height" not in kwargs and "frame_height" not in kwargs:
kwargs["frame_height"] = 50 * n
if "plot_width" not in kwargs and "frame_width" not in kwargs:
kwargs["frame_width"] = 450
toolbar_location = kwargs.pop("toolbar_location", "above")
p = bokeh.plotting.figure(
y_range=labels[::-1], toolbar_location=toolbar_location, **kwargs
)
for i, (estimate, conf, label) in enumerate(zip(estimates, conf_intervals, labels)):
if use_palette:
marker_kwargs["color"] = palette[i % len(palette)]
line_kwargs["color"] = palette[i % len(palette)]
p.circle(x=[estimate], y=[label], size=size, **marker_kwargs)
p.line(x=conf, y=[label, label], line_width=line_width, **line_kwargs)
return p
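# Editor's illustrative sketch (not part of the original module): a minimal call to
# confints() with made-up estimates, just to show the expected input structure.
example_summaries = [
    dict(estimate=1.2, conf_int=(0.8, 1.6), label="alpha"),
    dict(estimate=3.4, conf_int=(2.9, 4.0), label="beta"),
]
example_confints_fig = confints(example_summaries)   # display with bokeh.io.show(...)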
def fill_between(
x1=None,
y1=None,
x2=None,
y2=None,
show_line=True,
patch_kwargs={},
line_kwargs={},
p=None,
**kwargs,
):
"""
Create a filled region between two curves.
Parameters
----------
x1 : array_like
Array of x-values for first curve
y1 : array_like
Array of y-values for first curve
x2 : array_like
Array of x-values for second curve
y2 : array_like
Array of y-values for second curve
show_line : bool, default True
If True, show the lines on the edges of the fill.
patch_kwargs : dict
Any kwargs passed into p.patch(), which generates the fill.
line_kwargs : dict
Any kwargs passed into p.line() in generating the line around
the fill.
p : bokeh.plotting.Figure instance, or None (default)
If None, create a new figure. Otherwise, populate the existing
figure `p`.
kwargs
All other kwargs are passed to bokeh.plotting.figure() in
creating the figure.
Returns
-------
output : bokeh.plotting.Figure instance
Plot populated with fill-between.
"""
if "plot_height" not in kwargs and "frame_height" not in kwargs:
kwargs["frame_height"] = 275
if "plot_width" not in kwargs and "frame_width" not in kwargs:
kwargs["frame_width"] = 350
if p is None:
p = bokeh.plotting.figure(**kwargs)
line_width = patch_kwargs.pop("line_width", 0)
line_alpha = patch_kwargs.pop("line_alpha", 0)
p.patch(
x=np.concatenate((x1, x2[::-1])),
y=np.concatenate((y1, y2[::-1])),
line_width=line_width,
line_alpha=line_alpha,
**patch_kwargs,
)
if show_line:
line_width = line_kwargs.pop("line_width", 2)
p.line(x1, y1, line_width=line_width, **line_kwargs)
p.line(x2, y2, line_width=line_width, **line_kwargs)
return p
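# Editor's illustrative sketch (not part of the original module): shading the band
# between two arbitrary curves with fill_between().
_x = np.linspace(0, 2 * np.pi, 200)
example_fill_fig = fill_between(
    x1=_x, y1=np.sin(_x) + 0.2,
    x2=_x, y2=np.sin(_x) - 0.2,
    patch_kwargs=dict(fill_alpha=0.3),
)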
def qqplot(
samples,
data,
percentile=95,
patch_kwargs={},
line_kwargs={},
diag_kwargs={},
p=None,
**kwargs,
):
"""
Generate a Q-Q plot.
Parameters
----------
samples : Numpy array or xarray, shape (n_samples, n) or xarray DataArray
A Numpy array containing predictive samples.
data : Numpy array, shape (n,) or xarray DataArray
One-dimensional data set to use in Q-Q plot.
percentile : int or float, default 95
Which percentile to use in displaying the Q-Q plot.
patch_kwargs : dict
Any kwargs passed into p.patch(), which generates the filled
        region of the Q-Q plot.
line_kwargs : dict
Any kwargs passed into p.line() in generating the line around
the fill.
diag_kwargs : dict
Any kwargs to be passed into p.line() in generating diagonal
reference line of Q-Q plot.
p : bokeh.plotting.Figure instance, or None (default)
If None, create a new figure. Otherwise, populate the existing
figure `p`.
kwargs
All other kwargs are passed to bokeh.plotting.figure() in
creating the figure.
Returns
-------
output : bokeh.plotting.Figure instance
Plot populated with Q-Q plot.
"""
if type(samples) != np.ndarray:
if type(samples) == xarray.core.dataarray.DataArray:
samples = samples.squeeze().values
else:
raise RuntimeError("`samples` can only be a Numpy array or xarray.")
if samples.ndim != 2:
raise RuntimeError(
"`samples` must be a 2D array, with each row being a sample."
)
if len(samples) < 100:
warnings.warn(
"`samples` has very few samples. Predictive percentiles may be poor."
)
if data is not None and len(data) != samples.shape[1]:
raise RuntimeError("Mismatch in shape of `data` and `samples`.")
if "plot_height" not in kwargs and "frame_height" not in kwargs:
kwargs["frame_height"] = 275
if "plot_width" not in kwargs and "frame_width" not in kwargs:
kwargs["frame_width"] = 275
if "fill_alpha" not in patch_kwargs:
patch_kwargs["fill_alpha"] = 0.5
x = np.sort(data)
samples = np.sort(samples)
# Upper and lower bounds
low_theor, up_theor = np.percentile(
samples, (50 - percentile / 2, 50 + percentile / 2), axis=0
)
x_range = [data.min(), data.max()]
if "x_range" not in kwargs:
kwargs["x_range"] = x_range
if p is None:
p = bokeh.plotting.figure(**kwargs)
p = fill_between(
x,
up_theor,
x,
low_theor,
patch_kwargs=patch_kwargs,
line_kwargs=line_kwargs,
show_line=True,
p=p,
)
# Plot 45 degree line
color = diag_kwargs.pop("color", "black")
alpha = diag_kwargs.pop("alpha", 0.5)
line_width = diag_kwargs.pop("line_width", 4)
p.line(x_range, x_range, line_width=line_width, color=color, alpha=alpha)
return p
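# Editor's illustrative sketch (not part of the original module): a Q-Q plot of
# synthetic predictive samples against a synthetic data set of matching length.
_rng = np.random.default_rng(3252)
_qq_data = _rng.normal(size=75)
_qq_samples = _rng.normal(size=(1000, 75))   # each row is one predictive draw
example_qq_fig = qqplot(_qq_samples, _qq_data)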
def _ecdf(
data=None,
p=None,
x_axis_label=None,
y_axis_label="ECDF",
title=None,
plot_height=300,
plot_width=450,
staircase=False,
complementary=False,
x_axis_type="linear",
y_axis_type="linear",
**kwargs,
):
"""
Create a plot of an ECDF.
Parameters
----------
data : array_like
One-dimensional array of data. Nan's are ignored.
conf_int : bool, default False
If True, display a confidence interval on the ECDF.
ptiles : list, default [2.5, 97.5]
        The percentiles to use for the confidence interval. Ignored if
`conf_int` is False.
n_bs_reps : int, default 1000
Number of bootstrap replicates to do to compute confidence
interval. Ignored if `conf_int` is False.
fill_color : str, default 'lightgray'
        Color of the confidence interval. Ignored if `conf_int` is
False.
fill_alpha : float, default 1
Opacity of confidence interval. Ignored if `conf_int` is False.
p : bokeh.plotting.Figure instance, or None (default)
If None, create a new figure. Otherwise, populate the existing
figure `p`.
x_axis_label : str, default None
Label for the x-axis. Ignored if `p` is not None.
y_axis_label : str, default 'ECDF' or 'ECCDF'
Label for the y-axis. Ignored if `p` is not None.
title : str, default None
Title of the plot. Ignored if `p` is not None.
plot_height : int, default 300
Height of plot, in pixels. Ignored if `p` is not None.
plot_width : int, default 450
Width of plot, in pixels. Ignored if `p` is not None.
staircase : bool, default False
If True, make a plot of a staircase ECDF (staircase). If False,
plot the ECDF as dots.
complementary : bool, default False
If True, plot the empirical complementary cumulative
        distribution function.
x_axis_type : str, default 'linear'
Either 'linear' or 'log'.
y_axis_type : str, default 'linear'
Either 'linear' or 'log'.
kwargs
Any kwargs to be passed to either p.circle or p.line, for
`staircase` being False or True, respectively.
Returns
-------
output : bokeh.plotting.Figure instance
Plot populated with ECDF.
"""
# Check data to make sure legit
data = utils._convert_data(data)
# Data points on ECDF
x, y = _ecdf_vals(data, staircase, complementary)
# Instantiate Bokeh plot if not already passed in
if p is None:
y_axis_label = kwargs.pop("y_axis_label", "ECCDF" if complementary else "ECDF")
p = bokeh.plotting.figure(
plot_height=plot_height,
plot_width=plot_width,
x_axis_label=x_axis_label,
y_axis_label=y_axis_label,
x_axis_type=x_axis_type,
y_axis_type=y_axis_type,
title=title,
)
if staircase:
# Line of steps
p.line(x, y, **kwargs)
# Rays for ends
if complementary:
p.ray(x=x[0], y=1, length=0, angle=np.pi, **kwargs)
p.ray(x=x[-1], y=0, length=0, angle=0, **kwargs)
else:
p.ray(x=x[0], y=0, length=0, angle=np.pi, **kwargs)
p.ray(x=x[-1], y=1, length=0, angle=0, **kwargs)
else:
p.circle(x, y, **kwargs)
return p
def _histogram(
data=None,
bins="freedman-diaconis",
p=None,
density=False,
kind="step",
line_kwargs={},
patch_kwargs={},
**kwargs,
):
"""
Make a plot of a histogram of a data set.
Parameters
----------
data : array_like
1D array of data to make a histogram out of
bins : int, array_like, or str, default 'freedman-diaconis'
If int or array_like, setting for `bins` kwarg to be passed to
`np.histogram()`. If 'exact', then each unique value in the
data gets its own bin. If 'integer', then integer data is
assumed and each integer gets its own bin. If 'sqrt', uses the
square root rule to determine number of bins. If
`freedman-diaconis`, uses the Freedman-Diaconis rule for number
of bins.
p : bokeh.plotting.Figure instance, or None (default)
If None, create a new figure. Otherwise, populate the existing
figure `p`.
density : bool, default False
If True, normalized the histogram. Otherwise, base the histogram
on counts.
kind : str, default 'step'
The kind of histogram to display. Allowed values are 'step' and
'step_filled'.
line_kwargs : dict
Any kwargs to be passed to p.line() in making the line of the
histogram.
patch_kwargs : dict
Any kwargs to be passed to p.patch() in making the fill of the
histogram.
kwargs : dict
All other kwargs are passed to bokeh.plotting.figure()
Returns
-------
output : Bokeh figure
Figure populated with histogram.
"""
if data is None:
raise RuntimeError("Input `data` must be specified.")
# Instantiate Bokeh plot if not already passed in
if p is None:
y_axis_label = kwargs.pop("y_axis_label", "density" if density else "count")
if "plot_height" not in kwargs and "frame_height" not in kwargs:
kwargs["frame_height"] = 275
if "plot_width" not in kwargs and "frame_width" not in kwargs:
kwargs["frame_width"] = 400
y_range = kwargs.pop("y_range", bokeh.models.DataRange1d(start=0))
p = bokeh.plotting.figure(y_axis_label=y_axis_label, y_range=y_range, **kwargs)
# Compute histogram
bins = _bins_to_np(data, bins)
e0, f0 = _compute_histogram(data, bins, density)
if kind == "step":
p.line(e0, f0, **line_kwargs)
if kind == "step_filled":
x2 = [e0.min(), e0.max()]
y2 = [0, 0]
p = fill_between(e0, f0, x2, y2, show_line=True, p=p, patch_kwargs=patch_kwargs)
return p
def _bins_to_np(data, bins):
"""Compute a Numpy array to pass to np.histogram() as bins."""
if type(bins) == str and bins not in [
"integer",
"exact",
"sqrt",
"freedman-diaconis",
]:
raise RuntimeError("Invalid bins specification.")
if type(bins) == str and bins == "exact":
a = np.unique(data)
if len(a) == 1:
bins = np.array([a[0] - 0.5, a[0] + 0.5])
else:
bins = np.concatenate(
(
(a[0] - (a[1] - a[0]) / 2,),
(a[1:] + a[:-1]) / 2,
(a[-1] + (a[-1] - a[-2]) / 2,),
)
)
elif type(bins) == str and bins == "integer":
if np.any(data != np.round(data)):
raise RuntimeError("'integer' bins chosen, but data are not integer.")
bins = np.arange(data.min() - 1, data.max() + 1) + 0.5
elif type(bins) == str and bins == "sqrt":
bins = int(np.ceil(np.sqrt(len(data))))
elif type(bins) == str and bins == "freedman-diaconis":
h = 2 * (np.percentile(data, 75) - np.percentile(data, 25)) / np.cbrt(len(data))
if h == 0.0:
bins = 3
else:
bins = int(np.ceil((data.max() - data.min()) / h))
return bins
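# Editor's note: worked example of the Freedman-Diaconis rule used above
# (illustrative numbers). For data with IQR = 4.0, n = 1000 points and range 30:
#     h = 2 * 4.0 / cbrt(1000) = 0.8,  giving  ceil(30 / 0.8) = 38 bins.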
def _compute_histogram(data, bins, density):
"""Compute values of histogram for plotting."""
f, e = np.histogram(data, bins=bins, density=density)
e0 = np.empty(2 * len(e))
f0 = np.empty(2 * len(e))
e0[::2] = e
e0[1::2] = e
f0[0] = 0
f0[-1] = 0
f0[1:-1:2] = f
f0[2:-1:2] = f
return e0, f0
def predictive_ecdf(
samples,
data=None,
diff=None,
percentiles=(95, 68),
color="blue",
data_color="orange",
data_staircase=True,
data_size=2,
x=None,
discrete=False,
p=None,
**kwargs,
):
"""Plot a predictive ECDF from samples.
Parameters
----------
samples : Numpy array or xarray, shape (n_samples, n) or xarray DataArray
A Numpy array containing predictive samples.
data : Numpy array, shape (n,) or xarray DataArray, or None
If not None, ECDF of measured data is overlaid with predictive
ECDF.
diff : 'ecdf', 'iecdf', 'eppf', or None, default None
Referring to the variable as x, if `diff` is 'iecdf' or 'eppf',
for each value of the ECDF, plot the value of x minus the median
x. If 'ecdf', plot the value of the ECDF minus the median ECDF
value. If None, just plot the ECDFs.
percentiles : list or tuple, default (95, 68)
Percentiles for making colored envelopes for confidence
intervals for the predictive ECDFs. Maximally four can be
specified.
color : str, default 'blue'
One of ['green', 'blue', 'red', 'gray', 'purple', 'orange'].
        These are used to make the color scheme of shading of
percentiles.
data_color : str, default 'orange'
String representing the color of the data to be plotted over the
confidence interval envelopes.
data_staircase : bool, default True
If True, plot the ECDF of the data as a staircase.
Otherwise plot it as dots.
data_size : int, default 2
        Size of marker (if `data_staircase` is False) or thickness of
        line (if `data_staircase` is True) in the plot of the data.
x : Numpy array, default None
Points at which to evaluate the ECDF. If None, points are
automatically generated based on the data range.
discrete : bool, default False
If True, the samples take on discrete values.
p : bokeh.plotting.Figure instance, or None (default)
If None, create a new figure. Otherwise, populate the existing
figure `p`.
kwargs
All other kwargs are passed to bokeh.plotting.figure().
Returns
-------
output : Bokeh figure
Figure populated with glyphs describing range of values for the
ECDF of the samples. The shading goes according to percentiles
of samples of the ECDF, with the median ECDF plotted as line in
the middle.
"""
if diff == True:
diff = "ecdf"
warnings.warn(
"`diff` as a Boolean is deprecated. Use 'ecdf', 'iecdf', or None."
" Using `diff = 'ecdf'`.",
DeprecationWarning,
)
elif diff == False:
diff = None
warnings.warn(
"`diff` as a Boolean is deprecated. Use 'ecdf', 'iecdf', or None."
" Using `diff = None`.",
DeprecationWarning,
)
if diff is not None:
diff = diff.lower()
if diff == "eppf":
diff = "iecdf"
if type(samples) != np.ndarray:
if type(samples) == xarray.core.dataarray.DataArray:
samples = samples.squeeze().values
else:
raise RuntimeError("`samples` can only be a Numpy array or xarray.")
if samples.ndim != 2:
raise RuntimeError(
"`samples` must be a 2D array, with each row being a sample."
)
if len(samples) < 100:
warnings.warn(
"`samples` has very few samples. Predictive percentiles may be poor."
)
if data is not None and len(data) != samples.shape[1]:
raise RuntimeError("Mismatch in shape of `data` and `samples`.")
if len(percentiles) > 4:
raise RuntimeError("Can specify maximally four percentiles.")
# Build ptiles
percentiles = np.sort(percentiles)[::-1]
ptiles = [pt for pt in percentiles if pt > 0]
ptiles = (
[50 - pt / 2 for pt in percentiles]
+ [50]
+ [50 + pt / 2 for pt in percentiles[::-1]]
)
ptiles_str = [str(pt) for pt in ptiles]
if color not in ["green", "blue", "red", "gray", "purple", "orange", "betancourt"]:
raise RuntimeError(
"Only allowed colors are 'green', 'blue', 'red', 'gray', 'purple', 'orange'"
)
colors = {
"blue": ["#9ecae1", "#6baed6", "#4292c6", "#2171b5", "#084594"],
"green": ["#a1d99b", "#74c476", "#41ab5d", "#238b45", "#005a32"],
"red": ["#fc9272", "#fb6a4a", "#ef3b2c", "#cb181d", "#99000d"],
"orange": ["#fdae6b", "#fd8d3c", "#f16913", "#d94801", "#8c2d04"],
"purple": ["#bcbddc", "#9e9ac8", "#807dba", "#6a51a3", "#4a1486"],
"gray": ["#bdbdbd", "#969696", "#737373", "#525252", "#252525"],
"betancourt": [
"#DCBCBC",
"#C79999",
"#B97C7C",
"#A25050",
"#8F2727",
"#7C0000",
],
}
samples = np.sort(samples)
n = samples.shape[1]
if data is not None:
data_plot = np.sort(np.array(data))
# y-values for ECDFs
y = np.arange(1, n + 1) / n
df_ecdf = pd.DataFrame(dict(y=y))
for ptile in ptiles:
df_ecdf[str(ptile)] = np.percentile(samples, ptile, axis=0)
# Set up plot
if p is None:
x_axis_label = kwargs.pop(
"x_axis_label", "x difference" if diff == "iecdf" else "x"
)
y_axis_label = kwargs.pop(
"y_axis_label", "ECDF difference" if diff == "ecdf" else "ECDF"
)
if "plot_height" not in kwargs and "frame_height" not in kwargs:
kwargs["frame_height"] = 325
if "plot_width" not in kwargs and "frame_width" not in kwargs:
kwargs["frame_width"] = 400
p = bokeh.plotting.figure(
x_axis_label=x_axis_label, y_axis_label=y_axis_label, **kwargs
)
# Plot the predictive intervals
for i, ptile in enumerate(ptiles_str[: len(ptiles_str) // 2]):
if diff == "ecdf":
med = df_ecdf["50"].values
x1_val = df_ecdf[ptile].values
x1_post = med[med > x1_val.max()]
x2_val = df_ecdf[ptiles_str[-i - 1]].values
x2_pre = med[med < x2_val.min()]
x1 = np.concatenate((x1_val, x1_post))
y1 = np.concatenate((y, np.ones_like(x1_post)))
y1 -= _ecdf_arbitrary_points(df_ecdf["50"].values, x1)
x2 = np.concatenate((x2_pre, x2_val))
y2 = np.concatenate((np.zeros_like(x2_pre), y))
y2 -= _ecdf_arbitrary_points(df_ecdf["50"].values, x2)
x1, y1 = cdf_to_staircase(x1, y1)
x2, y2 = cdf_to_staircase(x2, y2)
else:
if diff == "iecdf":
df_ecdf[ptile] -= df_ecdf["50"]
df_ecdf[ptiles_str[-i - 1]] -= df_ecdf["50"]
x1, y1 = cdf_to_staircase(df_ecdf[ptile].values, y)
x2, y2 = cdf_to_staircase(df_ecdf[ptiles_str[-i - 1]].values, y)
fill_between(
x1,
y1,
x2,
y2,
p=p,
show_line=False,
patch_kwargs=dict(color=colors[color][i], alpha=0.5),
)
# The median as a solid line
if diff == "ecdf":
p.ray(
x=df_ecdf["50"].min(),
y=0.0,
length=0,
angle=np.pi,
line_width=2,
color=colors[color][-1],
)
p.ray(
x=df_ecdf["50"].min(),
y=0.0,
length=0,
angle=0,
line_width=2,
color=colors[color][-1],
)
elif diff == "iecdf":
p.line([0.0, 0.0], [0.0, 1.0], line_width=2, color=colors[color][-1])
else:
x, y_median = cdf_to_staircase(df_ecdf["50"], y)
p.line(x, y_median, line_width=2, color=colors[color][-1])
p.ray(
x=x.min(),
y=0.0,
length=0,
angle=np.pi,
line_width=2,
color=colors[color][-1],
)
p.ray(
x=x.max(),
y=int(diff is None),
length=0,
angle=0,
line_width=2,
color=colors[color][-1],
)
# Overlay data set
if data is not None:
if data_staircase:
if diff == "iecdf":
data_plot -= df_ecdf["50"]
x_data, y_data = cdf_to_staircase(data_plot, y)
elif diff == "ecdf":
med = df_ecdf["50"].values
x_data = np.sort(np.unique(np.concatenate((data_plot, med))))
data_ecdf = _ecdf_arbitrary_points(data_plot, x_data)
med_ecdf = _ecdf_arbitrary_points(med, x_data)
x_data, y_data = cdf_to_staircase(x_data, data_ecdf - med_ecdf)
else:
x_data, y_data = cdf_to_staircase(data_plot, y)
p.line(x_data, y_data, color=data_color, line_width=data_size)
# Extend to infinity
if diff != "iecdf":
p.ray(
x=x_data.min(),
y=0.0,
length=0,
angle=np.pi,
line_width=data_size,
color=data_color,
)
p.ray(
x=x_data.max(),
y=int(diff is None),
length=0,
angle=0,
line_width=data_size,
color=data_color,
)
else:
if diff == "iecdf":
p.circle(data_plot - df_ecdf["50"], y, color=data_color, size=data_size)
elif diff == "ecdf":
p.circle(
data_plot,
y - _ecdf_arbitrary_points(df_ecdf["50"].values, data_plot),
color=data_color,
size=data_size,
)
else:
p.circle(data_plot, y, color=data_color, size=data_size)
return p
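# Editor's illustrative sketch (not part of the original module): predictive ECDF
# envelopes from synthetic exponential draws, overlaid with a synthetic data set.
_rng = np.random.default_rng(3252)
_pe_data = _rng.exponential(1.0, size=60)
_pe_samples = _rng.exponential(1.0, size=(2000, 60))
example_pred_ecdf_fig = predictive_ecdf(_pe_samples, data=_pe_data, diff="ecdf")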
def predictive_regression(
samples,
samples_x,
data=None,
diff=False,
percentiles=[95, 68],
color="blue",
data_kwargs={},
p=None,
**kwargs,
):
"""Plot a predictive regression plot from samples.
Parameters
----------
samples : Numpy array, shape (n_samples, n_x) or xarray DataArray
Numpy array containing predictive samples of y-values.
    samples_x : Numpy array, shape (n_x,)
        x-values corresponding to the columns of `samples`.
data : Numpy array, shape (n, 2) or xarray DataArray
If not None, the measured data. The first column is the x-data,
and the second the y-data. These are plotted as points over the
predictive plot.
    diff : bool, default False
If True, the predictive y-values minus the median of the
predictive y-values are plotted.
percentiles : list, default [95, 68]
Percentiles for making colored envelopes for confidence
intervals for the predictive ECDFs. Maximally four can be
specified.
color : str, default 'blue'
One of ['green', 'blue', 'red', 'gray', 'purple', 'orange'].
        These are used to make the color scheme of shading of
percentiles.
data_kwargs : dict
Any kwargs to be passed to p.circle() when plotting the data
points.
p : bokeh.plotting.Figure instance, or None (default)
If None, create a new figure. Otherwise, populate the existing
figure `p`.
kwargs
All other kwargs are passed to bokeh.plotting.figure().
Returns
-------
output : Bokeh figure
        Figure populated with glyphs describing the range of values for
        the samples. The shading goes according to percentiles of
samples, with the median plotted as line in the middle.
"""
if type(samples) != np.ndarray:
if type(samples) == xarray.core.dataarray.DataArray:
samples = samples.squeeze().values
else:
raise RuntimeError("Samples can only be Numpy arrays and xarrays.")
if type(samples_x) != np.ndarray:
if type(samples_x) == xarray.core.dataarray.DataArray:
samples_x = samples_x.squeeze().values
else:
raise RuntimeError("`samples_x` can only be Numpy array or xarray.")
if len(percentiles) > 4:
raise RuntimeError("Can specify maximally four percentiles.")
# Build ptiles
percentiles = np.sort(percentiles)[::-1]
ptiles = [pt for pt in percentiles if pt > 0]
ptiles = (
[50 - pt / 2 for pt in percentiles]
+ [50]
+ [50 + pt / 2 for pt in percentiles[::-1]]
)
ptiles_str = [str(pt) for pt in ptiles]
if color not in ["green", "blue", "red", "gray", "purple", "orange", "betancourt"]:
raise RuntimeError(
"Only allowed colors are 'green', 'blue', 'red', 'gray', 'purple', 'orange'"
)
colors = {
"blue": ["#9ecae1", "#6baed6", "#4292c6", "#2171b5", "#084594"],
"green": ["#a1d99b", "#74c476", "#41ab5d", "#238b45", "#005a32"],
"red": ["#fc9272", "#fb6a4a", "#ef3b2c", "#cb181d", "#99000d"],
"orange": ["#fdae6b", "#fd8d3c", "#f16913", "#d94801", "#8c2d04"],
"purple": ["#bcbddc", "#9e9ac8", "#807dba", "#6a51a3", "#4a1486"],
"gray": ["#bdbdbd", "#969696", "#737373", "#525252", "#252525"],
"betancourt": [
"#DCBCBC",
"#C79999",
"#B97C7C",
"#A25050",
"#8F2727",
"#7C0000",
],
}
if samples.shape[1] != len(samples_x):
        raise ValueError(
            "`samples_x` must have the same number of entries as `samples` has columns."
        )
# It's useful to have data as a data frame
if data is not None:
if type(data) == tuple and len(data) == 2 and len(data[0]) == len(data[1]):
data = np.vstack(data).transpose()
df_data = pd.DataFrame(data=data, columns=["__data_x", "__data_y"])
df_data = df_data.sort_values(by="__data_x")
# Make sure all entries in x-data in samples_x
if diff:
if len(samples_x) != len(df_data) or not np.allclose(
np.sort(samples_x), df_data["__data_x"].values
):
raise ValueError(
"If `diff == True`, then samples_x must match the x-values of `data`."
)
df_pred = pd.DataFrame(
data=np.percentile(samples, ptiles, axis=0).transpose(),
columns=[str(ptile) for ptile in ptiles],
)
df_pred["__x"] = samples_x
df_pred = df_pred.sort_values(by="__x")
if p is None:
x_axis_label = kwargs.pop("x_axis_label", "x")
y_axis_label = kwargs.pop("y_axis_label", "y difference" if diff else "y")
if "plot_height" not in kwargs and "frame_height" not in kwargs:
kwargs["frame_height"] = 325
if "plot_width" not in kwargs and "frame_width" not in kwargs:
kwargs["frame_width"] = 400
p = bokeh.plotting.figure(
x_axis_label=x_axis_label, y_axis_label=y_axis_label, **kwargs
)
for i, ptile in enumerate(ptiles_str[: len(ptiles_str) // 2]):
if diff:
y1 = df_pred[ptile] - df_pred["50"]
y2 = df_pred[ptiles_str[-i - 1]] - df_pred["50"]
else:
y1 = df_pred[ptile]
y2 = df_pred[ptiles_str[-i - 1]]
fill_between(
x1=df_pred["__x"],
x2=df_pred["__x"],
y1=y1,
y2=y2,
p=p,
show_line=False,
patch_kwargs=dict(fill_color=colors[color][i]),
)
# The median as a solid line
if diff:
p.line(
df_pred["__x"],
np.zeros_like(samples_x),
line_width=2,
color=colors[color][-1],
)
else:
p.line(df_pred["__x"], df_pred["50"], line_width=2, color=colors[color][-1])
# Overlay data set
if data is not None:
data_color = data_kwargs.pop("color", "orange")
data_alpha = data_kwargs.pop("alpha", 1.0)
data_size = data_kwargs.pop("size", 2)
if diff:
p.circle(
df_data["__data_x"],
df_data["__data_y"] - df_pred["50"],
color=data_color,
size=data_size,
alpha=data_alpha,
**data_kwargs,
)
else:
p.circle(
df_data["__data_x"],
df_data["__data_y"],
color=data_color,
size=data_size,
alpha=data_alpha,
**data_kwargs,
)
return p
def sbc_rank_ecdf(
sbc_output=None,
parameters=None,
diff=True,
ptile=99.0,
bootstrap_envelope=False,
n_bs_reps=None,
show_envelope=True,
show_envelope_line=True,
color_by_warning_code=False,
staircase=False,
p=None,
marker_kwargs={},
envelope_patch_kwargs={},
envelope_line_kwargs={},
palette=None,
show_legend=True,
**kwargs,
):
"""Make a rank ECDF plot from simulation-based calibration.
Parameters
----------
sbc_output : DataFrame
Output of bebi103.stan.sbc() containing results from an SBC
calculation.
parameters : list of str, or None (default)
List of parameters to include in the SBC rank ECDF plot. If
None, use all parameters. For multidimensional parameters, each
entry must be given separately, e.g.,
`['alpha[0]', 'alpha[1]', 'beta[0,1]']`.
diff : bool, default True
If True, plot the ECDF minus the ECDF of a Uniform distribution.
Otherwise, plot the ECDF of the rank statistic from SBC.
ptile : float, default 99
        Which percentile to use as the envelope in the plot.
bootstrap_envelope : bool, default False
If True, use bootstrapping on the appropriate Uniform
distribution to compute the envelope. Otherwise, use the
Gaussian approximation for the envelope.
    n_bs_reps : int, default None
Number of bootstrap replicates to use when computing the
envelope. If None, n_bs_reps is determined from the formula
int(max(n, max(L+1, 100/(100-ptile))) * 100), where n is the
number of simulations used in the SBC calculation.
show_envelope : bool, default True
If True, display the envelope encompassing the ptile percent
confidence interval for the SBC ECDF.
show_envelope_line : bool, default True
If True, and `show_envelope` is also True, plot a line around
the envelope.
color_by_warning_code : bool, default False
If True, color glyphs by diagnostics warning code instead of
coloring the glyphs by parameter
staircase : bool, default False
If True, plot the ECDF as a staircase. Otherwise, plot with
dots.
p : bokeh.plotting.Figure instance, default None
Plot to which to add the SBC rank ECDF plot. If None, create a
new figure.
marker_kwargs : dict, default {}
Dictionary of kwargs to pass to `p.circle()` or `p.line()` when
plotting the SBC ECDF.
envelope_patch_kwargs : dict
Any kwargs passed into p.patch(), which generates the fill of
the envelope.
envelope_line_kwargs : dict
Any kwargs passed into p.line() in generating the line around
the fill of the envelope.
palette : list of strings of hex colors, or single hex string
If a list, color palette to use. If a single string representing
a hex color, all glyphs are colored with that color. Default is
colorcet.b_glasbey_category10 from the colorcet package.
show_legend : bool, default True
If True, show legend.
kwargs : dict
Any kwargs passed to `bokeh.plotting.figure()` when creating the
plot.
Returns
-------
output : bokeh.plotting.Figure instance
A plot containing the SBC plot.
Notes
-----
You can see example SBC ECDF plots in Fig. 14 b and c in this
paper: https://arxiv.org/abs/1804.06788
"""
if sbc_output is None:
raise RuntimeError("Argument `sbc_output` must be specified.")
# Defaults
if palette is None:
palette = colorcet.b_glasbey_category10
    elif type(palette) not in [list, tuple]:
palette = [palette]
if "x_axis_label" not in kwargs:
kwargs["x_axis_label"] = "rank statistic"
if "y_axis_label" not in kwargs:
kwargs["y_axis_label"] = "ECDF difference" if diff else "ECDF"
if "plot_height" not in kwargs and "frame_height" not in kwargs:
kwargs["frame_height"] = 275
if "plot_width" not in kwargs and "frame_width" not in kwargs:
kwargs["frame_width"] = 450
toolbar_location = kwargs.pop("toolbar_location", "above")
if "fill_color" not in envelope_patch_kwargs:
envelope_patch_kwargs["fill_color"] = "gray"
if "fill_alpha" not in envelope_patch_kwargs:
envelope_patch_kwargs["fill_alpha"] = 0.5
if "line_color" not in envelope_line_kwargs:
envelope_line_kwargs["line_color"] = "gray"
    if "color" in marker_kwargs and color_by_warning_code:
raise RuntimeError(
"Cannot specify marker color when `color_by_warning_code` is True."
)
if staircase and color_by_warning_code:
raise RuntimeError("Cannot color by warning code for staircase ECDFs.")
if parameters is None:
parameters = list(sbc_output["parameter"].unique())
elif type(parameters) not in [list, tuple]:
parameters = [parameters]
L = sbc_output["L"].iloc[0]
df = sbc_output.loc[
sbc_output["parameter"].isin(parameters),
["parameter", "rank_statistic", "warning_code"],
]
n = (df["parameter"] == df["parameter"].unique()[0]).sum()
if show_envelope:
x, y_low, y_high = _sbc_rank_envelope(
L,
n,
ptile=ptile,
diff=diff,
bootstrap=bootstrap_envelope,
n_bs_reps=n_bs_reps,
)
p = fill_between(
x1=x,
x2=x,
y1=y_high,
y2=y_low,
patch_kwargs=envelope_patch_kwargs,
line_kwargs=envelope_line_kwargs,
show_line=show_envelope_line,
p=p,
toolbar_location=toolbar_location,
**kwargs,
)
else:
p = bokeh.plotting.figure(toolbar_location=toolbar_location, **kwargs)
if staircase:
dfs = []
for param in parameters:
if diff:
x_data, y_data = _ecdf_diff(
df.loc[df["parameter"] == param, "rank_statistic"],
L,
staircase=True,
)
else:
x_data, y_data = _ecdf_vals(
df.loc[df["parameter"] == param, "rank_statistic"], staircase=True
)
dfs.append(
pd.DataFrame(
data=dict(rank_statistic=x_data, __ECDF=y_data, parameter=param)
)
)
df =
|
pd.concat(dfs, ignore_index=True)
|
pandas.concat
|
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 19 21:06:01 2022
@author: Nehal
"""
import streamlit as st
import pandas as pd
def app():
# @cache
@st.cache
def load_data():
#datacsv = pd.read_csv("C:/Users/Nehal/JupyterNotebooks/TheGraph_Decentraland.csv") #To load it from local
datacsv =
|
pd.read_csv("TheGraph_Decentraland.csv")
|
pandas.read_csv
|
"""
Tests for the pandas.io.common functionalities
"""
import mmap
import os
import re
import pytest
from pandas.compat import FileNotFoundError, StringIO, is_platform_windows
import pandas.util._test_decorators as td
import pandas as pd
import pandas.util.testing as tm
import pandas.io.common as icom
class CustomFSPath(object):
"""For testing fspath on unknown objects"""
def __init__(self, path):
self.path = path
def __fspath__(self):
return self.path
# Functions that consume a string path and return a string or path-like object
path_types = [str, CustomFSPath]
try:
from pathlib import Path
path_types.append(Path)
except ImportError:
pass
try:
from py.path import local as LocalPath
path_types.append(LocalPath)
except ImportError:
pass
HERE = os.path.abspath(os.path.dirname(__file__))
# https://github.com/cython/cython/issues/1720
@pytest.mark.filterwarnings("ignore:can't resolve package:ImportWarning")
class TestCommonIOCapabilities(object):
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def test_expand_user(self):
filename = '~/sometest'
expanded_name = icom._expand_user(filename)
assert expanded_name != filename
assert os.path.isabs(expanded_name)
assert os.path.expanduser(filename) == expanded_name
def test_expand_user_normal_path(self):
filename = '/somefolder/sometest'
expanded_name = icom._expand_user(filename)
assert expanded_name == filename
assert os.path.expanduser(filename) == expanded_name
@td.skip_if_no('pathlib')
def test_stringify_path_pathlib(self):
rel_path = icom._stringify_path(Path('.'))
assert rel_path == '.'
redundant_path = icom._stringify_path(Path('foo//bar'))
assert redundant_path == os.path.join('foo', 'bar')
@td.skip_if_no('py.path')
def test_stringify_path_localpath(self):
path = os.path.join('foo', 'bar')
abs_path = os.path.abspath(path)
lpath = LocalPath(path)
assert icom._stringify_path(lpath) == abs_path
def test_stringify_path_fspath(self):
p = CustomFSPath('foo/bar.csv')
result = icom._stringify_path(p)
assert result == 'foo/bar.csv'
@pytest.mark.parametrize('extension,expected', [
('', None),
('.gz', 'gzip'),
('.bz2', 'bz2'),
('.zip', 'zip'),
('.xz', 'xz'),
])
@pytest.mark.parametrize('path_type', path_types)
def test_infer_compression_from_path(self, extension, expected, path_type):
path = path_type('foo/bar.csv' + extension)
compression = icom._infer_compression(path, compression='infer')
assert compression == expected
def test_get_filepath_or_buffer_with_path(self):
filename = '~/sometest'
filepath_or_buffer, _, _, should_close = icom.get_filepath_or_buffer(
filename)
assert filepath_or_buffer != filename
assert os.path.isabs(filepath_or_buffer)
assert os.path.expanduser(filename) == filepath_or_buffer
assert not should_close
def test_get_filepath_or_buffer_with_buffer(self):
input_buffer = StringIO()
filepath_or_buffer, _, _, should_close = icom.get_filepath_or_buffer(
input_buffer)
assert filepath_or_buffer == input_buffer
assert not should_close
def test_iterator(self):
reader = pd.read_csv(StringIO(self.data1), chunksize=1)
result = pd.concat(reader, ignore_index=True)
expected = pd.read_csv(StringIO(self.data1))
tm.assert_frame_equal(result, expected)
# GH12153
it = pd.read_csv(StringIO(self.data1), chunksize=1)
first = next(it)
tm.assert_frame_equal(first, expected.iloc[[0]])
tm.assert_frame_equal(pd.concat(it), expected.iloc[1:])
@pytest.mark.parametrize('reader, module, error_class, fn_ext', [
(pd.read_csv, 'os', FileNotFoundError, 'csv'),
(pd.read_fwf, 'os', FileNotFoundError, 'txt'),
(pd.read_excel, 'xlrd', FileNotFoundError, 'xlsx'),
(pd.read_feather, 'feather', Exception, 'feather'),
(pd.read_hdf, 'tables', FileNotFoundError, 'h5'),
(pd.read_stata, 'os', FileNotFoundError, 'dta'),
(pd.read_sas, 'os', FileNotFoundError, 'sas7bdat'),
(pd.read_json, 'os', ValueError, 'json'),
(pd.read_msgpack, 'os', ValueError, 'mp'),
(pd.read_pickle, 'os', FileNotFoundError, 'pickle'),
])
def test_read_non_existant(self, reader, module, error_class, fn_ext):
pytest.importorskip(module)
path = os.path.join(HERE, 'data', 'does_not_exist.' + fn_ext)
with pytest.raises(error_class):
reader(path)
@pytest.mark.parametrize('reader, module, error_class, fn_ext', [
(pd.read_csv, 'os', FileNotFoundError, 'csv'),
(pd.read_fwf, 'os', FileNotFoundError, 'txt'),
(pd.read_excel, 'xlrd', FileNotFoundError, 'xlsx'),
(pd.read_feather, 'feather', Exception, 'feather'),
(pd.read_hdf, 'tables', FileNotFoundError, 'h5'),
(pd.read_stata, 'os', FileNotFoundError, 'dta'),
(pd.read_sas, 'os', FileNotFoundError, 'sas7bdat'),
(pd.read_json, 'os', ValueError, 'json'),
(pd.read_msgpack, 'os', ValueError, 'mp'),
(pd.read_pickle, 'os', FileNotFoundError, 'pickle'),
])
def test_read_expands_user_home_dir(self, reader, module,
error_class, fn_ext, monkeypatch):
pytest.importorskip(module)
path = os.path.join('~', 'does_not_exist.' + fn_ext)
monkeypatch.setattr(icom, '_expand_user',
lambda x: os.path.join('foo', x))
message = "".join(["foo", os.path.sep, "does_not_exist.", fn_ext])
with pytest.raises(error_class, message=re.escape(message)):
reader(path)
def test_read_non_existant_read_table(self):
path = os.path.join(HERE, 'data', 'does_not_exist.' + 'csv')
with pytest.raises(FileNotFoundError):
with tm.assert_produces_warning(FutureWarning):
pd.read_table(path)
@pytest.mark.parametrize('reader, module, path', [
(pd.read_csv, 'os', ('io', 'data', 'iris.csv')),
(pd.read_fwf, 'os', ('io', 'data', 'fixed_width_format.txt')),
(pd.read_excel, 'xlrd', ('io', 'data', 'test1.xlsx')),
(pd.read_feather, 'feather', ('io', 'data', 'feather-0_3_1.feather')),
(pd.read_hdf, 'tables', ('io', 'data', 'legacy_hdf',
'datetimetz_object.h5')),
(pd.read_stata, 'os', ('io', 'data', 'stata10_115.dta')),
(pd.read_sas, 'os', ('io', 'sas', 'data', 'test1.sas7bdat')),
(pd.read_json, 'os', ('io', 'json', 'data', 'tsframe_v012.json')),
(pd.read_msgpack, 'os', ('io', 'msgpack', 'data', 'frame.mp')),
(pd.read_pickle, 'os', ('io', 'data', 'categorical_0_14_1.pickle')),
])
def test_read_fspath_all(self, reader, module, path, datapath):
pytest.importorskip(module)
path = datapath(*path)
mypath = CustomFSPath(path)
result = reader(mypath)
expected = reader(path)
if path.endswith('.pickle'):
# categorical
tm.assert_categorical_equal(result, expected)
else:
tm.assert_frame_equal(result, expected)
def test_read_fspath_all_read_table(self, datapath):
path = datapath('io', 'data', 'iris.csv')
mypath = CustomFSPath(path)
with tm.assert_produces_warning(FutureWarning):
result = pd.read_table(mypath)
with tm.assert_produces_warning(FutureWarning):
expected = pd.read_table(path)
if path.endswith('.pickle'):
# categorical
tm.assert_categorical_equal(result, expected)
else:
|
tm.assert_frame_equal(result, expected)
|
pandas.util.testing.assert_frame_equal
|
import io
import os
import re
import shutil
import string
import tempfile
import pandas as pd
import numpy as np
import psweep as ps
pj = os.path.join
here = os.path.abspath(os.path.dirname(__file__))
# ----------------------------------------------------------------------------
# helpers
# ----------------------------------------------------------------------------
def system(cmd):
return ps.system(cmd).stdout.decode()
def func(pset):
# We need to multiply by a float here to make sure the 'result' column has
# float dtype. Else the column will be cast to float once we add NaNs,
# which breaks df.equals(other_df) .
return {"result": pset["a"] * 10.0}
# ----------------------------------------------------------------------------
# test function
# ----------------------------------------------------------------------------
def test_run_all_examples():
dr = os.path.abspath("{}/../../../examples".format(here))
for basename in os.listdir(dr):
path = pj(dr, basename)
print(f"running example: {path}")
if basename.endswith(".py"):
with tempfile.TemporaryDirectory() as tmpdir:
cmd = f"""
cp {path} {tmpdir}/; cd {tmpdir};
python3 {path}
"""
print(system(cmd))
elif os.path.isdir(path):
with tempfile.TemporaryDirectory() as tmpdir:
shutil.copytree(path, tmpdir, dirs_exist_ok=True)
cmd = f"cd {tmpdir}; ./run_example.sh; ./clean.sh"
print(system(cmd))
def test_shell_call():
print(system("ls"))
with tempfile.TemporaryDirectory() as tmpdir:
txt = "ls"
fn = pj(tmpdir, "test.sh")
ps.file_write(fn, txt)
cmd = f"cd {tmpdir}; sh test.sh"
print(system(cmd))
def test_run():
with tempfile.TemporaryDirectory() as tmpdir:
params = [{"a": 1}, {"a": 2}, {"a": 3}, {"a": 4}]
calc_dir = "{}/calc".format(tmpdir)
# run two times, updating the database, the second time,
# also write tmp results
df = ps.run_local(func, params, calc_dir=calc_dir)
assert len(df) == 4
assert len(df._run_id.unique()) == 1
assert len(df._pset_id.unique()) == 4
df = ps.run_local(
func, params, calc_dir=calc_dir, poolsize=2, tmpsave=True
)
assert len(df) == 8
assert len(df._run_id.unique()) == 2
assert len(df._pset_id.unique()) == 8
assert set(df.columns) == set(
[
"_calc_dir",
"_pset_id",
"_run_id",
"_pset_seq",
"_run_seq",
"_pset_sha1",
"_time_utc",
"a",
"result",
]
)
dbfn = "{}/database.pk".format(calc_dir)
assert os.path.exists(dbfn)
assert df.equals(ps.df_read(dbfn))
# tmp results of second run
run_id = df._run_id.unique()[-1]
for pset_id in df[df._run_id == run_id]._pset_id:
tmpsave_fn = "{calc_dir}/tmpsave/{run_id}/{pset_id}.pk".format(
calc_dir=calc_dir, run_id=run_id, pset_id=pset_id
)
assert os.path.exists(tmpsave_fn)
def test_simulate():
with tempfile.TemporaryDirectory() as tmpdir:
params = [{"a": 1}, {"a": 2}, {"a": 3}, {"a": 4}]
params_sim = [{"a": 88}, {"a": 99}]
calc_dir = "{}/calc".format(tmpdir)
calc_dir_sim = calc_dir + ".simulate"
df = ps.run_local(func, params, calc_dir=calc_dir)
df_sim = ps.run_local(
func, params_sim, calc_dir=calc_dir, simulate=True
)
dbfn = "{}/database.pk".format(calc_dir)
dbfn_sim = "{}/database.pk".format(calc_dir_sim)
assert len(df_sim) == 6
assert len(df) == 4
assert os.path.exists(dbfn)
assert os.path.exists(dbfn_sim)
assert df.equals(ps.df_read(dbfn))
assert df_sim.equals(ps.df_read(dbfn_sim))
assert df.iloc[:4].equals(df_sim.iloc[:4])
assert np.isnan(df_sim.result.values[-2:]).all()
df2 = ps.run_local(func, params_sim, calc_dir=calc_dir)
assert len(df2) == 6
assert df.iloc[:4].equals(df2.iloc[:4])
assert (df2.result.values[-2:] == np.array([880.0, 990.0])).all()
def test_is_seq():
no = [{"a": 1}, io.IOBase(), "123"]
yes = [[1, 2], {1, 2}, (1, 2)]
for obj in no:
print(obj)
assert not ps.is_seq(obj)
for obj in yes:
print(obj)
assert ps.is_seq(obj)
def test_df_io():
from pandas.testing import assert_frame_equal
letters = string.ascii_letters
ri = np.random.randint
rn = np.random.rand
# random string
rs = lambda n: "".join(letters[ii] for ii in ri(0, len(letters), n))
for fmt in ["pickle", "json"]:
df = pd.DataFrame()
for _ in range(2):
vals = [
ri(0, 100),
rs(5),
np.nan,
'"{}"'.format(rs(5)),
"'{}'".format(rs(5)),
(ri(0, 99), rn(), "{}".format(rs(5))),
[ri(0, 99), rn(), "{}".format(rs(5))],
rn(),
rn(5),
rn(5, 5),
list(rn(5)),
{"a": 1, "b": 3, "c": [1, 2, 3]},
]
if fmt == "pickle":
vals += [
True,
False,
None,
set(ri(0, 99, 10)),
]
row = pd.DataFrame([dict(zip(letters, vals))])
df = df.append(row, ignore_index=True)
if fmt == "json":
for orient in [
None,
"split",
"records",
"index",
"columns",
"_default_",
]:
print("orient: ", orient)
with tempfile.NamedTemporaryFile() as fd:
if orient != "_default_":
ps.df_write(df, fd.name, fmt=fmt, orient=orient)
read = ps.df_read(fd.name, fmt=fmt, orient=orient)
else:
ps.df_write(df, fd.name, fmt=fmt)
read = ps.df_read(fd.name, fmt=fmt)
assert_frame_equal(df, read, check_exact=False)
elif fmt == "pickle":
with tempfile.NamedTemporaryFile() as fd:
ps.df_write(df, fd.name, fmt=fmt)
read = ps.df_read(fd.name, fmt=fmt)
assert_frame_equal(df, read)
else:
raise Exception("unknown fmt")
def test_save():
with tempfile.TemporaryDirectory() as tmpdir:
params = [{"a": 1}, {"a": 2}, {"a": 3}, {"a": 4}]
calc_dir = "{}/calc".format(tmpdir)
dbfn = "{}/database.pk".format(calc_dir)
df = ps.run_local(func, params, calc_dir=calc_dir, save=False)
assert not os.path.exists(dbfn)
assert os.listdir(tmpdir) == []
ps.run_local(func, params, calc_dir=calc_dir, save=True)
assert os.path.exists(dbfn)
assert os.listdir(tmpdir) != []
def test_merge_dicts():
a = {"a": 1}
b = {"b": 2}
c = {"c": 3}
# API
m1 = ps.merge_dicts(a, b, c)
m2 = ps.merge_dicts([a, b, c])
# correct merge
assert set(m1.keys()) == set(m2.keys())
assert set(m1.values()) == set(m2.values())
assert set(m1.keys()) == set(("a", "b", "c"))
assert set(m1.values()) == set((1, 2, 3))
# left-to-right
a = {"a": 1}
b = {"a": 2}
c = {"a": 3}
m = ps.merge_dicts(a, b)
assert m == {"a": 2}
m = ps.merge_dicts(a, b, c)
assert m == {"a": 3}
def test_scripts():
with tempfile.TemporaryDirectory() as tmpdir:
params = [{"a": 1}, {"a": 2}, {"a": 3}, {"a": 4}]
calc_dir = "{}/calc".format(tmpdir)
ps.run_local(func, params, calc_dir=calc_dir)
db = pj(calc_dir, "database.pk")
print(system(f"psweep-db2json -o columns {db}"))
print(system(f"psweep-db2table -i -a -f simple {db}"))
def test_backup():
def func(pset):
# write stuff to calc_dir
dr = pj(pset["_calc_dir"], pset["_pset_id"])
ps.makedirs(dr)
fn = pj(dr, "foo")
with open(fn, "w") as fd:
fd.write(pset["_pset_id"])
return {"result": pset["a"] * 10}
with tempfile.TemporaryDirectory() as tmpdir:
calc_dir = pj(tmpdir, "calc")
params = [{"a": 1}, {"a": 2}, {"a": 3}, {"a": 4}]
# First run. backup does nothing yet
df0 = ps.run_local(func, params, calc_dir=calc_dir)
unq = df0._run_id.unique()
assert len(unq) == 1
run_id_0 = unq[0]
# Second run. This time, test backup.
df1 = ps.run_local(func, params, calc_dir=calc_dir, backup=True)
rex = re.compile(r"calc.bak_[0-9-]+T[0-9:\.]+Z_run_id.+")
found = False
files = os.listdir(tmpdir)
        for name in files:
if rex.search(name) is not None:
backup_dir = pj(tmpdir, name)
found = True
break
assert found, f"backup dir matching {rex} not found in:\n{files}"
print(os.listdir(backup_dir))
msk = df1._run_id == run_id_0
assert len(df1[msk]) == len(df0)
assert len(df1) == 2 * len(df0)
assert len(df1._run_id.unique()) == 2
for pset_id in df1[msk]._pset_id:
tgt = pj(backup_dir, pset_id, "foo")
assert os.path.exists(tgt)
with open(tgt) as fd:
                assert fd.read().strip() == pset_id
assert os.path.exists(pj(backup_dir, "database.pk"))
def test_pass_df_interactive():
def df_cmp(dfa, dfb):
assert (dfa.a.values == dfb.a.values).all()
assert (dfa.result.values == dfb.result.values).all()
assert (dfa._pset_seq.values == dfb._pset_seq.values).all()
assert (dfa._run_seq.values == dfb._run_seq.values).all()
assert (dfa._pset_sha1.values == dfb._pset_sha1.values).all()
with tempfile.TemporaryDirectory() as tmpdir:
calc_dir = pj(tmpdir, "calc")
params = ps.plist("a", [1, 2, 3, 4])
# no db disk write for now, test passing in no df, df=None and empty df
df1_1 = ps.run_local(func, params, calc_dir=calc_dir, save=False)
df1_2 = ps.run_local(
func, params, calc_dir=calc_dir, save=False, df=None
)
df_cmp(df1_1, df1_2)
df1_3 = ps.run_local(
func, params, calc_dir=calc_dir, save=False, df=
|
pd.DataFrame()
|
pandas.DataFrame
|
import numpy as np
import pandas as pd
import lib.similarities.n0similarities as n0
def get_standard_merged(F1, F2):
    # fill empty text cells with a dash placeholder
F1 = F1.fillna('-')
F2 = F2.fillna('-')
    # build a new dataframe containing every row combination (cross join)
F1['key'] = 1
F2['key'] = 1
FF =
|
pd.merge(F1,F2,on='key')
|
pandas.merge
|
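# Editor's note: worked example of the key=1 cross-join trick used above (toy data).
# Every row of F1 is paired with every row of F2, giving len(F1) * len(F2) rows.
F1_demo = pd.DataFrame({'name_x': ['ana', 'budi']})
F2_demo = pd.DataFrame({'name_y': ['citra', 'dewi', 'eko']})
F1_demo['key'] = 1
F2_demo['key'] = 1
FF_demo = pd.merge(F1_demo, F2_demo, on='key')   # 2 * 3 = 6 combinations
# On pandas >= 1.2 the same pairing can be obtained directly with
# pd.merge(F1_demo, F2_demo, how='cross'), without adding a helper 'key' column.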
import unittest
import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal
from yitian.datasource import *
from yitian.datasource import preprocess
class Test(unittest.TestCase):
# def test_standardize_date(self):
# data_pd = pd.DataFrame([
# ['01/01/2019', 11.11],
# ['01/04/2019', 44.44],
# ['01/03/2019', 33.33],
# ['01/02/2019', 22.22]
# ], columns=['Trade Date', 'price'])
#
# expect_pd = pd.DataFrame([
# ['01/01/2019', 11.11],
# ['01/04/2019', 44.44],
# ['01/03/2019', 33.33],
# ['01/02/2019', 22.22]
# ], columns=['date', 'price'])
#
# assert_frame_equal(expect_pd, preprocess.standardize_date(data_pd))
#
# def test_standardize_date_with_multi_date_column(self):
# data_pd = pd.DataFrame([
# ['2019-01-01 00:00:00', '2019-01-01 00:00:00', 11.11],
# ['2019-01-02 00:00:00', '2019-01-01 00:00:00', 22.22],
# ['2019-01-03 00:00:00', '2019-01-01 00:00:00', 33.33],
# ['2019-01-04 00:00:00', '2019-01-01 00:00:00', 44.44],
# ], columns=['DATE', 'date', 'price'])
#
# with self.assertRaises(ValueError) as context:
# preprocess.standardize_date(data_pd)
#
# assert str(context.exception) == \
# str("Original cols ({cols}) cannot be reconnciled with date options ({option})"\
# .format(cols=data_pd.columns.tolist(), option=RAW_DATE_OPTIONS))
def test_create_ts_pd(self):
data_pd = pd.DataFrame([
['01/01/2019', 11.11],
['01/04/2019', 44.44],
['01/03/2019', 33.33],
['01/02/2019', 22.22]
], columns=['date', 'price'])
expect_pd = pd.DataFrame([
[pd.Timestamp('2019-01-01'), 11.11],
[pd.Timestamp('2019-01-02'), 22.22],
[pd.Timestamp('2019-01-03'), 33.33],
[pd.Timestamp('2019-01-04'), 44.44]
], columns=['date', 'price']).set_index('date')
assert_frame_equal(expect_pd, preprocess.create_ts_pd(data_pd))
def test_create_ts_pd_datetime(self):
data_pd = pd.DataFrame([
['2019-01-01 11:11:11', 11.11],
['2019-01-04 04:44:44', 44.44],
['2019-01-03 03:33:33', 33.33],
['2019-01-02 22:22:22', 22.22]
], columns=['datetime', 'price'])
expect_pd = pd.DataFrame([
[pd.Timestamp('2019-01-01 11:11:11'), 11.11],
[pd.Timestamp('2019-01-02 22:22:22'), 22.22],
[pd.Timestamp('2019-01-03 03:33:33'), 33.33],
[pd.Timestamp('2019-01-04 04:44:44'), 44.44]
], columns=['datetime', 'price']).set_index('datetime')
assert_frame_equal(expect_pd, preprocess.create_ts_pd(data_pd, index_col=DATETIME))
def test_add_ymd(self):
data_pd = pd.DataFrame([
[pd.Timestamp('2019-01-01'), 11.11],
[pd.Timestamp('2019-02-02'), 22.22],
[pd.Timestamp('2019-03-03'), 33.33],
[pd.Timestamp('2019-04-04'), 44.44]
], columns=['date', 'price']).set_index('date')
expect_pd = pd.DataFrame([
[pd.Timestamp('2019-01-01'), 11.11, 2019, 1, 1],
[pd.Timestamp('2019-02-02'), 22.22, 2019, 2, 2],
[pd.Timestamp('2019-03-03'), 33.33, 2019, 3, 3],
[pd.Timestamp('2019-04-04'), 44.44, 2019, 4, 4]
], columns=['date', 'price', 'year', 'month', 'day']).set_index('date')
assert_frame_equal(expect_pd, preprocess.add_ymd(data_pd))
def test_add_ymd_datetime(self):
data_pd = pd.DataFrame([
[
|
pd.Timestamp('2019-01-01 11:11:11')
|
pandas.Timestamp
|
import pandas as pd
# UK Gov Covid data downloaded on 20th Oct 2021
COVID_DATA = 'covid_data_overview_2021-10-20.csv'
# Data from the 19th Oct 2021 - only covers 2020 - so want to predict 2021 behaviour
MOBILITY_CHANGE = '2020_GB_Region_Mobility_Report_2021-10-19.csv'
# Load the UK Covid overview data
print(f'Loading - {COVID_DATA}')
df1 = pd.read_csv(COVID_DATA)
# Drop columns that don't provide any additional data
df1.drop(['areaCode',
'areaName',
'areaType',
'newPeopleVaccinatedFirstDoseByPublishDate',
'newPeopleVaccinatedSecondDoseByPublishDate'],
axis='columns',
inplace=True)
# Set the date column to be a datetime column
df1['date'] = pd.to_datetime(df1['date'])
# Sort the rows into ascending date order
df1.sort_values(by=["date"], ignore_index=True, inplace=True)
# Extract 2021 data for use in testing classifiers and save to file
date_mask_2021 = (df1['date'] >= '2021-01-01') & (df1['date'] <= '2021-12-31')
df_2021 = df1.loc[date_mask_2021]
df_2021.to_csv('covid_data_2021_only.csv', index=False)
# Want to select 2020-02-15 to the 2020-12-31 in terms of dates
# Set up a mask to indicate the date selection for 2020 and 2021
date_mask = (df1['date'] >= '2020-02-15') & (df1['date'] <= '2020-12-31')
# Select all the rows that meet the mask search criteria
df1 = df1.loc[date_mask]
# Replace missing values with zeros for hospitalCases
df1['hospitalCases'] = df1['hospitalCases'].fillna(0)
df1['newAdmissions'] = df1['newAdmissions'].fillna(0)
print(f'Loading - {MOBILITY_CHANGE}')
# Load the google Mobility data for the UK
df2 =
|
pd.read_csv(MOBILITY_CHANGE, low_memory=False)
|
pandas.read_csv
|
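# Editor's illustrative sketch (not part of the original script): the boolean
# date-mask pattern used above, applied to a tiny made-up frame.
_toy = pd.DataFrame({
    'date': pd.to_datetime(['2020-02-10', '2020-03-01', '2021-01-05']),
    'newAdmissions': [5, 7, 9],
})
_toy_mask = (_toy['date'] >= '2020-02-15') & (_toy['date'] <= '2020-12-31')
print(_toy.loc[_toy_mask])   # keeps only the 2020-03-01 row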
## Load standard libraries
# import matplotlib.pyplot as plt
# import numpy as np
# import pandas as pd
# import seaborn as sns
# from scipy import stats
# from sklearn.preprocessing import StandardScaler
# import warnings
# warnings.filterwarnings('ignore')
# from IPython.core.display import display, HTML
# plt.rcParams["figure.figsize"] = (12,8)
# ## Load auxiliary packages
# try:
# import sidetable
# except:
# %pip install sidetable
# import sidetable
# import Wikipedia pages
# try:
# import wikipedia as wp
# except:
# !pip install wikipedia
# import wikipedia as wp
# wp.set_lang("pt")
# Dashboards
# Static figures to show on GitHub
# !pip install kaleido==0.0.1
# !pip install psutil==5.7.2
# # !pip install plotly==4.9.0
# !pip install -U plotly
# import plotly.express as px
# from IPython.core.display import display, HTML
# https://stackoverflow.com/questions/7261936/convert-an-excel-or-spreadsheet-column-letter-to-its-number-in-pythonic-fashion
excel_col_name = lambda n: '' if n <= 0 else excel_col_name((n - 1) // 26) + chr((n - 1) % 26 + ord('A'))
excel_col_num = lambda a: 0 if a == '' else 1 + ord(a[-1]) - ord('A') + 26 * excel_col_num(a[:-1])
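# A minimal sanity-check sketch for the two lambdas above: 1-based column numbers
# round-trip through their letter names (e.g. 1 -> 'A', 28 -> 'AB', 703 -> 'AAA').
def demo_excel_col_helpers():
    assert excel_col_name(1) == 'A'
    assert excel_col_name(28) == 'AB'
    assert excel_col_num('AAA') == 703
    assert all(excel_col_num(excel_col_name(n)) == n for n in range(1, 1000))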
def minimum_example_df():
""" generate minimum example
src: https://stackoverflow.com/questions/20109391/how-to-make-good-reproducible-pandas-examples
# ex:
df = minimum_example_df()
# other ideas
# stocks = pd.DataFrame({
# 'ticker':np.repeat( ['aapl','goog','yhoo','msft'], 50 ),
# 'date':np.tile( pd.date_range('1/1/2011', periods=50, freq='D'), 4 ),
# 'price':(np.random.randn(200).cumsum() + 10) })
"""
import numpy as np
import pandas as pd
np.random.seed(123)
df = pd.DataFrame({
# some ways to create random data
'a':np.random.randn(6),
'b':np.random.choice( [5,7,np.nan], 6),
'c':np.random.choice( ['panda','python','shark'], 6),
# some ways to create systematic groups for indexing or groupby
# this is similar to r's expand.grid(), see note 2 below
'd':np.repeat( range(3), 2 ),
'e':np.tile( range(2), 3 ),
# a date range and set of random dates
'f':pd.date_range('1/1/2011', periods=6, freq='D'),
'g':np.random.choice( pd.date_range('1/1/2011', periods=365,
freq='D'), 6, replace=False)
})
return df
def move_legend(ax, new_loc, **kws):
"""
moves a sns legend
# examples:
# move_legend(ax, "upper left")
# out of the graph area
move_legend(ax, "upper left", bbox_to_anchor=(1.04,1))
# src: https://github.com/mwaskom/seaborn/issues/2280
"""
old_legend = ax.legend_
handles = old_legend.legendHandles
labels = [t.get_text() for t in old_legend.get_texts()]
title = old_legend.get_title().get_text()
ax.legend(handles, labels, loc=new_loc, title=title, **kws)
pass
def display_side_by_side(dfs:list, captions:list):
"""Display tables side by side to save vertical space
Input:
dfs: list of pandas.DataFrame
captions: list of table captions
[Thanks <NAME>](https://stackoverflow.com/questions/38783027/jupyter-notebook-display-two-pandas-tables-side-by-side)
# Minimum example
import pandas as pd
col = ['A','B','C','D']
df1 = pd.DataFrame(np.arange(12).reshape((3,4)),columns=col)
df2 = pd.DataFrame(np.arange(16).reshape((4,4)),columns=col)
display_side_by_side([df1, df2], ['DF1', 'DF2'])
# output:
DF1 DF2
A B C D A B C D
0 0 1 2 3 0 0 1 2 3
1 4 5 6 7 1 4 5 6 7
2 8 9 10 11 2 8 9 10 11
3 12 13 14 15
"""
output = ""
combined = dict(zip(captions, dfs))
for caption, df in combined.items():
output += df.style.set_table_attributes("style='display:inline'") \
.set_caption(caption)._repr_html_()
output += "\xa0\xa0\xa0"
display(HTML(output))
pass
def summarize_data(df, max_categories=10):
""" Summarize tables (repetition patterns)
# Minimum example
import pandas as pd
df = pd.DataFrame([[1, 2], [1, 3], [4, 6]], columns=['A', 'B'])
df_list, capt_list = summarize_data(df, max_categories=2)
df_list, capt_list
# output:
([ A count percent cumulative_count cumulative_percent
0 1 2 66.666667 2 66.666667
1 4 1 33.333333 3 100.000000,
B_Repetitions count percent cumulative_count cumulative_percent
0 1 3 100.0 3 100.0],
['A', 'B_Repetitions'])
"""
try:
import sidetable
except:
# %pip install sidetable
# !pip install sidetable
import sidetable
df_list=[]
capt_list=[]
for col in list(df.columns):
df_freq = df.stb.freq([col])
if len(df_freq) > max_categories:
col_name = col + '_Repetitions'
df_freq.rename(columns={'count':col_name}, inplace=True)
# display(df_freq.stb.freq([col_name], style=True))
capt_list.append(col_name)
df_list.append(df_freq.stb.freq([col_name]))
else:
# display(df.stb.freq([col], style=True))
capt_list.append(col)
df_list.append(df_freq)
return df_list, capt_list
def plot_data(df, max_categories=10, plot_col=2, size=6):
"""
Plot histograms and bar charts
if unique_values > max_categories:
histograms with Repetitions
else:
barplots with Ocurrences
Args:
df (pandas Dataframe): data in a tab form
max_categories (int, optional): threshold to barplot. Defaults to 10.
plot_col (int, optional): number of figures per row. Defaults to 2.
size (int, optional): dimension of the figure. Defaults to 6.
# Example
df = minimum_example_df()
plot_data(df, max_categories=10, plot_col=3, size=6)
"""
import matplotlib.pyplot as plt
plot_row = df.shape[1] // plot_col# + df.shape[1] % plot_col
if df.shape[1] % plot_col != 0:
plot_row += 1
fig, axes = plt.subplots(plot_row, plot_col,
figsize=(size*plot_col,size*plot_row/2))
count = 0
for col in list(df.columns):
ax_row = count // plot_col
ax_col = count % plot_col
try:
ax = axes[ax_row, ax_col]
except: # single row
ax = axes[ax_col]
df_freq = df.stb.freq([col])
if len(df_freq) > max_categories:
col_name = col + '_Repetitions'
df_freq.rename(columns={'count':col_name}, inplace=True)
if len(df_freq[col_name].unique()) > max_categories:
df_freq.hist(
column=col_name
, ax=ax
, sharex=False
, sharey=False
)
else:
df_freq[col_name].value_counts().plot.bar(
ax=ax
, rot=0
)
ax.set_title(col_name)
else:
df[col].value_counts().plot.bar(
ax=ax
, rot=90
)
ax.set_title(col)
count += 1
fig.tight_layout()
pass
def func(pct, total):
"""
Format label to pie chart: pct% (occurence/total)
"""
ocurrence = int(pct/100*total)
return "{:.1f}%\n({:d}/{:d})".format(pct, ocurrence, total)
def plot_pie_data(df, max_categories=5, plot_col=2, size=6):
""" Plot pie charts
if unique_values > max_categories:
pie chart
else:
pass
Args:
df (pandas Dataframe): data in a tab form
max_categories (int, optional): threshold to pie plot. Defaults to 5.
plot_col (int, optional): number of figures per row. Defaults to 2.
size (int, optional): dimension of the figure. Defaults to 6.
# Example:
df = minimum_example_df()
plot_pie_data(df, max_categories=5, plot_col=2, size=6)
"""
import matplotlib.pyplot as plt
import numpy as np
nr_plots = 0
list_col = list(df.columns)
mask = np.array([False]*len(list_col))
count = 0
for col in list_col:
if len(list(df[col].unique())) < max_categories:
nr_plots += 1
mask[count] = True
count += 1
pie_list = [i for (i, v) in zip(list_col, mask) if v]
plot_row = -(-nr_plots // plot_col)  # ceiling division: just enough rows for all pie charts
fig, axes = plt.subplots(plot_row, plot_col,
figsize=(size*plot_col,size*plot_row/2))
count = 0
for col in pie_list:
ax_row = count // plot_col
ax_col = count % plot_col
ax = axes[ax_row, ax_col]
wedges, texts, autotexts = ax.pie(
df[col].value_counts()
, autopct=lambda pct: func(pct, len(df))
, textprops=dict(color="w")
)
ax.set_title(col)
ax.legend(wedges
, list(df[col].unique())
#, title=col
, loc="center left"
, bbox_to_anchor=(1, 0, 0.5, 1)
)
plt.setp(
autotexts
, size=10
, weight="bold"
)
count += 1
fig.tight_layout()
pass
def compare_two_variables(col1, col2, df):
"""
col1, col2: strings with columns names to be compared
df: dataframe
Method:
compute value_counts distribution
out:
histogram(col1)
plot(col1, col2)
dataframe
without return
Example:
df = minimum_example_df()
compare_two_variables(col1='c', col2='d', df=df)
Out:
index_x c index_y d
0 python 2 2 2
1 shark 2 1 2
2 panda 2 0 2
"""
import matplotlib.pyplot as plt
import pandas as pd
fig, (ax1,ax2) = plt.subplots(1, 2)
# Correlation plot
ax1.plot(
df[col1].value_counts()
, df[col2].value_counts()
);
# Histogram
df[col1].value_counts().hist(ax=ax2);
fig.set_size_inches(12,4)
# Compare value counts
df1 = df[col1].value_counts().reset_index()
df2 = df[col2].value_counts().reset_index()
display(pd.merge(df1, df2, left_index=True, right_index=True))
pass
def plot_dist_qq_box(df, col, fit_legend='normal_fit'):
"""
Example 1:
import numpy as np
import pandas as pd
plot_dist_qq_box(pd.DataFrame({'A': np.arange(100)}), col='A')
Example 2:
import pandas as pd
df = pd.DataFrame([[1, 2], [1, 3], [4, 6]], columns=['A', 'B'])
plot_dist_qq_box(df, col='A')
"""
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import stats  # stats.norm is used as the fit distribution below
from IPython.core.pylabtools import figsize
from statsmodels.graphics.gofplots import qqplot
figsize(12, 8)
#TODO: deprecated distplot
variable_to_plot = df[col]
sns.set()
fig, axes = plt.subplots(2, 2)
l1 = sns.distplot(
variable_to_plot
, fit=stats.norm
, kde=False
, ax=axes[0,0]
)
# l1 = sns.histplot(
# variable_to_plot
# #, fit=sct.norm
# , kde=True
# , ax=axes[0,0]
# )
l2 = sns.boxplot(
variable_to_plot
, orient='v'
, ax=axes[0,1]
)
l3 = qqplot(
variable_to_plot
, line='s'
, ax=axes[1,0]
)
l4 = sns.distplot(
variable_to_plot
, fit=stats.norm
, hist=False
, kde_kws={"shade": True}
, ax=axes[1,1]
)
# l4 = sns.kdeplot(
# variable_to_plot
# #, fit=sct.norm
# #, hist=False
# #, kde_kws={"shade": True}
# , ax=axes[1,1]
# )
axes[0,0].legend((fit_legend,'distribution'))
axes[1,0].legend(('distribution',fit_legend))
axes[1,1].legend((fit_legend,'kde_gaussian'))
xlabel = col
axes[0,0].set_xlabel(xlabel)
axes[0,1].set_xlabel(xlabel)
axes[1,1].set_xlabel(xlabel)
plt.show()
pass
def plot_dist_qq_box1(df, feature_name):
"""
input: df (pd.DataFrame); feature_name (string)
output: 3 plots
1) distribution normal plot (Gaussian Kernel)
2) qqplot (imput nan with mean)
3) boxplot
Thanks to [<NAME>](https://www.kaggle.com/datafan07/titanic-eda-and-several-modelling-approaches)
Example:
import pandas as pd
df = pd.DataFrame([[1, 2], [1, 3], [4, 6]], columns=['A', 'B'])
plot_dist_qq_box1(df, feature_name='A')
"""
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from matplotlib.ticker import MaxNLocator
from scipy import stats  # plt/np/sns/stats are all used in the plotting code below
# Creating a customized chart. and giving in figsize and everything.
fig = plt.figure(
constrained_layout=True
, figsize=(12, 8)
)
# Creating a grid of 3 cols and 3 rows.
grid = gridspec.GridSpec(ncols=3, nrows=3, figure=fig)
# Customizing the histogram grid.
ax1 = fig.add_subplot(grid[0, :2])
ax1.set_title('Histogram')
# Plot the histogram.
sns.distplot(
df.loc[:, feature_name]
, hist=True
, kde=True
, fit=stats.norm
, ax=ax1
, color='#e74c3c'
)
ax1.legend(labels=['Normal', 'Actual'])
# Customizing the QQ_plot.
ax2 = fig.add_subplot(grid[1, :2])
ax2.set_title('Probability Plot')
# Plotting the QQ_Plot.
stats.probplot(
df.loc[:, feature_name].fillna(np.mean(df.loc[:, feature_name]))
, plot=ax2
)
ax2.get_lines()[0].set_markerfacecolor('#e74c3c')
ax2.get_lines()[0].set_markersize(12.0)
# Customizing the Box Plot.
ax3 = fig.add_subplot(grid[:, 2])
ax3.set_title('Box Plot')
# Plotting the box plot.
sns.boxplot(
df.loc[:, feature_name]
, orient='v'
, ax=ax3
, color='#e74c3c'
)
ax3.yaxis.set_major_locator(MaxNLocator(nbins=24))
plt.suptitle(f'{feature_name}', fontsize=24)
pass
def make_confusion_matrix(cf,
group_names=None,
categories='auto',
count=True,
percent=True,
cbar=False,
xyticks=True,
xyplotlabels=True,
sum_stats=True,
figsize=None,
cmap='Blues',
title=None):
"""
This function will make a pretty plot of an sklearn Confusion Matrix cm using a Seaborn heatmap visualization.
Arguments
---------
cf: confusion matrix to be passed in
group_names: List of strings that represent the labels row by row to be shown in each square.
categories: List of strings containing the categories to be displayed on the x,y axis. Default is 'auto'
count: If True, show the raw number in the confusion matrix. Default is True.
normalize: If True, show the proportions for each category. Default is True.
cbar: If True, show the color bar. The cbar values are based off the values in the confusion matrix.
Default is False.
xyticks: If True, show x and y ticks. Default is True.
xyplotlabels: If True, show 'True Label' and 'Predicted Label' on the figure. Default is True.
sum_stats: If True, display summary statistics below the figure. Default is True.
figsize: Tuple representing the figure size. Default will be the matplotlib rcParams value.
cmap: Colormap of the values displayed from matplotlib.pyplot.cm. Default is 'Blues'
See http://matplotlib.org/examples/color/colormaps_reference.html
title: Title for the heatmap. Default is None.
# Thanks [DTrimarchi10](https://github.com/DTrimarchi10/confusion_matrix/blob/master/cf_matrix.py)
Example:
from sklearn.metrics import confusion_matrix
y_true = [2, 0, 2, 2, 0, 1]
y_pred = [0, 0, 2, 2, 0, 2]
cf = confusion_matrix(y_true, y_pred)
make_confusion_matrix(cf)
"""
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
# CODE TO GENERATE TEXT INSIDE EACH SQUARE
blanks = ['' for i in range(cf.size)]
if group_names and len(group_names)==cf.size:
group_labels = ["{}\n".format(value) for value in group_names]
else:
group_labels = blanks
if count:
group_counts = ["{0:0.0f}\n".format(value) for value in cf.flatten()]
else:
group_counts = blanks
if percent:
group_percentages = [
"{0:.2%}".format(value)
for value
in cf.flatten()/np.sum(cf)
]
else:
group_percentages = blanks
box_labels = [
f"{v1}{v2}{v3}".strip()
for v1, v2, v3
in zip(group_labels,group_counts,group_percentages)
]
box_labels = np.asarray(box_labels).reshape(cf.shape[0],cf.shape[1])
# CODE TO GENERATE SUMMARY STATISTICS & TEXT FOR SUMMARY STATS
if sum_stats:
#Accuracy is sum of diagonal divided by total observations
accuracy = np.trace(cf) / float(np.sum(cf))
#if it is a binary confusion matrix, show some more stats
if len(cf)==2:
#Metrics for Binary Confusion Matrices
tn, fp, fn, tp = cf.ravel()
# precision = cf[1,1] / sum(cf[:,1])
precision = tp/(tp+fp)
# recall = cf[1,1] / sum(cf[1,:])
recall = tp/(tp+fn)
#f1_score = 2*precision*recall/(precision+recall)
f1_score = 2*tp/(2*tp+fp+fn)
mcc = (tp*tn-fp*fn)/np.sqrt((tp+fp)*(tp+fn)*(tn+fp)*(tn+fn))
stats_text = "\n\nAccuracy={:0.3f}\nPrecision={:0.3f}\nRecall={:0.3f}\nF1 Score={:0.3f}\nMCC={:0.3f}".format(
accuracy, precision, recall, f1_score, mcc)
else:
stats_text = "\n\nAccuracy={:0.3f}".format(accuracy)
else:
stats_text = ""
# SET FIGURE PARAMETERS ACCORDING TO OTHER ARGUMENTS
if figsize==None:
#Get default figure size if not set
figsize = plt.rcParams.get('figure.figsize')
if xyticks==False:
#Do not show categories if xyticks is False
categories=False
# MAKE THE HEATMAP VISUALIZATION
plt.figure(figsize=figsize)
sns.heatmap(
cf
, annot=box_labels
, fmt=""
, cmap=cmap
, cbar=cbar
, xticklabels=categories
, yticklabels=categories
)
if xyplotlabels:
plt.ylabel('True label')
plt.xlabel('Predicted label' + stats_text)
else:
plt.xlabel(stats_text)
if title:
plt.title(title)
pass
def convert_str2int(df, start_col=1):
"""
given a df with string columns
convert it to float columns
trim spaces
remove leading zeros
from the start_col on
"""
for col in df.columns[start_col:]:
try:
df.loc[:,col] = df[col].str.replace(r"\s", "", regex=True)  # trim spaces
df.loc[:,col] = df[col].str.replace(".", "", regex=False)  # remove points (literal dot, not regex)
df.loc[:,col] = df[col].str.lstrip('0') # remove leading zeros
# df[col] = df[col].astype(int)
df.loc[:,col] = pd.to_numeric(df[col], errors='coerce')
# df[col] = df[col].replace(np.nan, 0, regex=True)
# df[col] = df[col].astype(int)
# df[col] = df[col].replace(0, np.nan)
except:
pass
return df
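# Minimal usage sketch for convert_str2int, assuming a tiny frame where the columns from
# start_col onwards hold numbers stored as strings with spaces, dots and leading zeros.
def demo_convert_str2int():
    import pandas as pd
    df = pd.DataFrame({'id': ['a', 'b'], 'val': [' 01.200', '007 ']})
    out = convert_str2int(df, start_col=1)
    print(out['val'].tolist())  # expected: [1200, 7]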
def check_equal_means(statistic, p_value, alpha=0.05):
"""Compare two means, print statistic
Args:
statistic (float]): [description]
p_value (float): [description]
alpha (float, optional): Significance Level. Defaults to 0.05.
Returns:
boolean: True if the two means seem equal else False
"""
print('Statistics=%.3f, p_value=%.3f' % (statistic, p_value))
if p_value <= alpha:
means_seems_equal = False
print('Sample means do not look equal (reject H0)')
else:
means_seems_equal = True
print('Sample means look equal (fail to reject H0)')
return means_seems_equal
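# Minimal usage sketch for check_equal_means, assuming scipy is available; the
# statistic/p_value pair comes from a standard two-sample t-test.
def demo_check_equal_means():
    import numpy as np
    from scipy import stats
    rng = np.random.default_rng(42)
    a = rng.normal(loc=0.0, scale=1.0, size=100)
    b = rng.normal(loc=0.0, scale=1.0, size=100)
    statistic, p_value = stats.ttest_ind(a, b)
    return check_equal_means(statistic, p_value, alpha=0.05)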
def check_normality(statistic, p_value, alpha=0.05):
""" Statistical report if the variables informed seems Gaussian/Normal
Args:
statistic (float): [description]
p_value (float): [description]
alpha (float, optional): significance level. Defaults to 0.05.
Returns:
(boolean): True if Normal else False
"""
print('Statistics=%.3f, p_value=%.3f' % (statistic, p_value))
if p_value <= alpha:
seems_normal = False
print('Sample does not look Gaussian (reject H0)')
else:
seems_normal = True
print('Sample looks Gaussian (fail to reject H0)')
return seems_normal
def anderson_darling_normality_test(result):
"""
Statistical report if the variables informed seems Gaussian/Normal
accordingly to Anderson Darling Normality Test
plot Significance Level x Critical Values
Args:
result (Scipy object): Object from Scipy
Returns:
boolean: True if Normal else False
"""
import matplotlib.pyplot as plt
print('Statistic: %.3f' % result.statistic)
p = 0
is_normal = True
for i in range(len(result.critical_values)):
sl, cv = result.significance_level[i], result.critical_values[i]
if result.statistic < result.critical_values[i]:
print('%.3f: %.3f, data looks normal (fail to reject H0)'% (sl, cv))
else:
print('%.3f: %.3f, data does not look normal (reject H0)'% (sl, cv))
is_normal = False
plt.scatter(
result.significance_level
,result.critical_values
)
plt.xlabel('Significance Level')
plt.ylabel('Critical Values')
plt.title("Anderson-Darling Normality Test")
return is_normal
def print_check_normality_multiple_tests(data):
"""
Reports 4 normality tests and summarizes in a dataframe
1) Shapiro-Wilk
2) Jarque-Bera
3) D'Agostino-Pearson or D'Agostino K^2
4) Anderson-Darling
Args:
data ([np.array or pd.Series]): [description]
Returns:
pd.DataFrame: Summarize results from tests
Example:
import numpy as np
print_check_normality_multiple_tests(np.arange(100))
Out:
Shapiro-Wilk Normality Test
Statistics=0.955, p_value=0.002
Sample does not look Gaussian (reject H0)
Jarque-Bera Normality Test
Statistics=6.002, p_value=0.050
Sample does not look Gaussian (reject H0)
D'Agostino-Pearson Normality Test
Statistics=34.674, p_value=0.000
Sample does not look Gaussian (reject H0)
Anderson-Darling Normality Test
Statistic: 1.084
15.000: 0.555, data does not look normal (reject H0)
10.000: 0.632, data does not look normal (reject H0)
5.000: 0.759, data does not look normal (reject H0)
2.500: 0.885, data does not look normal (reject H0)
1.000: 1.053, data does not look normal (reject H0)
Method Is_Normal
0 Shapiro-Wilk False
1 Jarque-Bera False
2 D'Agostino-Pearson False
3 Anderson-Darling False
"""
import pandas as pd
from scipy import stats
# Shapiro-Wilk
print("Shapiro-Wilk Normality Test")
statistic, p_value = stats.shapiro(data)
is_normal_shapiro_wilk = check_normality(statistic, p_value)
# Jarque-Bera
print("\nJarque-Bera Normality Test")
statistic, p_value = stats.jarque_bera(data)
is_normal_jarque_bera = check_normality(statistic, p_value)
# D'Agostino-Pearson or D'Agostino K^2
# check skew: pushed left or right (asymmetry)
# check kurtosis: how much is in the tail
print("\nD'Agostino-Pearson Normality Test")
statistic, p_value = stats.normaltest(data)
is_normal_dagostino_pearson = check_normality(statistic, p_value)
# Anderson-Darling
print("\nAnderson-Darling Normality Test")
result = stats.anderson(data, dist='norm')
is_normal_anderson_darling = anderson_darling_normality_test(result)
is_normal = {"Method": ["Shapiro-Wilk",
"Jarque-Bera",
"D'Agostino-Pearson",
"Anderson-Darling"],
'Is_Normal': [is_normal_shapiro_wilk,
is_normal_jarque_bera,
is_normal_dagostino_pearson,
is_normal_anderson_darling]
}
return pd.DataFrame(data=is_normal)
def get_sample(df, col_name, n=100, seed=42):
"""Get a sample from a column of a dataframe.
It drops any numpy.nan entries before sampling. The sampling
is performed without replacement.
Example of numpydoc for those who haven't seen yet.
Parameters
----------
df : pandas.DataFrame
Source dataframe.
col_name : str
Name of the column to be sampled.
n : int
Sample size. Default is 100.
seed : int
Random seed. Default is 42.
Returns
-------
pandas.Series
Sample of size n from dataframe's column.
Example:
import pandas as pd
df = pd.DataFrame({
'numbers_1to100': np.arange(100)})
get_sample(df=df, col_name='numbers_1to100', n=10, seed=40)
Out:
79 79
75 75
63 63
15 15
38 38
11 11
40 40
45 45
39 39
62 62
Name: numbers_1to100, dtype: int32
"""
import numpy as np
np.random.seed(seed)
random_idx = np.random.choice(
df[col_name].dropna().index
, size=n
, replace=False
)
return df.loc[random_idx, col_name]
def plot_top(df, col, ax, n=10, normalize=True):
""" Plot nlargest frequencies in a dataframe column
Args:
df (pandas Dataframe): data in a tab form
col (str): name of the column to plot
ax (axis object): axis to plot the figure
n (int, optional): Number of results to plot. Defaults to 10.
normalize (bool, optional): pct if True else absolute. Defaults to True.
Ex:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
df = pd.DataFrame({
'c':np.random.choice( [100,5,3], 100)
})
fig, ax = plt.subplots()
plot_top(df=df, col='c', ax=ax, n=2, normalize=False)
"""
df[col].value_counts(
normalize=normalize
).nlargest(n).plot.barh(
x=col
, legend=False
, ax=ax
, title=f'Top {n} - {col}'
)
ax.invert_yaxis()
if normalize:
ax.axis(xmin=0, xmax=1)
pass
def df_total_row_col(df,row=True,col=True):
""" Add total row and col to a daftaframe
"""
df_out = df.copy()
df_out.loc['Column_Total']= df_out.sum(numeric_only=True, axis=0)
df_out.loc[:,'Row_Total'] = df_out.sum(numeric_only=True, axis=1)
return df_out
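# Minimal usage sketch for df_total_row_col: appends a 'Column_Total' row and a
# 'Row_Total' column holding the numeric sums.
def demo_df_total_row_col():
    import pandas as pd
    df = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
    out = df_total_row_col(df)
    print(out)
    # 'Column_Total' holds the column sums, 'Row_Total' the row sums;
    # the bottom-right cell is the grand total (10).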
def cross_tab(df, col1, col2, total_row_col=True, plot=True, table=True):
""" Compare two (categorical) variables by a stacked barplot and cross table
Args:
df (pandas Dataframe): data in a tab form
col_1 (str): name of the first column to compare (x_axis)
col_2 (str): name of the second column to compare (y_axis)
Returns:
(pandas Dataframe): cross table with data between col1 and col2
Thanks [<NAME>](https://adataanalyst.com/data-analysis-resources/visualise-categorical-variables-in-python/)
Ex:
import pandas as pd
df = pd.DataFrame({
'ABC':np.random.choice( ['a','b','c'], 100),
'DEF':np.random.choice( ['d','e','f'], 100)
})
df_cross = cross_tab(df, col1='ABC', col2='DEF')
Out:
DEF d e f
ABC
a 17 12 9
b 8 18 12
c 7 10 7
"""
import pandas as pd
import matplotlib.pyplot as plt
cross_col1_col2 = pd.crosstab(
index=df[col1]
, columns=df[col2]
)
if plot:
cross_col1_col2.plot(
kind="bar"
, stacked=True
)
ax = plt.gca()
move_legend(ax, "upper left", bbox_to_anchor=(1.04,1))
# plt.show()
cross_col1_col2 = df_total_row_col(cross_col1_col2,row=True,col=True)
if table:
display(cross_col1_col2)
return cross_col1_col2
def sorted_box_plot(by_col, plot_col, df, hue=False):
"""
#TODO: WRITE DOCSTRING
#TODO: BUILD TEST
# Note: relies on module-level mappings `dct` (code -> label per column) and
# `descr` (column -> description) being defined before this function is called.
# import seaborn as sns
# import matplotlib.pyplot as plt
# import numpy as np
# tips = sns.load_dataset("tips")
# df = pd.DataFrame({
# 'ABC':np.random.choice( ['a','b','c'], 100),
# 'DEF':np.random.choice( ['d','e','f'], 100)
# })
# sorted_box_plot(by_col='ABC', plot_col='DEF', df=df, hue=False)
"""
import matplotlib.pyplot as plt
import seaborn as sns
plt.figure(figsize=(10, 8))
lst_order = sorted(list(dct[by_col].keys()))
if hue:
ax = sns.boxplot(
x=by_col
, y=plot_col
, data=df
, order=lst_order
, hue=hue
)
handles, _ = ax.get_legend_handles_labels()
ax.legend(handles, dct[hue].values(), title=descr[hue])
else:
ax = sns.boxplot(
x=by_col
, y=plot_col
, data=df
, order=lst_order
)
ax.set_xticklabels([dct[by_col][k] for k in lst_order])
plt.xticks(rotation='vertical')
plt.title(descr[plot_col] + " por " + descr[by_col])
plt.xlabel(descr[by_col])
plt.ylabel(descr[plot_col])
plt.show()
pass
def plot_missing_data(df, lst, name, figsize):
"""
Detecting missing data:
yellow means no available data, black means we have data
"""
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
fig, ax = plt.subplots(figsize=figsize)
ax.set_title(f'{name} Missing Values')
sns.heatmap(
df[lst].T.isna()
, cbar=False
, cmap='magma'
)
xtl=[item.get_text()[:7] for item in ax.get_xticklabels()]
_=ax.set_xticklabels(xtl)
plt.xticks(rotation=90)
plt.show()
return df[lst].isna().sum().to_frame(name=name.lower())
def check_miss_data(df):
import pandas as pd
total = df.isnull().sum().sort_values(ascending=False)
percent = (df.isnull().sum()/df.isnull().count()).sort_values(ascending=False)
missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])
return missing_data
def get_initial_dates(df, index, data_started_lin=0):
"""
Args:
df: pd.DataFrame
index: col with dates
data_started_lin: int where data starts (raw data)
Make list from repeated values
https://stackoverflow.com/questions/31796973/pandas-dataframe-combining-one-columns-values-with-same-index-into-list
"""
missing_data = check_miss_data(df)
missing_data.index.name = "Name"
initial_dates_agg = missing_data.reset_index().groupby(['Total'])['Name'].unique().to_frame()
initial_dates_agg['date'] = df[index].iloc[initial_dates_agg.index].dt.strftime('%m/%Y')
initial_dates_agg['initial_lin'] = initial_dates_agg.index+data_started_lin
initial_dates_agg.set_index("date",inplace=True)
return initial_dates_agg
def display_full(x):
"""
Non-truncated pandas
https://stackoverflow.com/questions/25351968/how-to-display-full-non-truncated-dataframe-information-in-html-when-convertin
"""
import pandas as pd
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
pd.set_option('display.width', 2000)
pd.set_option('display.float_format', '{:20,.2f}'.format)
pd.set_option('display.max_colwidth', None)
display(x)
pd.reset_option('display.max_rows')
pd.reset_option('display.max_columns')
pd.reset_option('display.width')
pd.reset_option('display.float_format')
pd.reset_option('display.max_colwidth')
pass
def print_full(x):
"""
Non-truncated pandas
https://stackoverflow.com/questions/25351968/how-to-display-full-non-truncated-dataframe-information-in-html-when-convertin
"""
import pandas as pd
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
pd.set_option('display.width', 2000)
pd.set_option('display.float_format', '{:20,.2f}'.format)
pd.set_option('display.max_colwidth', None)
print(x)
pd.reset_option('display.max_rows')
pd.reset_option('display.max_columns')
pd.reset_option('display.width')
pd.reset_option('display.float_format')
pd.reset_option('display.max_colwidth')
pass
def plot_heatmap_nr(corr_mat, figsize=(6, 6)):
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
mask = np.triu(np.ones_like(corr_mat, dtype=bool))
# Set up the matplotlib figure
f, ax = plt.subplots(figsize=figsize)
# Draw the heatmap with the mask and correct aspect ratio
sns.heatmap(
corr_mat
, mask=mask
, cmap='RdBu_r'
, vmin=-1.0
, vmax=1.0
, center=0
, square=True
, linewidths=.5
, cbar_kws={"shrink": .5}
, annot=True
, fmt=".2f"
);
pass
def drop_prevalent(df, threshold=0.01):
"""
return the columns where a single value covers more than (1 - threshold) of the rows, i.e. candidates to drop
"""
col_to_drop_prevalent = list()
for col in df.columns:
prevalent = df[col].value_counts(normalize=True).max()
if 1-prevalent < threshold:
col_to_drop_prevalent.append(col)
return col_to_drop_prevalent
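# Minimal usage sketch for drop_prevalent: with threshold=0.1, columns where a single
# value covers more than 90% of the rows are flagged for dropping.
def demo_drop_prevalent():
    import pandas as pd
    df = pd.DataFrame({
        'constant': ['x'] * 99 + ['y'],  # 99% one value -> flagged
        'balanced': ['a', 'b'] * 50      # 50/50 split -> kept
    })
    print(drop_prevalent(df, threshold=0.1))  # expected: ['constant']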
def prevalent_analysis(df):
"""
#TODO Build Example:
import pandas as pd
import numpy as np
import plotly.express as px
fig = px.scatter(df_prevalent, x='Threshold', y='Dropped columns amount')
fig.show()
arr_bool = np.empty(shape=(len(df_prevalent),len(df.columns)))
ii = 0
jj = 0
for item in drop_prevalent_list:
for col in df.columns:
arr_bool[ii,jj] = col in item
jj+=1
ii+=1
jj=0
df2 = pd.DataFrame(arr_bool)
df2.columns = df.columns
abs(df2.sum()-101).sort_values() # Threshold to discard each column %
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
drop_prevalent_len = list()
drop_prevalent_list = list()
arr_threshold = np.linspace(0,1,101)
for threshold in arr_threshold:
drop_it = drop_prevalent(df, threshold)
drop_prevalent_list.append(drop_it)
drop_prevalent_len.append(len(drop_it))
fig, ax = plt.subplots()
ax.plot(arr_threshold, drop_prevalent_len)
ax.set_xlabel('Threshold')
ax.set_ylabel('Columns to drop')
plt.show()
df_prevalent = pd.DataFrame([arr_threshold,drop_prevalent_len]).T
df_prevalent.columns = ['Threshold', 'Dropped columns amount']
pass
def label_encode_categorical_values(df, index, plot=True):
"""
Args:
df: pd.DataFrame
index: string (col_name as data index)
Return:
df_encoded: label encoding
dct_encoding: dict mapping
"""
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
df_encoded = df.copy()
lst_col_string = df.select_dtypes(
include='object', # 'number' # 'object' # 'datetime' #
exclude=None
).columns.to_list()
dct_encoding = {}
for idx, col in enumerate(lst_col_string):
dct = pd.DataFrame(
{"name":df[col],
"code":df[col].astype('category').cat.codes}
).drop_duplicates().set_index("name").to_dict()['code']
# lst_dct_strings.append({col: dct})
# https://stackoverflow.com/questions/613183/how-do-i-sort-a-dictionary-by-value
dct = dict(sorted(dct.items(), key=lambda item: item[1]))
dct_encoding[col] = dct
df_encoded[col] = df_encoded[col].map(dct)
if plot:
fig, ax = plt.subplots()
sns.scatterplot(
data=df,
x=index,
y=col,
hue=col,
legend=False
)
# move_legend(ax, "upper left", bbox_to_anchor=(1.04,1))
plt.show()
sns.countplot(
data=df,
y=col
)
plt.show()
# positions = tuple(dct.values())
# labels = dct.keys()
# df_encoded.plot(x=index,y=col)
# plt.yticks(positions, labels)
# plt.title(col)
# plt.show()
# df[col].value_counts().plot(kind='barh')
# plt.title(col)
# plt.show()
return df_encoded, dct_encoding
def convert_0_1(arr, threshold=0.5):
"""
Convert a probabilistic array into binary values (0,1)
Args:
arr: np.array with probabilities
threshold (optional, default=0.5: probability limit)
if element < threshold = 0
if element >= threshold = 1
Return:
arr_0_1: np.array with 0 or 1 values
"""
import numpy as np
arr_0_1 = np.copy(arr)
arr_0_1[arr<threshold] = 0
arr_0_1[arr>=threshold] = 1
arr_0_1 = np.array([np.int32(x.item()) for x in arr_0_1])
return arr_0_1
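# Minimal usage sketch for convert_0_1: probabilities below the threshold map to 0,
# the rest to 1.
def demo_convert_0_1():
    import numpy as np
    probs = np.array([0.1, 0.49, 0.5, 0.9])
    print(convert_0_1(probs, threshold=0.5))  # expected: [0 0 1 1]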
def standardize_df(df):
"""
Standardize a dataframe (mean centered at 0 and unitary standard deviation)
Args:
df: pd.DataFrame with only numeric values
Return:
df: pd.DataFrame standardized
"""
return (df-df.mean())/df.std()
def normalize_df(df):
"""
Normalize a dataframe (range[0,1])
Args:
df: pd.DataFrame with only numeric values
Return:
df: pd.DataFrame normalized
"""
return (df-df.min())/(df.max()-df.min())
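# Minimal usage sketch for standardize_df / normalize_df on a tiny numeric frame.
def demo_scaling():
    import pandas as pd
    df = pd.DataFrame({'x': [0.0, 5.0, 10.0]})
    print(standardize_df(df)['x'].tolist())  # mean 0, sample std 1: [-1.0, 0.0, 1.0]
    print(normalize_df(df)['x'].tolist())    # rescaled to [0, 1]: [0.0, 0.5, 1.0]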
def eda_plot(df, index):
import matplotlib.pyplot as plt
import seaborn as sns
# width, height
fig_size_base = (6,6)
total_fig = len(df.select_dtypes('number').columns)
cols = 6
rows = total_fig // cols + 1
# check if you have more space as figures to plot
assert rows*cols > total_fig
fig_size_hist_line = (cols*fig_size_base[0],
rows*fig_size_base[1])
fig_scale_heatmap = 0.75
fig_size_heatmap = (fig_scale_heatmap*fig_size_hist_line[0],
fig_scale_heatmap*fig_size_hist_line[1])
fig_scale_boxplot = 0.25
fig_size_boxplot = (fig_scale_boxplot*fig_size_hist_line[0],
fig_scale_boxplot*fig_size_hist_line[1])
# Histogram
fig, ax = plt.subplots(figsize=fig_size_hist_line)
df.drop(columns=[index]).hist(ax=ax, layout=(rows,cols))
plt.show()
# Heatmap
plot_heatmap_nr(df.corr(), figsize=fig_size_heatmap)
# Line plot
df.plot(
x=index,
subplots=True,
sharex=False,
layout=(rows,cols),
figsize=fig_size_hist_line
)
plt.show()
# Boxplot
fig, ax = plt.subplots(figsize=fig_size_boxplot)
# df_standardized.boxplot(vert=False, ax=ax)
sns.boxplot(
data=standardize_df(df.drop(columns=[index]))
, ax=ax
, orient='h'
)
# sns.stripplot(
# data=df_standardized
# , ax=ax
# , orient='h'
# , color=".2"
# )
plt.show()
pass
def replace_str_nan_by_np_nan(df_str_nan):
"""
dealing with nan strings, since fillna handles only np.nan
Args: df with string nan
Return: df with np.nan
Ex:
import pandas as pd
import numpy as np
df_str_nan = pd.DataFrame({
'age':['np.nan',34,19],
'gender':['Nan',np.nan,'M'],
'profession':['student', 'nan', 'artist']})
df_np_nan = replace_str_nan_by_np_nan(df_str_nan)
print(df_np_nan.isna())
age gender profession
0 True True False
1 False True True
2 False False False
"""
import numpy as np
df_np_nan = df_str_nan.copy()
for nan in ['np.nan', 'NaN', 'Nan', 'nan']:
df_np_nan = df_np_nan.replace(nan, np.nan, regex=True)
return df_np_nan
def join_df1_df2_repeated_col(df1, df2):
"""
join two dataframes keeping values within repeated columns
dealing with nan strings, since fillna handles only np.nan
Args: df1, df2 two dataframes
Return: df_join joined dataframe
Ex:
import pandas as pd
import numpy as np
df1 = pd.DataFrame({
'age':[7,34,19],
'gender':['F',np.nan,'M'],
'profession':['student', 'CEO', 'artist']})
df2 = pd.DataFrame({
'age':[7,34,19],
'gender':['np.nan','F',np.nan],
'interests':['acting', 'cars', 'gardening']})
print(join_df1_df2_repeated_col(df1, df2))
age gender profession interests
0 7 F student acting
1 34 F CEO cars
2 19 M artist gardening
"""
import pandas as pd
import numpy as np
# dealing with nan strings, since fillna handles only np.nan
df1 = replace_str_nan_by_np_nan(df1)
df2 = replace_str_nan_by_np_nan(df2)
# join and dealing with repeated columns
rsuffix = "_r"
df_join = df1.join(df2, rsuffix=rsuffix)
mask = df_join.columns.str.endswith(rsuffix)
lst_col_r = list(df_join.loc[:,mask].columns)
for col_r in lst_col_r:
col = col_r[:-len(rsuffix)]
df_join[col] = df_join[col].fillna(df_join[col_r])
return df_join.drop(columns=lst_col_r)
def drop_outliers(df, method='std', value=3):
"""
Drop outliers within a dataframe
Args: df: pd.DataFrame with numeric entries
method: string ['std','iqr'], default: 'std'
'std': check if the data are within a standard range
as a z_score in an equivalent normal distribution
'iqr': check if the data are within a interquartile range
iqr = quantile(0.75) - quantile(0.25)
value: float, default 3
for 'std' method corresponds to a standard deviation factor
for 'iqr' method corresponds to a interquartile factor
Return:
df_no_outliers: pd.DataFrame without outliers regardless the columns
src: https://stackoverflow.com/questions/23199796/detect-and-exclude-outliers-in-pandas-data-frame
"""
import numpy as np
if method == 'std':
df_no_outliers = df[df
.apply(lambda x: np.abs(x - x.mean()) / x.std() < value)
.all(axis=1)]
elif method == 'iqr':
df_no_outliers = df[df
.apply(lambda x: x.between(
x.quantile(0.25)-value*(x.quantile(0.75)-x.quantile(0.25)),
x.quantile(0.75)+value*(x.quantile(0.75)-x.quantile(0.25))))
.all(axis=1)]
return df_no_outliers
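# Minimal usage sketch for drop_outliers, assuming a column with one extreme value;
# the 'iqr' method with value=1.5 is the classic boxplot rule.
def demo_drop_outliers():
    import pandas as pd
    df = pd.DataFrame({'x': [1, 2, 3, 4, 5, 100]})
    print(drop_outliers(df, method='iqr', value=1.5)['x'].tolist())  # expected: [1, 2, 3, 4, 5]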
def detect_outliers(df, method='std', value=3):
"""
Detect outliers within a dataframe
Args: df: pd.DataFrame with numeric entries
method: string ['std','iqr'], default: 'std'
'std': check if the data are within a standard range
as a z_score in an equivalent normal distribution
'iqr': check if the data are within a interquartile range
iqr = quantile(0.75) - quantile(0.25)
value: float, default 3
for 'std' method corresponds to a standard deviation factor
for 'iqr' method corresponds to a interquartile factor
Return:
df_outliers: pd.DataFrame with the outliers regardless the columns
"""
import numpy as np
import pandas as pd
if method == 'std':
df_outliers = df[df
.apply(lambda x: np.abs(x - x.mean()) / x.std() >= value)
.any(axis=1)]
elif method == 'iqr':
# iqr = df.quantile(0.75) - df.quantile(0.25)
# lim_inf = df.quantile(0.25) - 1.5*iqr
# lim_sup = df.quantile(0.75) + 1.5*iqr
df_outliers_inf = df[df
.apply(lambda x: x <= x.quantile(0.25)
- value * (x.quantile(0.75) - x.quantile(0.25)))
.any(axis=1)]
df_outliers_sup = df[df
.apply(lambda x: x >= x.quantile(0.75)
+ value * (x.quantile(0.75) - x.quantile(0.25)))
.any(axis=1)]
df_outliers = pd.concat([df_outliers_inf, df_outliers_sup]).drop_duplicates()
return df_outliers
def get_df_stats(df):
df_stats = df.describe().T
df_stats['IQR'] = df_stats['75%'] - df_stats['25%']
# df_stats['lim_inf_1.5IQR'] = df_stats['25%'] - 1.5 * df_stats['IQR']
df_stats['lim_inf_1.5IQR'] = df.quantile(0.25) - 1.5*(df.quantile(0.75) - df.quantile(0.25))
df_stats['lim_inf_3std'] = df.mean()-3*df.std()
# df_stats['lim_sup_1.5IQR'] = df_stats['75%'] + 1.5 * df_stats['IQR']
df_stats['lim_sup_1.5IQR'] = df.quantile(0.75) + 1.5*(df.quantile(0.75) - df.quantile(0.25))
df_stats['lim_sup_3std'] = df.mean()+3*df.std()
return df_stats.T
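# Minimal usage sketch for get_df_stats: describe() plus IQR and the 1.5*IQR / 3*std
# outlier limits, transposed so each statistic is a row.
def demo_get_df_stats():
    import pandas as pd
    df = pd.DataFrame({'x': [1, 2, 3, 4, 5, 100]})
    stats_table = get_df_stats(df)
    print(stats_table.loc[['IQR', 'lim_inf_1.5IQR', 'lim_sup_1.5IQR'], 'x'])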
def get_tscv_index(y, test_idx_start, test_size=1):
"""
Args:
y: numpy array to get number of elements
test_idx_start: int where the first test start
test_size: int number of elements in test (default=1)
Return:
lst_train_idx: lst with np.array with indexes
for n_splits time series
each one starting from 0 and finishing at (test_idx_start-1) + ii
where ii ranges from 0 to n_splits
lst_test_idx: lst with with np.array with indexes
for all time series cross validate sequence
from (train_idx_start+1)+ii to (train_idx_start+1)+ii + test_size
up to the last element in y
Minimum example:
import numpy as np
len_y = 7
test_size = 2
test_idx_start = 3
X = np.random.randn(len_y, 2)
y = np.random.randint(0, 2, len_y)
lst_train_idx, lst_test_idx = get_tscv_index(y, test_idx_start, test_size)
# test_idx_start_last = len_y-test_size
n_splits = (len_y-test_size) - test_idx_start + 1
assert len(lst_train_idx) == len(lst_test_idx)
assert len(lst_train_idx) == n_splits
print(f"n_splits: {n_splits}\n")
for idx, train_index in enumerate(lst_train_idx):
test_index = lst_test_idx[idx]
print(f"Train: idx: {train_index}\t\t Test: idx: {test_index}")
'''
Out:
n_splits: 3
Train: idx: [0 1 2] Test: idx: [3 4]
Train: idx: [0 1 2 3] Test: idx: [4 5]
Train: idx: [0 1 2 3 4] Test: idx: [5 6]
'''
"""
import numpy as np
n_splits = (len(y)-test_size) - test_idx_start + 1
lst_train_idx = [np.arange(test_idx_start+ii) for ii in range(n_splits)]
lst_test_idx = [(test_idx_start+ii)+np.arange(test_size) for ii in range(n_splits)]
return lst_train_idx, lst_test_idx
def get_tscv_X_y(X, y, lst_train_idx, lst_test_idx):
lst_X_train = [X[ii] for ii in lst_train_idx]
lst_X_test = [X[ii] for ii in lst_test_idx]
lst_y_train = [y[ii] for ii in lst_train_idx]
lst_y_test = [y[ii] for ii in lst_test_idx]
return lst_X_train, lst_X_test, lst_y_train, lst_y_test
def get_X_y(df, lst_col_X, target):
X = df[lst_col_X].values
y = df[target].values.ravel()
return X, y
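# Minimal sketch tying get_X_y, get_tscv_index and get_tscv_X_y together, assuming a
# small frame with two feature columns and one target column.
def demo_tscv_pipeline():
    import numpy as np
    import pandas as pd
    df = pd.DataFrame({'f1': np.arange(7), 'f2': np.arange(7) * 2, 'y': np.arange(7) % 2})
    X, y = get_X_y(df, lst_col_X=['f1', 'f2'], target='y')
    lst_train_idx, lst_test_idx = get_tscv_index(y, test_idx_start=3, test_size=2)
    lst_X_train, lst_X_test, lst_y_train, lst_y_test = get_tscv_X_y(X, y, lst_train_idx, lst_test_idx)
    print(len(lst_X_train), lst_X_train[0].shape, lst_y_test[0])  # 3 (3, 2) [1 0]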
def get_rank_col(df):
rank = df.columns[df.columns.str.endswith('_rank')][0]
return rank
def plot_eda_results(df, metric, hue, plot, n=False):
import seaborn as sns
import matplotlib.pyplot as plt
if n:
df = df.sort_values(by=get_rank_col(df)).head(n)
fig, ax = plt.subplots()
if plot=='hist':
sns.histplot(
data=df,
x=metric,
hue=hue,
multiple="stack",
palette='deep'
)
move_legend(ax, "upper left", bbox_to_anchor=(1.04,1))
elif plot=='boxplot':
sns.boxplot(data=df, x=metric, y=hue)
plt.ylabel(None)
if hue == 'n_var':
print('#TODO: check this')
# n=10
# start, end = ax.get_xlim()
# positions = np.linspace(start, end, n)
# labels = np.linspace(df[metric].min(), df[metric].max(), n)
# plt.xticks(positions, labels)
# import matplotlib.ticker as ticker
# ax.xaxis.set_major_formatter(ticker.FormatStrFormatter('%0.1f'))
elif plot =='scatter' and n:
sns.scatterplot(
data=df,
x=metric,
y='lst_col_str',
hue='model'
)
if metric == 'matthews_corrcoef':#'f1_score':
target_val = 1
problema = 'classifiers'
elif metric == 'mean_squared_error':
target_val = 0
problema = 'regressors'
if hue == 'model':
text_var = 'models'
elif hue == 'n_var':
text_var = 'number of variables'
elif hue == 'lst_col_str':
text_var = 'variables'
plt.title(f'Performance of {df.shape[0]} {problema}, \n considering {text_var}')
plt.xlabel(f'{metric} (the closer to {target_val} the better)')
plt.show()
pass
def table_results(dct_base, df, metric, index, figsize, n=10):
# TODO: ABSTRACT THIS
# DEPENDS ON THIS FILE'S FRAMEWORK
import matplotlib.pyplot as plt
import pandas as pd
df = df.sort_values(by=get_rank_col(df)).head(n)
df[metric] = df[metric].round(2)
if index == 'lst_col_str':
df = df[[index, 'model', metric]].set_index(index)
elif index == 'model':
df = df[[index, metric]+dct_base['lst_col_selected']].set_index(index).T
fig, ax = plt.subplots(figsize=figsize)
ax.axis("off")
pd.plotting.table(ax, df)
plt.show()
pass
def eda_results_clf_reg(dct_base, df_results, metric):
# TODO: ABSTRACT THIS
# DEPENDS ON THIS FILE'S FRAMEWORK
import pandas as pd
plot_eda_results(df=df_results, metric=metric, hue='model', plot='boxplot')
plot_eda_results(df=df_results, metric=metric, hue='model', plot='hist')
# plot_eda_results(df=df_results, metric=metric, hue='n_var', plot='boxplot')
plot_eda_results(df=df_results, metric=metric, hue='n_var', plot='hist')
plot_eda_results(df=df_results, metric=metric, hue='lst_col_str', plot='hist', n=10)
plot_eda_results(df=df_results, metric=metric, hue='model', plot='scatter', n=20)
table_results(dct_base, df_results, metric, index='lst_col_str', figsize=(3,0.1), n=10)
table_results(dct_base, df_results, metric, index='model', figsize=(15,0.1), n=10)
# metric stats for each algorithm
pd.options.display.latex.repr=True
display(df_results
.groupby('model')[[metric]]
.agg(['min','max','median','std','mean'])
.sort_values(by=[(metric, 'median')], ascending=False))
pd.options.display.latex.repr=False
rank_col = get_rank_col(df_results)
print('Distribution of the best algorithms for each set of variables.')
# best classifier for each variables set
best_model_each_var_set = (
df_results
.sort_values(rank_col)
.drop_duplicates(['lst_col_str'])
)
# ut.display_full(
# best_model_each_var_set
# [['clf_rank','model',metric]+lst2]
# .head(5)
# )
# most frequent algorithms
pd.options.display.latex.repr=True
display(best_model_each_var_set['model'].value_counts())
pd.options.display.latex.repr=False
pass
def get_lst_features(lst_col_selected, index, target):
lst_features = [x for x in lst_col_selected if x not in [index, target]]
return lst_features
def get_df_results(dct_model, dct_base):
# TODO: ABSTRACT THIS
# DEPENDS ON THIS FILE'S FRAMEWORK
import pandas as pd
df_results = pd.DataFrame.from_dict(dct_model, orient='index')
df_results['lst_col_str'] = df_results['lst_features'].apply(
lambda x: ', '.join(map(str, x)))
df_results[dct_base['lst_col_selected']] = 0
for col in dct_base['lst_col_selected']:
mask = df_results['lst_col_str'].str.contains(col, regex=False)
df_results.loc[mask, col] = 1
return df_results
def get_df_results_reg(dct_reg, dct_base, metric_reg):
df_results_reg = get_df_results(dct_reg, dct_base)
# results regression
df_results_reg = df_results_reg.drop(
columns=["reg","scaler_x","scaler_y",
"y_pred","y_test","y_pred_train",
"annotation_string"]
)
# overall best regressors
df_results_reg['reg_rank'] = df_results_reg[metric_reg].rank()
return df_results_reg
def get_pred_train_test(dct_reg, ii):
import numpy as np
y_pred_whole_series = np.concatenate( (
dct_reg[ii]["y_pred_train"],
dct_reg[ii]["y_pred"]
))
return y_pred_whole_series
def plot_reg(df_no_miss_data_reg, df_results_reg, dct_reg, index, target, test_start_idx_reg, metric_reg, ii):
# TODO: ABSTRACT THIS
# DEPENDS ON THIS FILE'S FRAMEWORK
import matplotlib.pyplot as plt
y_pred_whole_series = get_pred_train_test(dct_reg, ii)
df_no_miss_data_reg.plot(x=index, y=target, kind="scatter")
plt.plot(df_no_miss_data_reg[index], y_pred_whole_series, color="red")
rank = int(df_results_reg["reg_rank"].iloc[ii])
plt.title(f'{dct_reg[ii]["model"]}: {rank}/{len(dct_reg)}')
plt.axvline(x=(df_no_miss_data_reg[index]
.iloc[test_start_idx_reg]),
linewidth=1, color='k')
plt.xlabel(index)
plt.annotate(fr'{dct_reg[ii]["annotation_string"]}',
xy=(1.04, 0.60), xycoords='axes fraction')
plt.annotate(f'ID: {ii}\n{metric_reg}: {dct_reg[ii][metric_reg]:.3f}',
xy=(1.04, 0.1), xycoords='axes fraction')
plt.show()
pass
def get_dct_coef(algoritmo, lst_features, model):
if (algoritmo in ["Decision tree", "Random forest"]) and len(lst_features)>1:
dct_coef = dict(zip(lst_features, model.feature_importances_))
elif algoritmo == "Regressão linear":
dct_coef = dict(zip(lst_features, model.coef_[0]))
dct_coef["Constante"] = model.intercept_[0]
else:
dct_coef = {}
return dct_coef
def get_annotation_string(algoritmo, lst_features, dct_coef):
# TODO: ABSTRACT THIS
# DEPENDS ON THIS FILE'S FRAMEWORK
if not dct_coef:
iterable = lst_features
name = "Features"
else:
iterable = { f'{k}: {round(v, 3)}' for k,v in dct_coef.items() }
if algoritmo == "Regressão linear":
name = "Coeficientes"
elif (algoritmo in ["Decision tree", "Random forest"]) and len(lst_features)>1:
name = "Feature importance"
annotation_string = f'{name}:\n' + '\n'.join(iterable)
return annotation_string
def plot_confusion_matrix(dct_clf, ii):
# TODO: ABSTRACT THIS
# DEPENDS ON THIS FILE'S FRAMEWORK
import matplotlib.pyplot as plt
make_confusion_matrix(
dct_clf[ii]['confusion_matrix_test'],
group_names=[f"tn\npreviu descida\ne desceu",
f"fp\npreviu subida\ne desceu",
f"fn\npreviu descida\ne subiu",
f"tp\npreviu subida\ne subiu",],
categories=["descida","subida"]
)
plt.text(1.32,2.49,'taxa de acerto nas previsões')
plt.text(1.32,2.58,'taxa de acerto quando previu subida')
plt.text(1.32,2.69,'taxa de acerto quando subiu')
pass
def plot_errors_clf(df_encoded_raw, df_results_clf, dct_clf, index, target, test_start_idx_clf, ii):
# TODO: ABSTRACT THIS
# DEPENDS ON THIS FILE'S FRAMEWORK
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
# Errors representation
df_encoded_raw.iloc[test_start_idx_clf:].plot(x=index, y=target, ax=ax)
df_encoded_raw.iloc[dct_clf[ii]['fp']].plot(x=index, y=target, kind='scatter', color='green', marker='^', ax=ax)
df_encoded_raw.iloc[dct_clf[ii]['fn']].plot(x=index, y=target, kind='scatter', color='red', marker='v', ax=ax)
plt.legend(['actual','fp: predicted increase but it decreased','fn: predicted decrease but it increased'])#, title=row['model'])
move_legend(ax, "upper left", bbox_to_anchor=(1.04,1))
rank = df_results_clf["clf_rank"].iloc[ii]
rank = int(rank) if ((rank % 1) < 0.1) else rank # keep decimal if tied
plt.title(f'{dct_clf[ii]["model"]}: {rank}/{len(dct_clf)}')
plt.xlabel(index)
plt.ylabel(target)
plt.annotate(fr'{dct_clf[ii]["annotation_string"]}',
xy=(1.04, 0.30), xycoords='axes fraction')
plt.annotate(f'ID: {ii}',
xy=(1.04, 0.1), xycoords='axes fraction')
plt.show()
plot_confusion_matrix(dct_clf, ii)
pass
def count_consecutive_items_n_cols(df, col_name_list, output_col):
"""
source: https://stackoverflow.com/questions/25119524/pandas-conditional-rolling-count
"""
cum_sum_list = [
(df[col_name] != df[col_name].shift(1)).cumsum().tolist()
for col_name in col_name_list
]
df_out = df.copy()
df_out[output_col] = df[col_name_list].groupby(
["_".join(map(str, x)) for x in zip(*cum_sum_list)]
).cumcount() + 1
return df_out
def add_col_max_sequence_event_count(df, col):
"""
source: https://stackoverflow.com/questions/66822515/how-to-repeat-the-cumsum-for-previous-values-in-a-pandas-series-when-the-count
"""
df_out = df.copy()
df_out[col+'_max'] = df[col].groupby(df[col].eq(1).cumsum()).transform('max')
return df_out
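# Minimal sketch chaining the two helpers above: first count consecutive repeats of
# 'sign', then broadcast the maximum run length back over each run.
def demo_consecutive_counts():
    import pandas as pd
    df = pd.DataFrame({'sign': [1, 1, -1, -1, -1, 1]})
    df = count_consecutive_items_n_cols(df, ['sign'], 'run_len')
    df = add_col_max_sequence_event_count(df, 'run_len')
    print(df['run_len'].tolist(), df['run_len_max'].tolist())
    # expected: [1, 2, 1, 2, 3, 1] [2, 2, 3, 3, 3, 1]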
def multiline(xs, ys, c, ax=None, **kwargs):
"""Plot lines with different colorings
Parameters
----------
xs : iterable container of x coordinates
ys : iterable container of y coordinates
c : iterable container of numbers mapped to colormap
ax (optional): Axes to plot on.
kwargs (optional): passed to LineCollection
Notes:
len(xs) == len(ys) == len(c) is the number of line segments
len(xs[i]) == len(ys[i]) is the number of points for each line (indexed by i)
Returns
-------
lc : LineCollection instance.
Source:
https://stackoverflow.com/questions/38208700/matplotlib-plot-lines-with-colors-through-colormap
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
# find axes
ax = plt.gca() if ax is None else ax
# create LineCollection
segments = [np.column_stack([x, y]) for x, y in zip(xs, ys)]
lc = LineCollection(segments, **kwargs)
# set coloring of line segments
# Note: I get an error if I pass c as a list here... not sure why.
lc.set_array(np.asarray(c))
# add lines to axes and rescale
# Note: adding a collection doesn't autoscale xlim/ylim
ax.add_collection(lc)
ax.autoscale()
return lc
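# Minimal usage sketch for multiline: three sine curves, each coloured by one scalar.
def demo_multiline():
    import numpy as np
    import matplotlib.pyplot as plt
    xs = [np.linspace(0, 1, 50) for _ in range(3)]
    ys = [np.sin(2 * np.pi * x) + i for i, x in enumerate(xs)]
    fig, ax = plt.subplots()
    lc = multiline(xs, ys, c=[0, 1, 2], ax=ax, cmap='viridis', linewidths=2)
    fig.colorbar(lc)
    plt.show()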
def get_cols_date(df_in, index):
df = df_in.copy()
df['year'] = df[index].dt.year
df['month'] = df[index].dt.month
dct_dt = {
'bimester': 2,
'trimester': 3, # quarter
'quadrimester': 4,
'semester': 6
}
for period, n_months in dct_dt.items():
df[period] = df['month'].apply(
lambda month:
(month-1)//n_months + 1
# n_months_group(month, n_months)
)
return df
def get_ano_safra(month, year, safra_start_month):
return f'{year}-{year+1}' if (month>=safra_start_month) else f'{year-1}-{year}'
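# Minimal sketch for the two date helpers above: derive year/month/period columns and
# the crop-year ("ano safra") label for a season starting in July.
def demo_date_helpers():
    import pandas as pd
    df = pd.DataFrame({'date': pd.to_datetime(['2020-01-15', '2020-08-15'])})
    out = get_cols_date(df, index='date')
    print(out[['year', 'month', 'trimester', 'semester']].values.tolist())
    # expected: [[2020, 1, 1, 1], [2020, 8, 3, 2]]
    print(get_ano_safra(month=8, year=2020, safra_start_month=7))  # '2020-2021'
    print(get_ano_safra(month=3, year=2021, safra_start_month=7))  # '2020-2021'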
def plot_limit_pct_change(df, col, index, pct_change):
import matplotlib.pyplot as plt
# import matplotlib.ticker as mtick
from matplotlib.ticker import FuncFormatter
ax = df[[col]].pct_change().set_index(df[index]).plot(legend=False)
for y in [-pct_change, pct_change]:
ax.hlines(y=y,
xmin=df[index].iloc[0],
xmax=df[index].iloc[-1],
colors='black',
linestyle='dashed'
)
# ax.yaxis.set_major_formatter(mtick.PercentFormatter(1.0, decimals=0))
ax.yaxis.set_major_formatter(FuncFormatter('{0:.0%}'.format))
plt.ylabel('Percent change '+col)
# plt.legend()
plt.show()
pass
def get_3_classes(x, pct_change):
import numpy as np
if np.isnan(x):
return np.nan
elif x > pct_change:
return 1 # increase
elif x < -pct_change:
return -1 # decrease
elif (x >= -pct_change) and (x <= pct_change):
return 0 # stagnation
else:
print(x, type(x))
raise Exception('Should not reach here')
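# Minimal usage sketch for get_3_classes with a 2% band: returns -1 / 0 / 1 for
# decrease / stagnation / increase, and NaN is passed through unchanged.
def demo_get_3_classes():
    import numpy as np
    changes = [0.05, -0.03, 0.01, np.nan]
    print([get_3_classes(x, pct_change=0.02) for x in changes])
    # expected: [1, -1, 0, nan]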
def plot_col_level_vs_target(df, index, target_reg, pct_change_over_name, col, bins):
import matplotlib.pyplot as plt
import seaborn as sns
# import utils as ut
col_level = col+'_'+''.join([str(x) for x in range(bins)])
cm = sns.color_palette("RdBu", bins)
fig, ax = plt.subplots()
sns.scatterplot(
data=df,
x=index,
y=target_reg,
hue=col_level,
palette=cm,
style=pct_change_over_name,
markers=['v','s','^']
)
# ut.move_legend
move_legend(ax, "upper left", bbox_to_anchor=(1.04,1))
plt.show()
pass
def get_level_col(df, col, bins):
import pandas as pd
import matplotlib.pyplot as plt
import pprint
# import utils as ut
col_level = col+'_'+''.join([str(x) for x in range(bins)])
cut = pd.cut(df[col], bins=bins)  # api: pandas.cut -- snippet truncated here; a new snippet begins below
# Catboost for Avito Demand Prediction Challenge
# https://www.kaggle.com/c/avito-demand-prediction
# By <NAME>, April 2018
#https://www.kaggle.com/nicapotato/simple-catboost/code
import time
notebookstart = time.time()
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
import gc
from sklearn.model_selection import KFold
# print("Data:\n", os.listdir("../input"))
# Models Packages
from sklearn import metrics
from sklearn.metrics import mean_squared_error
from sklearn import feature_selection
from catboost import CatBoostRegressor
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
from sklearn import *
# Viz
# import seaborn as sns
# import matplotlib.pyplot as plt
print("\nData Load Stage")
debug=False
if debug:
nrows=10000*1
else:
nrows=1503424
training = pd.read_csv('../input/train.csv',nrows=nrows, index_col="item_id", parse_dates=["activation_date"])
traindex = training.index
len_train = len(training)
testing = pd.read_csv('../input/test.csv',nrows=nrows, index_col="item_id", parse_dates=["activation_date"])
testdex = testing.index
y = training.deal_probability.copy()
training.drop("deal_probability", axis=1, inplace=True)
import pickle
with open('../input/train_image_features.p', 'rb') as f:
x = pickle.load(f)
train_blurinesses = x['blurinesses']
train_ids = x['ids']
with open('../input/test_image_features.p', 'rb') as f:
x = pickle.load(f)
test_blurinesses = x['blurinesses']
test_ids = x['ids']
del x;
gc.collect()
incep_train_image_df = pd.DataFrame(train_blurinesses, columns=['blurinesses'])  # api: pandas.DataFrame -- snippet truncated here; a new snippet begins below
import pandas as pd
import os
import numpy as np
from pandas.core.frame import DataFrame
import lightgbm as lgb
class predict:
def __init__(self):
self.setConstants()
def setConstants(self):
self.houses = []
self.noMatchHouses = []
from ..models import models_logs
model = models_logs.objects.get(inUseFlag=1, trainSuccess=1)
print("将使用%s号模型" % model.id)
model_id = model.id
self.beginDate = model.startMonth.strftime('%Y-%m')
self.endDate = model.endMonth.strftime('%Y-%m')
'''
For testing:
print("Using model #%s" % 57)
model_id = 57
self.beginDate = '2017-02'
self.endDate = '2017-03'
'''
# Path to the new-property (AD_NewDisk) data table
self.newdisk_path = os.path.dirname(os.path.realpath(__file__)) + '/data/AD_NewDisk.csv'
# Path to the property-attributes data table
self.property_path = os.path.dirname(os.path.realpath(__file__)) + '/data/AD_Property.csv'
# Path to the address data table
self.address_path = os.path.dirname(os.path.realpath(__file__)) + '/data/AD_NewDiskAddress.csv'
# Path to the listing data
self.data_path = os.path.dirname(os.path.realpath(__file__)) + '/data/'
self.model_dir = os.path.dirname(os.path.realpath(__file__)) + '/cache/model_%s/' % (model_id)
if not os.path.exists(self.model_dir):
print("模型训练有问题")
return "模型训练有问题"
# Path to the median listing prices
self.medprice_path = self.model_dir + '/medprice.csv'
# Path to the district-name label encoding
self.arealabel_path = self.model_dir + '/arealabel.csv'
# Path to the plate-name (sub-district) label encoding
self.platelabel_path = self.model_dir + '/platelabel.csv'
# Path to the inner/middle/outer ring label encoding
self.modulelabel_path = self.model_dir + 'modulelabel.csv'
# Path to the cached model
self.cache_path_model = self.model_dir + '/model.txt'
# Path to the cached listing data
self.cache_path_guapai = os.path.dirname(os.path.realpath(__file__)) + '/cache/guapai_%s-%s.hdf' % (self.beginDate, self.endDate)
# Path to the cached preprocessed features
self.cache_path_feats = os.path.dirname(os.path.realpath(__file__)) + '/cache/feats_%s-%s.hdf' % (self.beginDate, self.endDate)
self.meta_df = pd.read_hdf(self.cache_path_guapai, 'meta')
self.gbm = lgb.Booster(model_file=self.cache_path_model)
self.med_price = pd.read_csv(self.medprice_path)
self.arealabel = pd.read_csv(self.arealabel_path, usecols=["label", "area"])
self.arealabel.set_index(["area"], inplace=True)
self.arealabel = self.arealabel.to_dict()["label"]
self.platelabel = pd.read_csv(self.platelabel_path, usecols=["label", "plate"])
self.platelabel.set_index(["plate"], inplace=True)
self.platelabel = self.platelabel.to_dict()["label"]
self.modulelabel = pd.read_csv(self.modulelabel_path, usecols=["Module", "unit_price"])
self.modulelabel.set_index(["Module"], inplace=True)
def make_coordinates(self, data):
coors = []
# for i in tqdm(data):
for i in data:
if type(i) == str and i != '公寓' and i != '商业' and i != '其它':  # skip '公寓' (apartment), '商业' (commercial), '其它' (other)
coors.append(i.split(','))
else:
coors.append([None, None])
coors = pd.DataFrame(coors, columns=['loc_x', 'loc_y'])
# coors=pd.DataFrame([coor.split(',') for coor in all_df.Coordinates],columns=['loc_x','loc_y'],index=all_df.index)
coors = coors.astype(float)
return coors
def find_DiskID(self, address):
# To improve the UX, the web page should check whether the estate exists as soon as the address is typed, instead of returning an error message only at prediction time!
# address = address_filter(address)
address_df = pd.read_csv(self.address_path, usecols=['RoadLaneNo', 'NewDiskID'])
# address_df = tools.read_basic_table("AD_NewDiskAddress")
address_df.rename(columns={'RoadLaneNo': 'address'}, inplace=True)
address_all = pd.merge(self.meta_df[["NewDiskID", "name"]], address_df, how='left', on='NewDiskID').dropna(axis=0,
how='any')
address_fit = address_all[address_all.address.str.contains(address)]
address_fit = address_fit.head(1) # take the first match
if address_fit.empty:
print("找不到对应的小区!") # 报错信息记得返回到前端
return (None, None, None)
else:
print(address_fit)
NewDiskID = address_fit.iat[0, 0]
return (NewDiskID, address_fit.iat[0, 1], address_fit.iat[0, 2])
###############################################
## Needs rewriting: do the lookup in the database instead of searching locally. ##
def find_DiskID_ByName(self, diskname_input):
address_df = pd.read_csv(self.address_path, usecols=['RoadLaneNo', 'NewDiskID'])  # api: pandas.read_csv -- snippet truncated here; a new snippet begins below
# -*- coding: utf-8 -*-
# pylint: disable=E1101
# flake8: noqa
from datetime import datetime
import csv
import os
import sys
import re
import nose
import platform
from multiprocessing.pool import ThreadPool
from numpy import nan
import numpy as np
from pandas.io.common import DtypeWarning
from pandas import DataFrame, Series, Index, MultiIndex, DatetimeIndex
from pandas.compat import(
StringIO, BytesIO, PY3, range, long, lrange, lmap, u
)
from pandas.io.common import URLError
import pandas.io.parsers as parsers
from pandas.io.parsers import (read_csv, read_table, read_fwf,
TextFileReader, TextParser)
import pandas.util.testing as tm
import pandas as pd
from pandas.compat import parse_date
import pandas.lib as lib
from pandas import compat
from pandas.lib import Timestamp
from pandas.tseries.index import date_range
import pandas.tseries.tools as tools
from numpy.testing.decorators import slow
import pandas.parser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def read_csv(self, *args, **kwargs):
raise NotImplementedError
def read_table(self, *args, **kwargs):
raise NotImplementedError
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
self.dirpath = tm.get_data_path()
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
def construct_dataframe(self, num_rows):
df = DataFrame(np.random.rand(num_rows, 5), columns=list('abcde'))
df['foo'] = 'foo'
df['bar'] = 'bar'
df['baz'] = 'baz'
df['date'] = pd.date_range('20000101 09:00:00',
periods=num_rows,
freq='s')
df['int'] = np.arange(num_rows, dtype='int64')
return df
def generate_multithread_dataframe(self, path, num_rows, num_tasks):
def reader(arg):
start, nrows = arg
if not start:
return pd.read_csv(path, index_col=0, header=0, nrows=nrows,
parse_dates=['date'])
return pd.read_csv(path,
index_col=0,
header=None,
skiprows=int(start) + 1,
nrows=nrows,
parse_dates=[9])
tasks = [
(num_rows * i / num_tasks,
num_rows / num_tasks) for i in range(num_tasks)
]
pool = ThreadPool(processes=num_tasks)
results = pool.map(reader, tasks)
header = results[0].columns
for r in results[1:]:
r.columns = header
final_dataframe = pd.concat(results)
return final_dataframe
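def test_generate_multithread_dataframe_sketch(self):
    # Usage sketch of the helper above, not part of the original suite; the
    # row count, task count and temp-file name are made up. Round-trip a
    # frame through CSV with four reader threads and compare the result
    # against a plain single-threaded read_csv of the same file.
    num_rows, num_tasks = 10000, 4
    df = self.construct_dataframe(num_rows)
    with tm.ensure_clean('__threaded_read__.csv') as path:
        df.to_csv(path)
        threaded = self.generate_multithread_dataframe(path, num_rows,
                                                       num_tasks)
        single = pd.read_csv(path, index_col=0, parse_dates=['date'])
        tm.assert_frame_equal(threaded, single)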
def test_converters_type_must_be_dict(self):
with tm.assertRaisesRegexp(TypeError, 'Type converters.+'):
self.read_csv(StringIO(self.data1), converters=0)
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), decimal='')
def test_empty_thousands_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands='')
def test_multi_character_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands=',,')
def test_empty_string(self):
data = """\
One,Two,Three
a,1,one
b,2,two
,3,three
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(StringIO(data))
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []},
keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five',
'', 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(
StringIO(data), na_values=['a'], keep_default_na=False)
xp = DataFrame({'One': [np.nan, 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []})
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
# GH4318, passing na_values=None and keep_default_na=False yields
# 'None' as a na_value
data = """\
One,Two,Three
a,1,None
b,2,two
,3,None
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(
StringIO(data), keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['None', 'two', 'None', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
# it works!
read_csv(fname, index_col=0, parse_dates=True)
def test_dialect(self):
data = """\
label1,label2,label3
index1,"a,c,e
index2,b,d,f
"""
dia = csv.excel()
dia.quoting = csv.QUOTE_NONE
df = self.read_csv(StringIO(data), dialect=dia)
data = '''\
label1,label2,label3
index1,a,c,e
index2,b,d,f
'''
exp = self.read_csv(StringIO(data))
exp.replace('a', '"a', inplace=True)
tm.assert_frame_equal(df, exp)
def test_dialect_str(self):
data = """\
fruit:vegetable
apple:brocolli
pear:tomato
"""
exp = DataFrame({
'fruit': ['apple', 'pear'],
'vegetable': ['brocolli', 'tomato']
})
dia = csv.register_dialect('mydialect', delimiter=':') # noqa
df = self.read_csv(StringIO(data), dialect='mydialect')
tm.assert_frame_equal(df, exp)
csv.unregister_dialect('mydialect')
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
tm.assert_equal(expected.A.dtype, 'int64')
tm.assert_equal(expected.B.dtype, 'float')
tm.assert_equal(expected.C.dtype, 'float')
df = self.read_csv(StringIO(data), sep='|', thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|',
thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
data_with_odd_sep = """A|B|C
1|2.334,01|5
10|13|10,
"""
df = self.read_csv(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
def test_separator_date_conflict(self):
# Regression test for issue #4678: make sure thousands separator and
# date parsing do not conflict.
data = '06-02-2013;13:00;1-000.215'
expected = DataFrame(
[[datetime(2013, 6, 2, 13, 0, 0), 1000.215]],
columns=['Date', 2]
)
df = self.read_csv(StringIO(data), sep=';', thousands='-',
parse_dates={'Date': [0, 1]}, header=None)
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
tm.assertIsInstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# GH 8217
# series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
self.assertFalse(result._is_view)
def test_inf_parsing(self):
data = """\
,A
a,inf
b,-inf
c,Inf
d,-Inf
e,INF
f,-INF
g,INf
h,-INf
i,inF
j,-inF"""
inf = float('inf')
expected = Series([inf, -inf] * 5)
df = read_csv(StringIO(data), index_col=0)
tm.assert_almost_equal(df['A'].values, expected.values)
df = read_csv(StringIO(data), index_col=0, na_filter=False)
tm.assert_almost_equal(df['A'].values, expected.values)
def test_multiple_date_col(self):
# Can use multiple date parsers
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def func(*date_cols):
return lib.try_parse_dates(parsers._concat_date_cols(date_cols))
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
prefix='X',
parse_dates={'nominal': [1, 2],
'actual': [1, 3]})
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'nominal'], d)
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
parse_dates={'nominal': [1, 2],
'actual': [1, 3]},
keep_date_col=True)
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
df = read_csv(StringIO(data), header=None,
prefix='X',
parse_dates=[[1, 2], [1, 3]])
self.assertIn('X1_X2', df)
self.assertIn('X1_X3', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'X1_X2'], d)
df = read_csv(StringIO(data), header=None,
parse_dates=[[1, 2], [1, 3]], keep_date_col=True)
self.assertIn('1_2', df)
self.assertIn('1_3', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = '''\
KORD,19990127 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
'''
df = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[1], index_col=1)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.index[0], d)
def test_multiple_date_cols_int_cast(self):
data = ("KORD,19990127, 19:00:00, 18:56:00, 0.8100\n"
"KORD,19990127, 20:00:00, 19:56:00, 0.0100\n"
"KORD,19990127, 21:00:00, 20:56:00, -0.5900\n"
"KORD,19990127, 21:00:00, 21:18:00, -0.9900\n"
"KORD,19990127, 22:00:00, 21:56:00, -0.5900\n"
"KORD,19990127, 23:00:00, 22:56:00, -0.5900")
date_spec = {'nominal': [1, 2], 'actual': [1, 3]}
import pandas.io.date_converters as conv
# it works!
df = self.read_csv(StringIO(data), header=None, parse_dates=date_spec,
date_parser=conv.parse_date_time)
self.assertIn('nominal', df)
def test_multiple_date_col_timestamp_parse(self):
data = """05/31/2012,15:30:00.029,1306.25,1,E,0,,1306.25
05/31/2012,15:30:00.029,1306.25,8,E,0,,1306.25"""
result = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[[0, 1]], date_parser=Timestamp)
ex_val = Timestamp('05/31/2012 15:30:00.029')
self.assertEqual(result['0_1'][0], ex_val)
def test_single_line(self):
# GH 6607
# Test currently only valid with python engine because sep=None and
# delim_whitespace=False. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
'sep=None with delim_whitespace=False'):
# sniff separator
buf = StringIO()
sys.stdout = buf
# printing warning message when engine == 'c' for now
try:
# it works!
df = self.read_csv(StringIO('1,2'), names=['a', 'b'],
header=None, sep=None)
tm.assert_frame_equal(DataFrame({'a': [1], 'b': [2]}), df)
finally:
sys.stdout = sys.__stdout__
def test_multiple_date_cols_with_header(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
self.assertNotIsInstance(df.nominal[0], compat.string_types)
ts_data = """\
ID,date,nominalTime,actualTime,A,B,C,D,E
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def test_multiple_date_col_name_collision(self):
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
parse_dates={'ID': [1, 2]})
data = """\
date_NominalTime,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000""" # noqa
self.assertRaises(ValueError, self.read_csv, StringIO(data),
parse_dates=[[1, 2]])
def test_index_col_named(self):
no_header = """\
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
h = "ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir\n"
data = h + no_header
rs = self.read_csv(StringIO(data), index_col='ID')
xp = self.read_csv(StringIO(data), header=0).set_index('ID')
tm.assert_frame_equal(rs, xp)
self.assertRaises(ValueError, self.read_csv, StringIO(no_header),
index_col='ID')
data = """\
1,2,3,4,hello
5,6,7,8,world
9,10,11,12,foo
"""
names = ['a', 'b', 'c', 'd', 'message']
xp = DataFrame({'a': [1, 5, 9], 'b': [2, 6, 10], 'c': [3, 7, 11],
'd': [4, 8, 12]},
index=Index(['hello', 'world', 'foo'], name='message'))
rs = self.read_csv(StringIO(data), names=names, index_col=['message'])
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
rs = self.read_csv(StringIO(data), names=names, index_col='message')
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
def test_usecols_index_col_False(self):
# Issue 9082
s = "a,b,c,d\n1,2,3,4\n5,6,7,8"
s_malformed = "a,b,c,d\n1,2,3,4,\n5,6,7,8,"
cols = ['a', 'c', 'd']
expected = DataFrame({'a': [1, 5], 'c': [3, 7], 'd': [4, 8]})
df = self.read_csv(StringIO(s), usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(s_malformed),
usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
def test_index_col_is_True(self):
# Issue 9798
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
index_col=True)
def test_converter_index_col_bug(self):
# 1835
data = "A;B\n1;2\n3;4"
rs = self.read_csv(StringIO(data), sep=';', index_col='A',
converters={'A': lambda x: x})
xp = DataFrame({'B': [2, 4]}, index=Index([1, 3], name='A'))
tm.assert_frame_equal(rs, xp)
self.assertEqual(rs.index.name, xp.index.name)
def test_date_parser_int_bug(self):
# #3071
log_file = StringIO(
'posix_timestamp,elapsed,sys,user,queries,query_time,rows,'
'accountid,userid,contactid,level,silo,method\n'
'1343103150,0.062353,0,4,6,0.01690,3,'
'12345,1,-1,3,invoice_InvoiceResource,search\n'
)
def f(posix_string):
return datetime.utcfromtimestamp(int(posix_string))
# it works!
read_csv(log_file, index_col=0, parse_dates=0, date_parser=f)
def test_multiple_skts_example(self):
data = "year, month, a, b\n 2001, 01, 0.0, 10.\n 2001, 02, 1.1, 11."
pass
def test_malformed(self):
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#')
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# skip_footer
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
try:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'): # XXX
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#',
skip_footer=1)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
df = it.read(5)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read(2)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1, skiprows=[2])
df = it.read(1)
it.read()
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
def test_passing_dtype(self):
# GH 6607
# Passing dtype is currently only supported by the C engine.
# Temporarily copied to TestCParser*.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
df = DataFrame(np.random.rand(5, 2), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
df.to_csv(path)
# GH 3795
# passing 'str' as the dtype
result = self.read_csv(path, dtype=str, index_col=0)
tm.assert_series_equal(result.dtypes, Series(
{'A': 'object', 'B': 'object'}))
# we expect all object columns, so need to convert to test for
# equivalence
result = result.astype(float)
tm.assert_frame_equal(result, df)
# invalid dtype
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'foo', 'B': 'float64'},
index_col=0)
# valid but we don't support it (date)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0, parse_dates=['B'])
# valid but we don't support it
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'timedelta64', 'B': 'float64'},
index_col=0)
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
# empty frame
# GH12048
self.read_csv(StringIO('A,B'), dtype=str)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>"""
self.assertRaises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
self.assertEqual(len(df), 3)
def test_non_string_na_values(self):
# GH3611, na_values that are not a string are an issue
with tm.ensure_clean('__non_string_na_values__.csv') as path:
df = DataFrame({'A': [-999, 2, 3], 'B': [1.2, -999, 4.5]})
df.to_csv(path, sep=' ', index=False)
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, result2)
tm.assert_frame_equal(result2, result3)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, result3)
tm.assert_frame_equal(result5, result3)
tm.assert_frame_equal(result6, result3)
tm.assert_frame_equal(result7, result3)
good_compare = result3
# with an odd float format, so we can't match the string 999.0
# exactly, but need float matching
df.to_csv(path, sep=' ', index=False, float_format='%.3f')
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, good_compare)
tm.assert_frame_equal(result2, good_compare)
tm.assert_frame_equal(result3, good_compare)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, good_compare)
tm.assert_frame_equal(result5, good_compare)
tm.assert_frame_equal(result6, good_compare)
tm.assert_frame_equal(result7, good_compare)
def test_default_na_values(self):
_NA_VALUES = set(['-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN',
'#N/A', 'N/A', 'NA', '#NA', 'NULL', 'NaN',
'nan', '-NaN', '-nan', '#N/A N/A', ''])
self.assertEqual(_NA_VALUES, parsers._NA_VALUES)
nv = len(_NA_VALUES)
def f(i, v):
if i == 0:
buf = ''
elif i > 0:
buf = ''.join([','] * i)
buf = "{0}{1}".format(buf, v)
if i < nv - 1:
buf = "{0}{1}".format(buf, ''.join([','] * (nv - i - 1)))
return buf
data = StringIO('\n'.join([f(i, v) for i, v in enumerate(_NA_VALUES)]))
expected = DataFrame(np.nan, columns=range(nv), index=range(nv))
df = self.read_csv(data, header=None)
tm.assert_frame_equal(df, expected)
def test_custom_na_values(self):
data = """A,B,C
ignore,this,row
1,NA,3
-1.#IND,5,baz
7,8,NaN
"""
expected = [[1., nan, 3],
[nan, 5, nan],
[7, 8, nan]]
df = self.read_csv(StringIO(data), na_values=['baz'], skiprows=[1])
tm.assert_almost_equal(df.values, expected)
df2 = self.read_table(StringIO(data), sep=',', na_values=['baz'],
skiprows=[1])
tm.assert_almost_equal(df2.values, expected)
df3 = self.read_table(StringIO(data), sep=',', na_values='baz',
skiprows=[1])
tm.assert_almost_equal(df3.values, expected)
def test_nat_parse(self):
# GH 3062
df = DataFrame(dict({
'A': np.asarray(lrange(10), dtype='float64'),
'B': pd.Timestamp('20010101')}))
df.iloc[3:6, :] = np.nan
with tm.ensure_clean('__nat_parse_.csv') as path:
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
expected = Series(dict(A='float64', B='datetime64[ns]'))
tm.assert_series_equal(expected, result.dtypes)
# test with NaT for the nan_rep
# we don't have a method to specify the Datetime na_rep (it defaults
# to '')
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
def test_skiprows_bug(self):
# GH #505
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=lrange(6), header=None,
index_col=0, parse_dates=True)
data2 = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
tm.assert_frame_equal(data, data2)
def test_deep_skiprows(self):
# GH #4382
text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in range(10)])
condensed_text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in [0, 1, 2, 3, 4, 6, 8, 9]])
data = self.read_csv(StringIO(text), skiprows=[6, 8])
condensed_data = self.read_csv(StringIO(condensed_text))
tm.assert_frame_equal(data, condensed_data)
def test_skiprows_blank(self):
# GH 9832
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
def test_detect_string_na(self):
data = """A,B
foo,bar
NA,baz
NaN,nan
"""
expected = [['foo', 'bar'],
[nan, 'baz'],
[nan, nan]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
def test_unnamed_columns(self):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
df = self.read_table(StringIO(data), sep=',')
tm.assert_almost_equal(df.values, expected)
self.assert_numpy_array_equal(df.columns,
['A', 'B', 'C', 'Unnamed: 3',
'Unnamed: 4'])
def test_string_nas(self):
data = """A,B,C
a,b,c
d,,f
,g,h
"""
result = self.read_csv(StringIO(data))
expected = DataFrame([['a', 'b', 'c'],
['d', np.nan, 'f'],
[np.nan, 'g', 'h']],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(result, expected)
def test_duplicate_columns(self):
for engine in ['python', 'c']:
data = """A,A,B,B,B
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
# check default behaviour
df = self.read_table(StringIO(data), sep=',', engine=engine)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=False)
self.assertEqual(list(df.columns), ['A', 'A', 'B', 'B', 'B'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=True)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
def test_csv_mixed_type(self):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
df = self.read_csv(StringIO(data))
# TODO
def test_csv_custom_parser(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
f = lambda x: datetime.strptime(x, '%Y%m%d')
df = self.read_csv(StringIO(data), date_parser=f)
expected = self.read_csv(StringIO(data), parse_dates=True)
tm.assert_frame_equal(df, expected)
def test_parse_dates_implicit_first_col(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
df = self.read_csv(StringIO(data), parse_dates=True)
expected = self.read_csv(StringIO(data), index_col=0, parse_dates=True)
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
tm.assert_frame_equal(df, expected)
def test_parse_dates_string(self):
data = """date,A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
rs = self.read_csv(
StringIO(data), index_col='date', parse_dates='date')
idx = date_range('1/1/2009', periods=3)
idx.name = 'date'
xp = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 3, 4],
'C': [2, 4, 5]}, idx)
tm.assert_frame_equal(rs, xp)
def test_yy_format(self):
data = """date,time,B,C
090131,0010,1,2
090228,1020,3,4
090331,0830,5,6
"""
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[['date', 'time']])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[[0, 1]])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
def test_parse_dates_column_list(self):
from pandas.core.datetools import to_datetime
data = '''date;destination;ventilationcode;unitcode;units;aux_date
01/01/2010;P;P;50;1;12/1/2011
01/01/2010;P;R;50;1;13/1/2011
15/01/2010;P;P;50;1;14/1/2011
01/05/2010;P;P;50;1;15/1/2011'''
expected = self.read_csv(StringIO(data), sep=";", index_col=lrange(4))
lev = expected.index.levels[0]
levels = list(expected.index.levels)
levels[0] = lev.to_datetime(dayfirst=True)
# hack to get this to work - remove for final test
levels[0].name = lev.name
expected.index.set_levels(levels, inplace=True)
expected['aux_date'] = to_datetime(expected['aux_date'],
dayfirst=True)
expected['aux_date'] = lmap(Timestamp, expected['aux_date'])
tm.assertIsInstance(expected['aux_date'][0], datetime)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=[0, 5], dayfirst=True)
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=['date', 'aux_date'], dayfirst=True)
tm.assert_frame_equal(df, expected)
def test_no_header(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df = self.read_table(StringIO(data), sep=',', header=None)
df_pref = self.read_table(StringIO(data), sep=',', prefix='X',
header=None)
names = ['foo', 'bar', 'baz', 'quux', 'panda']
df2 = self.read_table(StringIO(data), sep=',', names=names)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df.values, expected)
tm.assert_almost_equal(df.values, df2.values)
self.assert_numpy_array_equal(df_pref.columns,
['X0', 'X1', 'X2', 'X3', 'X4'])
self.assert_numpy_array_equal(df.columns, lrange(5))
self.assert_numpy_array_equal(df2.columns, names)
def test_no_header_prefix(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df_pref = self.read_table(StringIO(data), sep=',', prefix='Field',
header=None)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df_pref.values, expected)
self.assert_numpy_array_equal(df_pref.columns,
['Field0', 'Field1', 'Field2', 'Field3', 'Field4'])
def test_header_with_index_col(self):
data = """foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
names = ['A', 'B', 'C']
df = self.read_csv(StringIO(data), names=names)
self.assertEqual(names, ['A', 'B', 'C'])
values = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
expected = DataFrame(values, index=['foo', 'bar', 'baz'],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(df, expected)
def test_read_csv_dataframe(self):
df = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv1, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D'])
self.assertEqual(df.index.name, 'index')
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_no_index_name(self):
df = self.read_csv(self.csv2, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv2, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D', 'E'])
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.ix[:, ['A', 'B', 'C', 'D']
].values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_infer_compression(self):
# GH 9770
expected = self.read_csv(self.csv1, index_col=0, parse_dates=True)
inputs = [self.csv1, self.csv1 + '.gz',
self.csv1 + '.bz2', open(self.csv1)]
for f in inputs:
df = self.read_csv(f, index_col=0, parse_dates=True,
compression='infer')
tm.assert_frame_equal(expected, df)
inputs[3].close()
def test_read_table_unicode(self):
fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8'))
df1 = read_table(fin, sep=";", encoding="utf-8", header=None)
tm.assertIsInstance(df1[0].values[0], compat.text_type)
def test_read_table_wrong_num_columns(self):
# too few!
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
self.assertRaises(Exception, self.read_csv, StringIO(data))
def test_read_table_duplicate_index(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
result = self.read_csv(StringIO(data), index_col=0)
expected = self.read_csv(StringIO(data)).set_index('index',
verify_integrity=False)
tm.assert_frame_equal(result, expected)
def test_read_table_duplicate_index_implicit(self):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
# it works!
result = self.read_csv(StringIO(data))
def test_parse_bools(self):
data = """A,B
True,1
False,2
True,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
YES,1
no,2
yes,3
No,3
Yes,3
"""
data = self.read_csv(StringIO(data),
true_values=['yes', 'Yes', 'YES'],
false_values=['no', 'NO', 'No'])
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
TRUE,1
FALSE,2
TRUE,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
foo,bar
bar,foo"""
result = self.read_csv(StringIO(data), true_values=['foo'],
false_values=['bar'])
expected = DataFrame({'A': [True, False], 'B': [False, True]})
tm.assert_frame_equal(result, expected)
def test_int_conversion(self):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.float64)
self.assertEqual(data['B'].dtype, np.int64)
def test_infer_index_col(self):
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
data = self.read_csv(StringIO(data))
self.assertTrue(data.index.equals(Index(['foo', 'bar', 'baz'])))
def test_read_nrows(self):
df = self.read_csv(StringIO(self.data1), nrows=3)
expected = self.read_csv(StringIO(self.data1))[:3]
tm.assert_frame_equal(df, expected)
def test_read_chunksize(self):
reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_read_chunksize_named(self):
reader = self.read_csv(
StringIO(self.data1), index_col='index', chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col='index')
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_get_chunk_passed_chunksize(self):
data = """A,B,C
1,2,3
4,5,6
7,8,9
1,2,3"""
result = self.read_csv(StringIO(data), chunksize=2)
piece = result.get_chunk()
self.assertEqual(len(piece), 2)
def test_read_text_list(self):
data = """A,B,C\nfoo,1,2,3\nbar,4,5,6"""
as_list = [['A', 'B', 'C'], ['foo', '1', '2', '3'], ['bar',
'4', '5', '6']]
df = self.read_csv(StringIO(data), index_col=0)
parser = TextParser(as_list, index_col=0, chunksize=2)
chunk = parser.read(None)
tm.assert_frame_equal(chunk, df)
def test_iterator(self):
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'):
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunk = reader.read(3)
tm.assert_frame_equal(chunk, df[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, df[3:])
# pass list
lines = list(csv.reader(StringIO(self.data1)))
parser = TextParser(lines, index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# pass skiprows
parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1])
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[1:3])
# test bad parameter (skip_footer)
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True, skip_footer=True)
self.assertRaises(ValueError, reader.read, 3)
treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,
iterator=True)
tm.assertIsInstance(treader, TextFileReader)
# stopping iteration when on chunksize is specified, GH 3967
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = self.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
tm.assert_frame_equal(result[0], expected)
# chunksize = 1
reader = self.read_csv(StringIO(data), chunksize=1)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
self.assertEqual(len(result), 3)
tm.assert_frame_equal(pd.concat(result), expected)
def test_header_not_first_line(self):
data = """got,to,ignore,this,line
got,to,ignore,this,line
index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
data2 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
df = self.read_csv(StringIO(data), header=2, index_col=0)
expected = self.read_csv(StringIO(data2), header=0, index_col=0)
tm.assert_frame_equal(df, expected)
def test_header_multi_index(self):
expected = tm.makeCustomDataframe(
5, 3, r_idx_nlevels=2, c_idx_nlevels=4)
data = """\
C0,,C_l0_g0,C_l0_g1,C_l0_g2
C1,,C_l1_g0,C_l1_g1,C_l1_g2
C2,,C_l2_g0,C_l2_g1,C_l2_g2
C3,,C_l3_g0,C_l3_g1,C_l3_g2
R0,R1,,,
R_l0_g0,R_l1_g0,R0C0,R0C1,R0C2
R_l0_g1,R_l1_g1,R1C0,R1C1,R1C2
R_l0_g2,R_l1_g2,R2C0,R2C1,R2C2
R_l0_g3,R_l1_g3,R3C0,R3C1,R3C2
R_l0_g4,R_l1_g4,R4C0,R4C1,R4C2
"""
df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[
0, 1], tupleize_cols=False)
tm.assert_frame_equal(df, expected)
# skipping lines in the header
df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[
0, 1], tupleize_cols=False)
tm.assert_frame_equal(df, expected)
#### invalid options ####
# no as_recarray
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], as_recarray=True, tupleize_cols=False)
# names
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], names=['foo', 'bar'], tupleize_cols=False)
# usecols
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], usecols=['foo', 'bar'], tupleize_cols=False)
# non-numeric index_col
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=['foo', 'bar'], tupleize_cols=False)
def test_header_multiindex_common_format(self):
df = DataFrame([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]],
index=['one', 'two'],
columns=MultiIndex.from_tuples([('a', 'q'), ('a', 'r'), ('a', 's'),
('b', 't'), ('c', 'u'), ('c', 'v')]))
# to_csv
data = """,a,a,a,b,c,c
,q,r,s,t,u,v
,,,,,,
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(df, result)
# common
data = """,a,a,a,b,c,c
,q,r,s,t,u,v
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(df, result)
# common, no index_col
data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=None)
tm.assert_frame_equal(df.reset_index(drop=True), result)
# malformed case 1
expected = DataFrame(np.array([[2, 3, 4, 5, 6],
[8, 9, 10, 11, 12]], dtype='int64'),
index=Index([1, 7]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('r'), u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 0, 1, 2, 2], [
0, 1, 2, 3, 4]],
names=[u('a'), u('q')]))
data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(expected, result)
# malformed case 2
expected = DataFrame(np.array([[2, 3, 4, 5, 6],
[8, 9, 10, 11, 12]], dtype='int64'),
index=Index([1, 7]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('r'), u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 0, 1, 2, 2], [
0, 1, 2, 3, 4]],
names=[None, u('q')]))
data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(expected, result)
# mi on columns and index (malformed)
expected = DataFrame(np.array([[3, 4, 5, 6],
[9, 10, 11, 12]], dtype='int64'),
index=MultiIndex(levels=[[1, 7], [2, 8]],
labels=[[0, 1], [0, 1]]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 1, 2, 2],
[0, 1, 2, 3]],
names=[None, u('q')]))
data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=[0, 1])
tm.assert_frame_equal(expected, result)
def test_pass_names_with_index(self):
lines = self.data1.split('\n')
no_header = '\n'.join(lines[1:])
# regular index
names = ['index', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=0, names=names)
expected = self.read_csv(StringIO(self.data1), index_col=0)
tm.assert_frame_equal(df, expected)
# multi index
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['index1', 'index2', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), index_col=['index1', 'index2'])
tm.assert_frame_equal(df, expected)
def test_multi_index_no_level_names(self):
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
data2 = """A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
header=None, names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected, check_names=False)
# 2 implicit first cols
df2 = self.read_csv(StringIO(data2))
tm.assert_frame_equal(df2, df)
# reverse order of index
df = self.read_csv(StringIO(no_header), index_col=[1, 0], names=names,
header=None)
expected = self.read_csv(StringIO(data), index_col=[1, 0])
tm.assert_frame_equal(df, expected, check_names=False)
def test_multi_index_parse_dates(self):
data = """index1,index2,A,B,C
20090101,one,a,1,2
20090101,two,b,3,4
20090101,three,c,4,5
20090102,one,a,1,2
20090102,two,b,3,4
20090102,three,c,4,5
20090103,one,a,1,2
20090103,two,b,3,4
20090103,three,c,4,5
"""
df = self.read_csv(StringIO(data), index_col=[0, 1], parse_dates=True)
self.assertIsInstance(df.index.levels[0][0],
(datetime, np.datetime64, Timestamp))
# specify columns out of order!
df2 = self.read_csv(StringIO(data), index_col=[1, 0], parse_dates=True)
self.assertIsInstance(df2.index.levels[1][0],
(datetime, np.datetime64, Timestamp))
def test_skip_footer(self):
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'):
data = """A,B,C
1,2,3
4,5,6
7,8,9
want to skip this
also also skip this
"""
result = self.read_csv(StringIO(data), skip_footer=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = self.read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), nrows=3)
tm.assert_frame_equal(result, expected)
# skipfooter alias
result = read_csv(StringIO(data), skipfooter=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
def test_no_unnamed_index(self):
data = """ id c0 c1 c2
0 1 0 a b
1 2 0 c d
2 2 2 e f
"""
df = self.read_table(StringIO(data), sep=' ')
self.assertIsNone(df.index.name)
def test_converters(self):
data = """A,B,C,D
a,1,2,01/01/2009
b,3,4,01/02/2009
c,4,5,01/03/2009
"""
from pandas.compat import parse_date
result = self.read_csv(StringIO(data), converters={'D': parse_date})
result2 = self.read_csv(StringIO(data), converters={3: parse_date})
expected = self.read_csv(StringIO(data))
expected['D'] = expected['D'].map(parse_date)
tm.assertIsInstance(result['D'][0], (datetime, Timestamp))
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
# produce integer
converter = lambda x: int(x.split('/')[2])
result = self.read_csv(StringIO(data), converters={'D': converter})
expected = self.read_csv(StringIO(data))
expected['D'] = expected['D'].map(converter)
tm.assert_frame_equal(result, expected)
def test_converters_no_implicit_conv(self):
# GH2184
data = """000102,1.2,A\n001245,2,B"""
f = lambda x: x.strip()
converter = {0: f}
df = self.read_csv(StringIO(data), header=None, converters=converter)
self.assertEqual(df[0].dtype, object)
def test_converters_euro_decimal_format(self):
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
f = lambda x: float(x.replace(",", "."))
converter = {'Number1': f, 'Number2': f, 'Number3': f}
df2 = self.read_csv(StringIO(data), sep=';', converters=converter)
self.assertEqual(df2['Number1'].dtype, float)
self.assertEqual(df2['Number2'].dtype, float)
self.assertEqual(df2['Number3'].dtype, float)
def test_converter_return_string_bug(self):
# GH #583
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
f = lambda x: float(x.replace(",", "."))
converter = {'Number1': f, 'Number2': f, 'Number3': f}
df2 = self.read_csv(StringIO(data), sep=';', converters=converter)
self.assertEqual(df2['Number1'].dtype, float)
def test_read_table_buglet_4x_multiindex(self):
# GH 6607
# Parsing multi-level index currently causes an error in the C parser.
# Temporarily copied to TestPythonParser.
# Here test that CParserError is raised:
with tm.assertRaises(pandas.parser.CParserError):
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
# it works!
df = self.read_table(StringIO(text), sep='\s+')
self.assertEqual(df.index.names, ('one', 'two', 'three', 'four'))
def test_line_comment(self):
data = """# empty
A,B,C
1,2.,4.#hello world
#ignore this line
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows(self):
data = """# empty
random line
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# this should ignore the first four lines (including comments)
df = self.read_csv(StringIO(data), comment='#', skiprows=4)
tm.assert_almost_equal(df.values, expected)
def test_comment_header(self):
data = """# empty
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# header should begin at the second non-comment line
df = self.read_csv(StringIO(data), comment='#', header=1)
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows_header(self):
data = """# empty
# second empty line
# third empty line
X,Y,Z
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# skiprows should skip the first 4 lines (including comments), while
# header should start from the second non-commented line starting
# with line 5
df = self.read_csv(StringIO(data), comment='#', skiprows=4, header=1)
tm.assert_almost_equal(df.values, expected)
def test_read_csv_parse_simple_list(self):
text = """foo
bar baz
qux foo
foo
bar"""
df = read_csv(StringIO(text), header=None)
expected = DataFrame({0: ['foo', 'bar baz', 'qux foo',
'foo', 'bar']})
tm.assert_frame_equal(df, expected)
def test_parse_dates_custom_euroformat(self):
text = """foo,bar,baz
31/01/2010,1,2
01/02/2010,1,NA
02/02/2010,1,2
"""
parser = lambda d: parse_date(d, dayfirst=True)
df = self.read_csv(StringIO(text),
names=['time', 'Q', 'NTU'], header=0,
index_col=0, parse_dates=True,
date_parser=parser, na_values=['NA'])
exp_index = Index([datetime(2010, 1, 31), datetime(2010, 2, 1),
datetime(2010, 2, 2)], name='time')
expected = DataFrame({'Q': [1, 1, 1], 'NTU': [2, np.nan, 2]},
index=exp_index, columns=['Q', 'NTU'])
tm.assert_frame_equal(df, expected)
parser = lambda d: parse_date(d, day_first=True)
self.assertRaises(Exception, self.read_csv,
StringIO(text), skiprows=[0],
names=['time', 'Q', 'NTU'], index_col=0,
parse_dates=True, date_parser=parser,
na_values=['NA'])
def test_na_value_dict(self):
data = """A,B,C
foo,bar,NA
bar,foo,foo
foo,bar,NA
bar,foo,foo"""
df = self.read_csv(StringIO(data),
na_values={'A': ['foo'], 'B': ['bar']})
expected = DataFrame({'A': [np.nan, 'bar', np.nan, 'bar'],
'B': [np.nan, 'foo', np.nan, 'foo'],
'C': [np.nan, 'foo', np.nan, 'foo']})
tm.assert_frame_equal(df, expected)
data = """\
a,b,c,d
0,NA,1,5
"""
xp = DataFrame({'b': [np.nan], 'c': [1], 'd': [5]}, index=[0])
xp.index.name = 'a'
df = self.read_csv(StringIO(data), na_values={}, index_col=0)
tm.assert_frame_equal(df, xp)
xp = DataFrame({'b': [np.nan], 'd': [5]},
MultiIndex.from_tuples([(0, 1)]))
xp.index.names = ['a', 'c']
df = self.read_csv(StringIO(data), na_values={}, index_col=[0, 2])
tm.assert_frame_equal(df, xp)
xp = DataFrame({'b': [np.nan], 'd': [5]},
MultiIndex.from_tuples([(0, 1)]))
xp.index.names = ['a', 'c']
df = self.read_csv(StringIO(data), na_values={}, index_col=['a', 'c'])
tm.assert_frame_equal(df, xp)
@tm.network
def test_url(self):
# HTTP(S)
url = ('https://raw.github.com/pydata/pandas/master/'
'pandas/io/tests/data/salary.table')
url_table = self.read_table(url)
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salary.table')
local_table = self.read_table(localtable)
tm.assert_frame_equal(url_table, local_table)
# TODO: ftp testing
@slow
def test_file(self):
# FILE
if sys.version_info[:2] < (2, 6):
raise nose.SkipTest("file:// not supported with Python < 2.6")
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salary.table')
local_table = self.read_table(localtable)
try:
url_table = self.read_table('file://localhost/' + localtable)
except URLError:
# fails on some systems
raise nose.SkipTest("failing on %s" %
' '.join(platform.uname()).strip())
tm.assert_frame_equal(url_table, local_table)
def test_parse_tz_aware(self):
import pytz
# #1693
data = StringIO("Date,x\n2012-06-13T01:39:00Z,0.5")
# it works
result = read_csv(data, index_col=0, parse_dates=True)
stamp = result.index[0]
self.assertEqual(stamp.minute, 39)
try:
self.assertIs(result.index.tz, pytz.utc)
except AssertionError: # hello Yaroslav
arr = result.index.to_pydatetime()
result = tools.to_datetime(arr, utc=True)[0]
self.assertEqual(stamp.minute, result.minute)
self.assertEqual(stamp.hour, result.hour)
self.assertEqual(stamp.day, result.day)
def test_multiple_date_cols_index(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
xp = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]},
index_col='nominal')
tm.assert_frame_equal(xp.set_index('nominal'), df)
df2 = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]},
index_col=0)
tm.assert_frame_equal(df2, df)
df3 = self.read_csv(StringIO(data), parse_dates=[[1, 2]], index_col=0)
tm.assert_frame_equal(df3, df, check_names=False)
def test_multiple_date_cols_chunked(self):
df = self.read_csv(StringIO(self.ts_data), parse_dates={
'nominal': [1, 2]}, index_col='nominal')
reader = self.read_csv(StringIO(self.ts_data), parse_dates={'nominal':
[1, 2]}, index_col='nominal', chunksize=2)
chunks = list(reader)
self.assertNotIn('nominalTime', df)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_multiple_date_col_named_components(self):
xp = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]},
index_col='nominal')
colspec = {'nominal': ['date', 'nominalTime']}
df = self.read_csv(StringIO(self.ts_data), parse_dates=colspec,
index_col='nominal')
tm.assert_frame_equal(df, xp)
def test_multiple_date_col_multiple_index(self):
df = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]},
index_col=['nominal', 'ID'])
xp = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]})
tm.assert_frame_equal(xp.set_index(['nominal', 'ID']), df)
def test_comment(self):
data = """A,B,C
1,2.,4.#hello world
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
df = self.read_table(StringIO(data), sep=',', comment='#',
na_values=['NaN'])
tm.assert_almost_equal(df.values, expected)
def test_bool_na_values(self):
data = """A,B,C
True,False,True
NA,True,False
False,NA,True"""
result = self.read_csv(StringIO(data))
expected = DataFrame({'A': np.array([True, nan, False], dtype=object),
'B': np.array([False, True, nan], dtype=object),
'C': [True, False, True]})
tm.assert_frame_equal(result, expected)
def test_nonexistent_path(self):
# don't segfault pls #2428
path = '%s.csv' % tm.rands(10)
self.assertRaises(Exception, self.read_csv, path)
def test_missing_trailing_delimiters(self):
data = """A,B,C,D
1,2,3,4
1,3,3,
1,4,5"""
result = self.read_csv(StringIO(data))
self.assertTrue(result['D'].isnull()[1:].all())
def test_skipinitialspace(self):
s = ('"09-Apr-2012", "01:10:18.300", 2456026.548822908, 12849, '
'1.00361, 1.12551, 330.65659, 0355626618.16711, 73.48821, '
'314.11625, 1917.09447, 179.71425, 80.000, 240.000, -350, '
'70.06056, 344.98370, 1, 1, -0.689265, -0.692787, '
'0.212036, 14.7674, 41.605, -9999.0, -9999.0, '
'-9999.0, -9999.0, -9999.0, -9999.0, 000, 012, 128')
sfile = StringIO(s)
# it's 33 columns
result = self.read_csv(sfile, names=lrange(33), na_values=['-9999.0'],
header=None, skipinitialspace=True)
self.assertTrue(pd.isnull(result.ix[0, 29]))
def test_utf16_bom_skiprows(self):
# #2298
data = u("""skip this
skip this too
A\tB\tC
1\t2\t3
4\t5\t6""")
data2 = u("""skip this
skip this too
A,B,C
1,2,3
4,5,6""")
path = '__%s__.csv' % tm.rands(10)
with tm.ensure_clean(path) as path:
for sep, dat in [('\t', data), (',', data2)]:
for enc in ['utf-16', 'utf-16le', 'utf-16be']:
bytes = dat.encode(enc)
with open(path, 'wb') as f:
f.write(bytes)
s = BytesIO(dat.encode('utf-8'))
if compat.PY3:
# somewhat False since the code never sees bytes
from io import TextIOWrapper
s = TextIOWrapper(s, encoding='utf-8')
result = self.read_csv(path, encoding=enc, skiprows=2,
sep=sep)
expected = self.read_csv(s, encoding='utf-8', skiprows=2,
sep=sep)
tm.assert_frame_equal(result, expected)
def test_utf16_example(self):
path = tm.get_data_path('utf16_ex.txt')
# it works! and is the right length
result = self.read_table(path, encoding='utf-16')
self.assertEqual(len(result), 50)
if not compat.PY3:
buf = BytesIO(open(path, 'rb').read())
result = self.read_table(buf, encoding='utf-16')
self.assertEqual(len(result), 50)
def test_converters_corner_with_nas(self):
# skip aberration observed on Win64 Python 3.2.2
if hash(np.int64(-1)) != -2:
raise nose.SkipTest("skipping because of windows hash on Python"
" 3.2.2")
csv = """id,score,days
1,2,12
2,2-5,
3,,14+
4,6-12,2"""
def convert_days(x):
x = x.strip()
if not x:
return np.nan
is_plus = x.endswith('+')
if is_plus:
x = int(x[:-1]) + 1
else:
x = int(x)
return x
def convert_days_sentinel(x):
x = x.strip()
if not x:
return np.nan
is_plus = x.endswith('+')
if is_plus:
x = int(x[:-1]) + 1
else:
x = int(x)
return x
def convert_score(x):
x = x.strip()
if not x:
return np.nan
if x.find('-') > 0:
valmin, valmax = lmap(int, x.split('-'))
val = 0.5 * (valmin + valmax)
else:
val = float(x)
return val
fh = StringIO(csv)
result = self.read_csv(fh, converters={'score': convert_score,
'days': convert_days},
na_values=['', None])
self.assertTrue(pd.isnull(result['days'][1]))
fh = StringIO(csv)
result2 = self.read_csv(fh, converters={'score': convert_score,
'days': convert_days_sentinel},
na_values=['', None])
tm.assert_frame_equal(result, result2)
def test_unicode_encoding(self):
pth = tm.get_data_path('unicode_series.csv')
result = self.read_csv(pth, header=None, encoding='latin-1')
result = result.set_index(0)
got = result[1][1632]
expected = u('\xc1 k\xf6ldum klaka (Cold Fever) (1994)')
self.assertEqual(got, expected)
def test_trailing_delimiters(self):
# #2442. grumble grumble
data = """A,B,C
1,2,3,
4,5,6,
7,8,9,"""
result = self.read_csv(StringIO(data), index_col=False)
expected = DataFrame({'A': [1, 4, 7], 'B': [2, 5, 8],
'C': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_escapechar(self):
# http://stackoverflow.com/questions/13824840/feature-request-for-
# pandas-read-csv
data = '''SEARCH_TERM,ACTUAL_URL
"bra tv bord","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"tv p\xc3\xa5 hjul","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"SLAGBORD, \\"Bergslagen\\", IKEA:s 1700-tals serie","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"'''
result = self.read_csv(StringIO(data), escapechar='\\',
quotechar='"', encoding='utf-8')
self.assertEqual(result['SEARCH_TERM'][2],
'SLAGBORD, "Bergslagen", IKEA:s 1700-tals serie')
self.assertTrue(np.array_equal(result.columns,
['SEARCH_TERM', 'ACTUAL_URL']))
def test_header_names_backward_compat(self):
# #2539
data = '1,2,3\n4,5,6'
result = self.read_csv(StringIO(data), names=['a', 'b', 'c'])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
tm.assert_frame_equal(result, expected)
data2 = 'foo,bar,baz\n' + data
result = self.read_csv(StringIO(data2), names=['a', 'b', 'c'],
header=0)
tm.assert_frame_equal(result, expected)
def test_int64_min_issues(self):
# #2599
data = 'A,B\n0,0\n0,'
result = self.read_csv(StringIO(data))
expected = DataFrame({'A': [0, 0], 'B': [0, np.nan]})
tm.assert_frame_equal(result, expected)
def test_parse_integers_above_fp_precision(self):
data = """Numbers
17007000002000191
17007000002000191
17007000002000191
17007000002000191
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000194"""
result = self.read_csv(StringIO(data))
expected = DataFrame({'Numbers': [17007000002000191,
17007000002000191,
17007000002000191,
17007000002000191,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000194]})
self.assertTrue(np.array_equal(result['Numbers'], expected['Numbers']))
def test_usecols_index_col_conflict(self):
# Issue 4201 Test that index_col as integer reflects usecols
data = """SecId,Time,Price,P2,P3
10000,2013-5-11,100,10,1
500,2013-5-12,101,11,1
"""
expected = DataFrame({'Price': [100, 101]}, index=[
datetime(2013, 5, 11), datetime(2013, 5, 12)])
expected.index.name = 'Time'
df = self.read_csv(StringIO(data), usecols=[
'Time', 'Price'], parse_dates=True, index_col=0)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
'Time', 'Price'], parse_dates=True, index_col='Time')
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
1, 2], parse_dates=True, index_col='Time')
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
1, 2], parse_dates=True, index_col=0)
tm.assert_frame_equal(expected, df)
expected = DataFrame(
{'P3': [1, 1], 'Price': (100, 101), 'P2': (10, 11)})
expected = expected.set_index(['Price', 'P2'])
df = self.read_csv(StringIO(data), usecols=[
'Price', 'P2', 'P3'], parse_dates=True, index_col=['Price', 'P2'])
tm.assert_frame_equal(expected, df)
def test_chunks_have_consistent_numerical_type(self):
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ["1.0", "2.0"] + integers)
with tm.assert_produces_warning(False):
df = self.read_csv(StringIO(data))
# Assert that types were coerced.
self.assertTrue(type(df.a[0]) is np.float64)
self.assertEqual(df.a.dtype, np.float)
def test_warn_if_chunks_have_mismatched_type(self):
# See test in TestCParserLowMemory.
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ['a', 'b'] + integers)
with tm.assert_produces_warning(False):
df = self.read_csv(StringIO(data))
self.assertEqual(df.a.dtype, np.object)
def test_usecols(self):
data = """\
a,b,c
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), usecols=(1, 2))
result2 = self.read_csv(StringIO(data), usecols=('b', 'c'))
exp = self.read_csv(StringIO(data))
self.assertEqual(len(result.columns), 2)
self.assertTrue((result['b'] == exp['b']).all())
self.assertTrue((result['c'] == exp['c']).all())
tm.assert_frame_equal(result, result2)
result = self.read_csv(StringIO(data), usecols=[1, 2], header=0,
names=['foo', 'bar'])
expected = self.read_csv(StringIO(data), usecols=[1, 2])
expected.columns = ['foo', 'bar']
tm.assert_frame_equal(result, expected)
data = """\
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), names=['b', 'c'],
header=None, usecols=[1, 2])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
expected = expected[['b', 'c']]
tm.assert_frame_equal(result, expected)
result2 = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None, usecols=['b', 'c'])
tm.assert_frame_equal(result2, result)
# 5766
result = self.read_csv(StringIO(data), names=['a', 'b'],
header=None, usecols=[0, 1])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
expected = expected[['a', 'b']]
tm.assert_frame_equal(result, expected)
# length conflict, passed names and usecols disagree
self.assertRaises(ValueError, self.read_csv, StringIO(data),
names=['a', 'b'], usecols=[1], header=None)
def test_integer_overflow_bug(self):
# #2601
data = "65248E10 11\n55555E55 22\n"
result = self.read_csv(StringIO(data), header=None, sep=' ')
self.assertTrue(result[0].dtype == np.float64)
result = self.read_csv(StringIO(data), header=None, sep='\s+')
self.assertTrue(result[0].dtype == np.float64)
def test_catch_too_many_names(self):
# Issue 5156
data = """\
1,2,3
4,,6
7,8,9
10,11,12\n"""
tm.assertRaises(Exception, read_csv, StringIO(data),
header=0, names=['a', 'b', 'c', 'd'])
def test_ignore_leading_whitespace(self):
# GH 6607, GH 3374
data = ' a b c\n 1 2 3\n 4 5 6\n 7 8 9'
result = self.read_table(StringIO(data), sep='\s+')
expected = DataFrame({'a': [1, 4, 7], 'b': [2, 5, 8], 'c': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_nrows_and_chunksize_raises_notimplemented(self):
data = 'a b c'
self.assertRaises(NotImplementedError, self.read_csv, StringIO(data),
nrows=10, chunksize=5)
def test_single_char_leading_whitespace(self):
# GH 9710
data = """\
MyColumn
a
b
a
b\n"""
expected = DataFrame({'MyColumn': list('abab')})
result = self.read_csv(StringIO(data), skipinitialspace=True)
tm.assert_frame_equal(result, expected)
def test_chunk_begins_with_newline_whitespace(self):
# GH 10022
data = '\n hello\nworld\n'
result = self.read_csv(StringIO(data), header=None)
self.assertEqual(len(result), 2)
# GH 9735
chunk1 = 'a' * (1024 * 256 - 2) + '\na'
chunk2 = '\n a'
result = pd.read_csv(StringIO(chunk1 + chunk2), header=None)
expected = pd.DataFrame(['a' * (1024 * 256 - 2), 'a', ' a'])
tm.assert_frame_equal(result, expected)
def test_empty_with_index(self):
# GH 10184
data = 'x,y'
result = self.read_csv(StringIO(data), index_col=0)
expected = DataFrame([], columns=['y'], index=Index([], name='x'))
tm.assert_frame_equal(result, expected)
def test_empty_with_multiindex(self):
# GH 10467
data = 'x,y,z'
result = self.read_csv(StringIO(data), index_col=['x', 'y'])
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays([[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_reversed_multiindex(self):
data = 'x,y,z'
result = self.read_csv(StringIO(data), index_col=[1, 0])
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays([[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_index_col_scenarios(self):
data = 'x,y,z'
# None, no index
index_col, expected = None, DataFrame([], columns=list('xyz')),
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# False, no index
index_col, expected = False, DataFrame([], columns=list('xyz')),
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# int, first column
index_col, expected = 0, DataFrame(
[], columns=['y', 'z'], index=Index([], name='x'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# int, not first column
index_col, expected = 1, DataFrame(
[], columns=['x', 'z'], index=Index([], name='y'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# str, first column
index_col, expected = 'x', DataFrame(
[], columns=['y', 'z'], index=Index([], name='x'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# str, not the first column
index_col, expected = 'y', DataFrame(
[], columns=['x', 'z'], index=Index([], name='y'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# list of int
index_col, expected = [0, 1], DataFrame([], columns=['z'],
index=MultiIndex.from_arrays([[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
# list of str
index_col = ['x', 'y']
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
# list of int, reversed sequence
index_col = [1, 0]
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
# list of str, reversed sequence
index_col = ['y', 'x']
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
def test_empty_with_index_col_false(self):
# GH 10413
data = 'x,y'
result = self.read_csv(StringIO(data), index_col=False)
expected = DataFrame([], columns=['x', 'y'])
tm.assert_frame_equal(result, expected)
def test_float_parser(self):
# GH 9565
data = '45e-1,4.5,45.,inf,-inf'
result = self.read_csv(StringIO(data), header=None)
expected = pd.DataFrame([[float(s) for s in data.split(',')]])
tm.assert_frame_equal(result, expected)
def test_int64_overflow(self):
data = """ID
00013007854817840016671868
00013007854817840016749251
00013007854817840016754630
00013007854817840016781876
00013007854817840017028824
00013007854817840017963235
00013007854817840018860166"""
result = self.read_csv(StringIO(data))
self.assertTrue(result['ID'].dtype == object)
self.assertRaises((OverflowError, pandas.parser.OverflowError),
self.read_csv, StringIO(data),
converters={'ID': np.int64})
# Just inside int64 range: parse as integer
i_max = np.iinfo(np.int64).max
i_min = np.iinfo(np.int64).min
for x in [i_max, i_min]:
result = pd.read_csv(StringIO(str(x)), header=None)
expected = pd.DataFrame([x])
tm.assert_frame_equal(result, expected)
# Just outside int64 range: parse as string
too_big = i_max + 1
too_small = i_min - 1
for x in [too_big, too_small]:
result = pd.read_csv(StringIO(str(x)), header=None)
expected = pd.DataFrame([str(x)])
tm.assert_frame_equal(result, expected)
def test_empty_with_nrows_chunksize(self):
# GH 9535
expected = pd.DataFrame([], columns=['foo', 'bar'])
result = self.read_csv(StringIO('foo,bar\n'), nrows=10)
tm.assert_frame_equal(result, expected)
result = next(iter(pd.read_csv(StringIO('foo,bar\n'), chunksize=10)))
tm.assert_frame_equal(result, expected)
result = pd.read_csv(StringIO('foo,bar\n'), nrows=10, as_recarray=True)
result = pd.DataFrame(result[2], columns=result[1], index=result[0])
tm.assert_frame_equal(pd.DataFrame.from_records(
result), expected, check_index_type=False)
result = next(
iter(pd.read_csv(StringIO('foo,bar\n'), chunksize=10, as_recarray=True)))
result = pd.DataFrame(result[2], columns=result[1], index=result[0])
tm.assert_frame_equal(pd.DataFrame.from_records(
result), expected, check_index_type=False)
def test_eof_states(self):
# GH 10728 and 10548
# With skip_blank_lines = True
expected = pd.DataFrame([[4, 5, 6]], columns=['a', 'b', 'c'])
# GH 10728
# WHITESPACE_LINE
data = 'a,b,c\n4,5,6\n '
result = self.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
# GH 10548
# EAT_LINE_COMMENT
data = 'a,b,c\n4,5,6\n#comment'
result = self.read_csv(StringIO(data), comment='#')
tm.assert_frame_equal(result, expected)
# EAT_CRNL_NOP
data = 'a,b,c\n4,5,6\n\r'
result = self.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
# EAT_COMMENT
data = 'a,b,c\n4,5,6#comment'
result = self.read_csv(StringIO(data), comment='#')
tm.assert_frame_equal(result, expected)
# SKIP_LINE
data = 'a,b,c\n4,5,6\nskipme'
result = self.read_csv(StringIO(data), skiprows=[2])
tm.assert_frame_equal(result, expected)
# With skip_blank_lines = False
# EAT_LINE_COMMENT
data = 'a,b,c\n4,5,6\n#comment'
result = self.read_csv(
StringIO(data), comment='#', skip_blank_lines=False)
expected = pd.DataFrame([[4, 5, 6]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# IN_FIELD
data = 'a,b,c\n4,5,6\n '
result = self.read_csv(StringIO(data), skip_blank_lines=False)
expected = pd.DataFrame(
[['4', 5, 6], [' ', None, None]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# EAT_CRNL
data = 'a,b,c\n4,5,6\n\r'
result = self.read_csv(StringIO(data), skip_blank_lines=False)
expected = pd.DataFrame(
[[4, 5, 6], [None, None, None]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# Should produce exceptions
# ESCAPED_CHAR
data = "a,b,c\n4,5,6\n\\"
self.assertRaises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
# ESCAPE_IN_QUOTED_FIELD
data = 'a,b,c\n4,5,6\n"\\'
self.assertRaises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
# IN_QUOTED_FIELD
data = 'a,b,c\n4,5,6\n"'
self.assertRaises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
class TestPythonParser(ParserTests, tm.TestCase):
def test_negative_skipfooter_raises(self):
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
with tm.assertRaisesRegexp(ValueError,
'skip footer cannot be negative'):
df = self.read_csv(StringIO(text), skipfooter=-1)
def read_csv(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'python'
return read_csv(*args, **kwds)
def read_table(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'python'
return read_table(*args, **kwds)
def test_sniff_delimiter(self):
text = """index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
"""
data = self.read_csv(StringIO(text), index_col=0, sep=None)
self.assertTrue(data.index.equals(Index(['foo', 'bar', 'baz'])))
data2 = self.read_csv(StringIO(text), index_col=0, delimiter='|')
tm.assert_frame_equal(data, data2)
text = """ignore this
ignore this too
index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
"""
data3 = self.read_csv(StringIO(text), index_col=0,
sep=None, skiprows=2)
tm.assert_frame_equal(data, data3)
text = u("""ignore this
ignore this too
index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
""").encode('utf-8')
s = BytesIO(text)
if compat.PY3:
# somewhat artificial, since in Python 3 the code never actually sees bytes
from io import TextIOWrapper
s = TextIOWrapper(s, encoding='utf-8')
data4 = self.read_csv(s, index_col=0, sep=None, skiprows=2,
encoding='utf-8')
tm.assert_frame_equal(data, data4)
def test_regex_separator(self):
data = """ A B C D
a 1 2 3 4
b 1 2 3 4
c 1 2 3 4
"""
df = self.read_table(StringIO(data), sep='\s+')
expected = self.read_csv(StringIO(re.sub('[ ]+', ',', data)),
index_col=0)
self.assertIsNone(expected.index.name)
tm.assert_frame_equal(df, expected)
def test_1000_fwf(self):
data = """
1 2,334.0 5
10 13 10.
"""
expected = [[1, 2334., 5],
[10, 13, 10]]
df = read_fwf(StringIO(data), colspecs=[(0, 3), (3, 11), (12, 16)],
thousands=',')
tm.assert_almost_equal(df.values, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_comment_fwf(self):
data = """
1 2. 4 #hello world
5 NaN 10.0
"""
expected = [[1, 2., 4],
[5, np.nan, 10.]]
df = read_fwf(StringIO(data), colspecs=[(0, 3), (4, 9), (9, 25)],
comment='#')
tm.assert_almost_equal(df.values, expected)
def test_fwf(self):
data_expected = """\
2011,58,360.242940,149.910199,11950.7
2011,59,444.953632,166.985655,11788.4
2011,60,364.136849,183.628767,11806.2
2011,61,413.836124,184.375703,11916.8
2011,62,502.953953,173.237159,12468.3
"""
expected = self.read_csv(StringIO(data_expected), header=None)
data1 = """\
201158 360.242940 149.910199 11950.7
201159 444.953632 166.985655 11788.4
201160 364.136849 183.628767 11806.2
201161 413.836124 184.375703 11916.8
201162 502.953953 173.237159 12468.3
"""
colspecs = [(0, 4), (4, 8), (8, 20), (21, 33), (34, 43)]
df = read_fwf(StringIO(data1), colspecs=colspecs, header=None)
tm.assert_frame_equal(df, expected)
data2 = """\
2011 58 360.242940 149.910199 11950.7
2011 59 444.953632 166.985655 11788.4
2011 60 364.136849 183.628767 11806.2
2011 61 413.836124 184.375703 11916.8
2011 62 502.953953 173.237159 12468.3
"""
df = read_fwf(StringIO(data2), widths=[5, 5, 13, 13, 7], header=None)
tm.assert_frame_equal(df, expected)
# From <NAME>: apparently some non-space filler characters can
# be seen, this is supported by specifying the 'delimiter' character:
# http://publib.boulder.ibm.com/infocenter/dmndhelp/v6r1mx/index.jsp?topic=/com.ibm.wbit.612.help.config.doc/topics/rfixwidth.html
data3 = """\
201158~~~~360.242940~~~149.910199~~~11950.7
201159~~~~444.953632~~~166.985655~~~11788.4
201160~~~~364.136849~~~183.628767~~~11806.2
201161~~~~413.836124~~~184.375703~~~11916.8
201162~~~~502.953953~~~173.237159~~~12468.3
"""
df = read_fwf(
StringIO(data3), colspecs=colspecs, delimiter='~', header=None)
tm.assert_frame_equal(df, expected)
with tm.assertRaisesRegexp(ValueError, "must specify only one of"):
read_fwf(StringIO(data3), colspecs=colspecs, widths=[6, 10, 10, 7])
with tm.assertRaisesRegexp(ValueError, "Must specify either"):
read_fwf(StringIO(data3), colspecs=None, widths=None)
def test_fwf_colspecs_is_list_or_tuple(self):
with tm.assertRaisesRegexp(TypeError,
'column specifications must be a list or '
'tuple.+'):
pd.io.parsers.FixedWidthReader(StringIO(self.data1),
{'a': 1}, ',', '#')
def test_fwf_colspecs_is_list_or_tuple_of_two_element_tuples(self):
with tm.assertRaisesRegexp(TypeError,
'Each column specification must be.+'):
read_fwf(StringIO(self.data1), [('a', 1)])
def test_fwf_colspecs_None(self):
# GH 7079
data = """\
123456
456789
"""
colspecs = [(0, 3), (3, None)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123, 456], [456, 789]])
tm.assert_frame_equal(result, expected)
colspecs = [(None, 3), (3, 6)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123, 456], [456, 789]])
tm.assert_frame_equal(result, expected)
colspecs = [(0, None), (3, None)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123456, 456], [456789, 789]])
tm.assert_frame_equal(result, expected)
colspecs = [(None, None), (3, 6)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123456, 456], [456789, 789]])
tm.assert_frame_equal(result, expected)
def test_fwf_regression(self):
# GH 3594
# turns out 'T060' is parsable as a datetime slice!
tzlist = [1, 10, 20, 30, 60, 80, 100]
ntz = len(tzlist)
tcolspecs = [16] + [8] * ntz
tcolnames = ['SST'] + ["T%03d" % z for z in tzlist[1:]]
data = """ 2009164202000 9.5403 9.4105 8.6571 7.8372 6.0612 5.8843 5.5192
2009164203000 9.5435 9.2010 8.6167 7.8176 6.0804 5.8728 5.4869
2009164204000 9.5873 9.1326 8.4694 7.5889 6.0422 5.8526 5.4657
2009164205000 9.5810 9.0896 8.4009 7.4652 6.0322 5.8189 5.4379
2009164210000 9.6034 9.0897 8.3822 7.4905 6.0908 5.7904 5.4039
"""
df = read_fwf(StringIO(data),
index_col=0,
header=None,
names=tcolnames,
widths=tcolspecs,
parse_dates=True,
date_parser=lambda s: datetime.strptime(s, '%Y%j%H%M%S'))
for c in df.columns:
res = df.loc[:, c]
self.assertTrue(len(res))
def test_fwf_for_uint8(self):
data = """1421302965.213420 PRI=3 PGN=0xef00 DST=0x17 SRC=0x28 04 154 00 00 00 00 00 127
1421302964.226776 PRI=6 PGN=0xf002 SRC=0x47 243 00 00 255 247 00 00 71"""
df = read_fwf(StringIO(data),
colspecs=[(0, 17), (25, 26), (33, 37),
(49, 51), (58, 62), (63, 1000)],
names=['time', 'pri', 'pgn', 'dst', 'src', 'data'],
converters={
'pgn': lambda x: int(x, 16),
'src': lambda x: int(x, 16),
'dst': lambda x: int(x, 16),
'data': lambda x: len(x.split(' '))})
expected = DataFrame([[1421302965.213420, 3, 61184, 23, 40, 8],
[1421302964.226776, 6, 61442, None, 71, 8]],
columns=["time", "pri", "pgn", "dst", "src", "data"])
expected["dst"] = expected["dst"].astype(object)
tm.assert_frame_equal(df, expected)
def test_fwf_compression(self):
try:
import gzip
import bz2
except ImportError:
raise nose.SkipTest("Need gzip and bz2 to run this test")
data = """1111111111
2222222222
3333333333""".strip()
widths = [5, 5]
names = ['one', 'two']
expected = read_fwf(StringIO(data), widths=widths, names=names)
if compat.PY3:
data = bytes(data, encoding='utf-8')
comps = [('gzip', gzip.GzipFile), ('bz2', bz2.BZ2File)]
for comp_name, compresser in comps:
with tm.ensure_clean() as path:
tmp = compresser(path, mode='wb')
tmp.write(data)
tmp.close()
result = read_fwf(path, widths=widths, names=names,
compression=comp_name)
tm.assert_frame_equal(result, expected)
def test_BytesIO_input(self):
if not compat.PY3:
raise nose.SkipTest(
"Bytes-related test - only needs to work on Python 3")
result = pd.read_fwf(BytesIO("שלום\nשלום".encode('utf8')), widths=[
2, 2], encoding='utf8')
expected = pd.DataFrame([["של", "ום"]], columns=["של", "ום"])
tm.assert_frame_equal(result, expected)
data = BytesIO("שלום::1234\n562::123".encode('cp1255'))
result = pd.read_table(data, sep="::", engine='python',
encoding='cp1255')
expected = pd.DataFrame([[562, 123]], columns=["שלום", "1234"])
tm.assert_frame_equal(result, expected)
def test_verbose_import(self):
text = """a,b,c,d
one,1,2,3
one,1,2,3
,1,2,3
one,1,2,3
,1,2,3
,1,2,3
one,1,2,3
two,1,2,3"""
buf = StringIO()
sys.stdout = buf
try:
# it works!
df = self.read_csv(StringIO(text), verbose=True)
self.assertEqual(
buf.getvalue(), 'Filled 3 NA values in column a\n')
finally:
sys.stdout = sys.__stdout__
buf = StringIO()
sys.stdout = buf
text = """a,b,c,d
one,1,2,3
two,1,2,3
three,1,2,3
four,1,2,3
five,1,2,3
,1,2,3
seven,1,2,3
eight,1,2,3"""
try:
# it works!
df = self.read_csv(StringIO(text), verbose=True, index_col=0)
self.assertEqual(
buf.getvalue(), 'Filled 1 NA values in column a\n')
finally:
sys.stdout = sys.__stdout__
def test_float_precision_specified(self):
# Should raise an error if float_precision (C parser option) is
# specified
with tm.assertRaisesRegexp(ValueError, "The 'float_precision' option "
"is not supported with the 'python' engine"):
self.read_csv(StringIO('a,b,c\n1,2,3'), float_precision='high')
def test_iteration_open_handle(self):
if PY3:
raise nose.SkipTest(
"won't work in Python 3 {0}".format(sys.version_info))
with tm.ensure_clean() as path:
with open(path, 'wb') as f:
f.write('AAA\nBBB\nCCC\nDDD\nEEE\nFFF\nGGG')
with open(path, 'rb') as f:
for line in f:
if 'CCC' in line:
break
try:
read_table(f, squeeze=True, header=None, engine='c')
except Exception:
pass
else:
raise ValueError('this should not happen')
result = read_table(f, squeeze=True, header=None,
engine='python')
expected = Series(['DDD', 'EEE', 'FFF', 'GGG'], name=0)
tm.assert_series_equal(result, expected)
def test_iterator(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the issue with the C parser is fixed
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunk = reader.read(3)
tm.assert_frame_equal(chunk, df[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, df[3:])
# pass list
lines = list(csv.reader(StringIO(self.data1)))
parser = TextParser(lines, index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# pass skiprows
parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1])
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[1:3])
# test bad parameter (skip_footer)
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True, skip_footer=True)
self.assertRaises(ValueError, reader.read, 3)
treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,
iterator=True)
|
tm.assertIsInstance(treader, TextFileReader)
|
pandas.util.testing.assertIsInstance
|
import pandas as pd
import numpy as np
import scipy as sp
import math
from scipy import stats
from biofes import biplot
from sklearn.utils.extmath import randomized_svd
from sklearn.cluster import KMeans, AgglomerativeClustering
from sklearn import metrics
from sklearn.metrics import pairwise_distances
import seaborn as sns
import matplotlib.pyplot as plt
## Helper functions
# Function to find the index (name) of a label in the glossary:
def get_name_gloss(gloss, word):
for i, val in enumerate(gloss[0]):
if word.lower() in str(val).lower():
return i, gloss.index[i]
# Function to find the label for a given index in the glossary:
def get_name_glossind(gloss,word):
for i, val in enumerate(gloss.index):
if word.lower() in str(val).lower():
return i, gloss.loc[val][0]
# Function to add new columns under a given condition:
def add_columns(data,lst,col_neu):
for i in data.index:
for el in data.loc[i,lst]:
if el == 1:
data.loc[i,col_neu] = 1
pass
## Plots
# Function to plot several histograms:
def graf_hist(D,l,sx,sy,nrow,ncol):
fig = plt.figure(figsize=(sx,sy))
for i, col in enumerate(l):
plt.subplot(nrow,ncol,i+1)
plt.hist(D[D[col]!=0][col],color = '#0063A6')
plt.xticks([1,2,3,4,5])
plt.title(col,size=13)
plt.show()
# Function to plot the Likert scales:
def graf_likert(D,sy):
likert_colors = ['white', 'firebrick','lightcoral','gainsboro','cornflowerblue', 'darkblue']
df = D.apply(pd.value_counts).T
if 0 in df.columns:
df = df.drop(0, axis=1).astype(int)
middles = df[[1, 2]].sum(axis=1)+df[3]*.5
longest = middles.max()
complete_longest = df.sum(axis=1).max()
df.insert(0, '', (middles - longest).abs())
df.plot.barh(stacked=True, figsize=(8,sy), color=likert_colors, edgecolor='none', legend=False)
z = plt.axvline(longest, linestyle='--', color='black', alpha=.5)
z.set_zorder(-1)
plt.show()
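# Hedged usage sketch (not part of the original analysis): graf_likert centers each bar
# on its neutral category by prepending an invisible offset column before plotting.
# The column names and answer values below are made up purely for illustration.
def _example_graf_likert():
    demo = pd.DataFrame({'Q1': [1, 2, 3, 4, 5, 5, 4],
                         'Q2': [2, 2, 3, 3, 4, 5, 1]})  # synthetic 5-point Likert answers
    graf_likert(demo, sy=2)  # draws two horizontally stacked bars, centered on category 3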
# Function to plot the Likert scales (version for the pilot study):
def graf_likert1(D,sy):
likert_colors = ['white', 'firebrick','lightcoral','gainsboro','cornflowerblue', 'darkblue']
df = D.apply(pd.value_counts).T
if 0 in df.columns:
df = df.drop(0, axis=1)
middles = df[[1, 2]].sum(axis=1)+df[3]*.5
longest = middles.max()
complete_longest = df.sum(axis=1).max()
df.insert(0, '', (middles - longest).abs())
df.plot.barh(stacked=True, figsize=(8,sy), color=likert_colors, edgecolor='none', legend=False)
z = plt.axvline(longest, linestyle='--', color='black', alpha=.5)
z.set_zorder(-1)
plt.show()
# Differences as bar charts for values excluding 0:
def graf_diff(data,item,var):
df = pd.crosstab(data[data[var]!=0][item],data[data[var]!=0][var])
return df.plot(kind='bar')
# Differences as bar charts for values including 0:
def graf_diff2(data,item,var):
df = pd.crosstab(data[item],data[var])
return df.plot(kind='bar')
# Differences as bar charts for values excluding 0, normalized representation:
def graf_diff_norm(data,item,var):
df = pd.crosstab(data[data[var]!=0][item],data[data[var]!=0][var],normalize='index')
for col in df.columns:
print('n('+ str(col) + ')= ' + str(len(data[data[var] == col])))
return df.plot(kind='bar',stacked=True)
# Differences as Likert charts for values excluding 0:
def graf_diff_likert(data,item,var):
data = data[(data[var]!=0) & (data[item]!=0)]
likert_colors = ['firebrick','lightcoral','gainsboro','cornflowerblue', 'darkblue']
df = pd.crosstab(data[item],data[var])
for col in df.columns:
df[col] = df[col]*100/df[col].sum()
print('n('+ str(col) + ')= ' + str(len(data[data[var] == col])))
return df.T.plot(kind='barh', stacked=True, color=likert_colors, legend=None)
# Differences as Likert charts for values (by group) excluding 0:
def graf_diff_likert2(data,item,var):
data = data[data[item]!=0]
likert_colors = ['firebrick','lightcoral','gainsboro','cornflowerblue', 'darkblue']
df = pd.crosstab(data[item],data[var])
for col in df.columns:
df[col] = df[col]*100/df[col].sum()
print('n('+ str(col) + ')= ' + str(len(data[data[var] == col])))
return df.T.plot(kind='barh', stacked=True, color=likert_colors, legend=None)
# Define markers:
def vector_to_shape(v):
markers = ["p", "," , "o" , "v" , "x", "^", "D", "*", "1","<",">","d"]
return [markers[el] for el in v]
# Define colors:
def vector_to_color(v):
col = ['orange','b', 'g', 'r', 'm', 'c', 'y', 'k', 'saddlebrown','pink','grey','indigo']
return [col[el] for el in v]
## Statistics
# Descriptive statistics:
def df_describe(data,lst):
df = pd.DataFrame(data[lst]).describe()
df = df.T
mode = pd.DataFrame(data[lst]).mode().T
median = pd.DataFrame(data[lst]).median().T
kurtosis = pd.DataFrame(data[lst]).kurtosis().T
skew = pd.DataFrame(data[lst]).skew().T
df['mode'] = mode
df['median'] = median
df['kurtosis'] = kurtosis
df['skew'] = skew
return df
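# Hedged usage sketch (illustrative data only, not from the study): df_describe extends
# pandas' describe() output with mode, median, kurtosis and skew per item. The item
# names and scores below are made up; each column is deliberately unimodal so the
# single-row mode() result assigns cleanly.
def _example_df_describe():
    demo = pd.DataFrame({'item1': [1, 2, 2, 3, 4, 5],
                         'item2': [2, 2, 3, 3, 3, 4]})
    return df_describe(demo, ['item1', 'item2'])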
def df_describe1(data,lst):
df = pd.DataFrame(data[lst]).describe()
df = df.T
median = pd.DataFrame(data[lst]).median().T
kurtosis = pd.DataFrame(data[lst]).kurtosis().T
skew = pd.DataFrame(data[lst]).skew().T
df['median'] = median
df['kurtosis'] = kurtosis
df['skew'] = skew
return df
# Overview of response behavior in percent:
def table_porc(D):
df = D.apply(pd.value_counts).fillna(0).astype(int).T
total_row = df.sum(axis=1)
for i in df.index:
df.loc[i] = df.loc[i]*100/total_row.loc[i]
df['n'] = total_row
return df
# Cronbach's Alpha:
def CronbachAlpha(itemscores):
itemscores = np.asarray(itemscores)
itemvars = itemscores.var(axis=0, ddof=1)
tscores = itemscores.sum(axis=1)
nitems = itemscores.shape[1]
calpha = nitems / float(nitems-1) * (1 - itemvars.sum() / float(tscores.var(ddof=1)))
return calpha
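# Hedged worked example (synthetic scores, not study data): for k items, Cronbach's
# alpha = k/(k-1) * (1 - sum(item variances) / variance of total scores), which is
# exactly what CronbachAlpha computes on a respondents-by-items matrix.
def _example_cronbach_alpha():
    scores = np.array([[3, 4, 3],
                       [4, 5, 4],
                       [2, 3, 2],
                       [5, 5, 4]])  # 4 respondents x 3 items
    return CronbachAlpha(scores)  # roughly 0.975 here, since the items move together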
# Spearman's Rho:
def df_spearman(data,lst):
df = data[lst].corr(method = 'spearman')
df = df.style.applymap(lambda x: 'background-color : salmon' if abs(x) > 0.5
else 'background-color: lightsalmon' if abs(x) > 0.3 and abs(x) <= 0.5
else 'background-color: peachpuff' if abs(x) > 0.1 and abs(x) <= 0.3
else 'background-color : white')
return df
def df_spearman_pv(data,lst):
corr, pv = stats.spearmanr(data[lst])
df =
|
pd.DataFrame(pv)
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
from __future__ import division
import pandas as pd
import numpy as np
from .exceptions import MaxLossExceededError, non_unique_bin_edges_error
from .utils import get_forward_returns_columns
@non_unique_bin_edges_error
def quantize_factor(
factor_data, quantiles=5, bins=None, by_group=False, no_raise=False, zero_aware=False,
):
"""
计算每期因子分位数
参数
----------
factor_data : pd.DataFrame - MultiIndex
一个 DataFrame, index 为日期 (level 0) 和资产(level 1) 的 MultiIndex,
values 包括因子的值, 各期因子远期收益, 因子分位数,
因子分组(可选), 因子权重(可选)
quantiles : int or sequence[float]
在因子分组中按照因子值大小平均分组的组数。
或分位数序列, 允许不均匀分组
例如 [0, .10, .5, .90, 1.] 或 [.05, .5, .95]
'quantiles' 和 'bins' 有且只能有一个不为 None
bins : int or sequence[float]
在因子分组中使用的等宽 (按照因子值) 区间的数量
或边界值序列, 允许不均匀的区间宽度
例如 [-4, -2, -0.5, 0, 10]
'quantiles' 和 'bins' 有且只能有一个不为 None
by_group : bool
如果是 True, 按照 group 分别计算分位数
no_raise: bool, optional
如果为 True,则不抛出任何异常,并且将抛出异常的值设置为 np.NaN
zero_aware : bool, optional
如果为True,则分别为正负因子值计算分位数。
适用于您的信号聚集并且零是正值和负值的分界线的情况.
返回值
-------
factor_quantile : pd.Series
index 为日期 (level 0) 和资产(level 1) 的因子分位数
"""
if not ((quantiles is not None and bins is None) or
(quantiles is None and bins is not None)):
raise ValueError("Either 'quantiles' or 'bins' must be provided, but not both")
if zero_aware and not (isinstance(quantiles, int)
or isinstance(bins, int)):
msg = ("'zero_aware' can only be True when 'quantiles' or 'bins' is an int")
raise ValueError(msg)
def quantile_calc(x, _quantiles, _bins, _zero_aware, _no_raise):
try:
if _quantiles is not None and _bins is None and not _zero_aware:
return
|
pd.qcut(x, _quantiles, labels=False)
|
pandas.qcut
|
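# Hedged sketch (standalone illustration, not part of the snippet above): the
# quantile/bin split described in quantize_factor's docstring maps directly onto
# pandas -- pd.qcut for equal-sized quantile buckets and pd.cut for equal-width or
# custom-edge bins. The sample factor values below are made up.
import pandas as pd

factor = pd.Series([0.3, -1.2, 0.8, 2.1, -0.4, 1.5])
by_quantile = pd.qcut(factor, 3, labels=False)              # three equal-sized buckets, codes 0..2
by_bins = pd.cut(factor, [-4, -0.5, 0.5, 4], labels=False)  # custom bin edges, codes 0..2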
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
def test_schemas(c):
df = c.sql("SHOW SCHEMAS")
df = df.compute()
expected_df = pd.DataFrame({"Schema": [c.schema_name, "information_schema"]})
|
assert_frame_equal(df, expected_df)
|
pandas.testing.assert_frame_equal
|
import os
import io
import random
import string
import re
import json
import pandas as pd
import numpy as np
from collections import OrderedDict
import nltk
from nltk import FreqDist
from nltk.tokenize import word_tokenize
from nltk.stem.wordnet import WordNetLemmatizer
import config
EMPH_TOKEN = config.EMPH_TOKEN
CONTRAST_TOKEN = config.CONTRAST_TOKEN
CONCESSION_TOKEN = config.CONCESSION_TOKEN
# TODO: redesign the data loading so as to be object-oriented
def load_training_data(data_trainset, data_devset, input_concat=False, generate_vocab=False, skip_if_exist=True):
"""Generate source and target files in the required input format for the model training.
"""
training_source_file = os.path.join(config.DATA_DIR, 'training_source.txt')
training_target_file = os.path.join(config.DATA_DIR, 'training_target.txt')
dev_source_file = os.path.join(config.DATA_DIR, 'dev_source.txt')
dev_target_file = os.path.join(config.DATA_DIR, 'dev_target.txt')
if skip_if_exist:
# If there is an existing source and target file, skip their generation
if os.path.isfile(training_source_file) and \
os.path.isfile(training_target_file) and \
os.path.isfile(dev_source_file) and \
os.path.isfile(dev_target_file):
print('Found existing input files. Skipping their generation.')
return
dataset = init_training_data(data_trainset, data_devset)
dataset_name = dataset['dataset_name']
x_train, y_train, x_dev, y_dev = dataset['data']
_, _, slot_sep, val_sep, val_sep_end = dataset['separators']
# Preprocess the MRs and the utterances
x_train = [preprocess_mr(x, dataset['separators']) for x in x_train]
x_dev = [preprocess_mr(x, dataset['separators']) for x in x_dev]
y_train = [preprocess_utterance(y) for y in y_train]
y_dev = [preprocess_utterance(y) for y in y_dev]
# Produce sequences of extracted words from the meaning representations (MRs) in the trainset
x_train_seq = []
for i, mr in enumerate(x_train):
slot_ctr = 0
emph_idxs = set()
# contrast_idxs = set()
# concession_idxs = set()
mr_dict = OrderedDict()
# Extract the slot-value pairs into a dictionary
for slot_value in mr.split(slot_sep):
slot, value, _, _ = parse_slot_and_value(slot_value, val_sep, val_sep_end)
if slot == EMPH_TOKEN:
emph_idxs.add(slot_ctr)
# elif slot == CONTRAST_TOKEN:
# contrast_idxs.add(slot_ctr)
# elif slot == CONCESSION_TOKEN:
# concession_idxs.add(slot_ctr)
else:
mr_dict[slot] = value
slot_ctr += 1
# Delexicalize the MR and the utterance
y_train[i] = delex_sample(mr_dict, y_train[i], dataset=dataset_name, input_concat=input_concat)
slot_ctr = 0
# Convert the dictionary to a list
x_train_seq.append([])
for key, val in mr_dict.items():
# Insert the emphasis token where appropriate
if slot_ctr in emph_idxs:
x_train_seq[i].append(EMPH_TOKEN)
# Insert the contrast token where appropriate
# if slot_ctr in contrast_idxs:
# x_train_seq[i].append(CONTRAST_TOKEN)
# # Insert the concession token where appropriate
# if slot_ctr in concession_idxs:
# x_train_seq[i].append(CONCESSION_TOKEN)
if len(val) > 0:
x_train_seq[i].extend([key] + val.split())
else:
x_train_seq[i].append(key)
slot_ctr += 1
if input_concat:
# Append a sequence-end token to be paired up with seq2seq's sequence-end token when concatenating
x_train_seq[i].append('<STOP>')
# Produce sequences of extracted words from the meaning representations (MRs) in the devset
x_dev_seq = []
for i, mr in enumerate(x_dev):
slot_ctr = 0
emph_idxs = set()
# contrast_idxs = set()
# concession_idxs = set()
mr_dict = OrderedDict()
# Extract the slot-value pairs into a dictionary
for slot_value in mr.split(slot_sep):
slot, value, _, _ = parse_slot_and_value(slot_value, val_sep, val_sep_end)
if slot == EMPH_TOKEN:
emph_idxs.add(slot_ctr)
# elif slot == CONTRAST_TOKEN:
# contrast_idxs.add(slot_ctr)
# elif slot == CONCESSION_TOKEN:
# concession_idxs.add(slot_ctr)
else:
mr_dict[slot] = value
slot_ctr += 1
# Delexicalize the MR and the utterance
y_dev[i] = delex_sample(mr_dict, y_dev[i], dataset=dataset_name, input_concat=input_concat)
slot_ctr = 0
# Convert the dictionary to a list
x_dev_seq.append([])
for key, val in mr_dict.items():
# Insert the emphasis token where appropriate
if slot_ctr in emph_idxs:
x_dev_seq[i].append(EMPH_TOKEN)
# Insert the contrast token where appropriate
# if slot_ctr in contrast_idxs:
# x_dev_seq[i].append(CONTRAST_TOKEN)
# # Insert the concession token where appropriate
# if slot_ctr in concession_idxs:
# x_dev_seq[i].append(CONCESSION_TOKEN)
if len(val) > 0:
x_dev_seq[i].extend([key] + val.split())
else:
x_dev_seq[i].append(key)
slot_ctr += 1
if input_concat:
# Append a sequence-end token to be paired up with seq2seq's sequence-end token when concatenating
x_dev_seq[i].append('<STOP>')
y_train_seq = [word_tokenize(y) for y in y_train]
y_dev_seq = [word_tokenize(y) for y in y_dev]
# Generate a vocabulary file if necessary
if generate_vocab:
generate_vocab_file(np.concatenate(x_train_seq + x_dev_seq + y_train_seq + y_dev_seq),
vocab_filename='vocab.lang_gen.tokens')
# generate_vocab_file(np.concatenate(x_train_seq + x_dev_seq),
# vocab_filename='vocab.lang_gen_multi_vocab.source')
# generate_vocab_file(np.concatenate(y_train_seq + y_dev_seq),
# vocab_filename='vocab.lang_gen_multi_vocab.target')
with io.open(training_source_file, 'w', encoding='utf8') as f_x_train:
for line in x_train_seq:
f_x_train.write('{}\n'.format(' '.join(line)))
with io.open(training_target_file, 'w', encoding='utf8') as f_y_train:
for line in y_train:
f_y_train.write(line + '\n')
with io.open(dev_source_file, 'w', encoding='utf8') as f_x_dev:
for line in x_dev_seq:
f_x_dev.write('{}\n'.format(' '.join(line)))
with io.open(dev_target_file, 'w', encoding='utf8') as f_y_dev:
for line in y_dev:
f_y_dev.write(line + '\n')
return np.concatenate(x_train_seq + x_dev_seq + y_train_seq + y_dev_seq).flatten()
def load_test_data(data_testset, input_concat=False):
"""Generate source and target files in the required input format for the model testing.
"""
test_source_file = os.path.join(config.DATA_DIR, 'test_source.txt')
test_source_dict_file = os.path.join(config.DATA_DIR, 'test_source_dict.json')
test_target_file = os.path.join(config.DATA_DIR, 'test_target.txt')
test_reference_file = os.path.join(config.METRICS_DIR, 'test_references.txt')
dataset = init_test_data(data_testset)
dataset_name = dataset['dataset_name']
x_test, y_test = dataset['data']
_, _, slot_sep, val_sep, val_sep_end = dataset['separators']
# Preprocess the MRs
x_test = [preprocess_mr(x, dataset['separators']) for x in x_test]
# Produce sequences of extracted words from the meaning representations (MRs) in the testset
x_test_seq = []
x_test_dict = []
for i, mr in enumerate(x_test):
slot_ctr = 0
emph_idxs = set()
# contrast_idxs = set()
# concession_idxs = set()
mr_dict = OrderedDict()
mr_dict_cased = OrderedDict()
# Extract the slot-value pairs into a dictionary
for slot_value in mr.split(slot_sep):
slot, value, _, value_orig = parse_slot_and_value(slot_value, val_sep, val_sep_end)
if slot == EMPH_TOKEN:
emph_idxs.add(slot_ctr)
# elif slot == CONTRAST_TOKEN:
# contrast_idxs.add(slot_ctr)
# elif slot == CONCESSION_TOKEN:
# concession_idxs.add(slot_ctr)
else:
mr_dict[slot] = value
mr_dict_cased[slot] = value_orig
slot_ctr += 1
# Build an MR dictionary with original values
x_test_dict.append(mr_dict_cased)
# Delexicalize the MR
delex_sample(mr_dict, dataset=dataset_name, mr_only=True, input_concat=input_concat)
slot_ctr = 0
# Convert the dictionary to a list
x_test_seq.append([])
for key, val in mr_dict.items():
# Insert the emphasis token where appropriate
if slot_ctr in emph_idxs:
x_test_seq[i].append(EMPH_TOKEN)
# Insert the contrast token where appropriate
# if slot_ctr in contrast_idxs:
# x_test_seq[i].append(CONTRAST_TOKEN)
# # Insert the concession token where appropriate
# if slot_ctr in concession_idxs:
# x_test_seq[i].append(CONCESSION_TOKEN)
if len(val) > 0:
x_test_seq[i].extend([key] + val.split())
else:
x_test_seq[i].append(key)
slot_ctr += 1
if input_concat:
# Append a sequence-end token to be paired up with seq2seq's sequence-end token when concatenating
x_test_seq[i].append('<STOP>')
with io.open(test_source_file, 'w', encoding='utf8') as f_x_test:
for line in x_test_seq:
f_x_test.write('{}\n'.format(' '.join(line)))
with io.open(test_source_dict_file, 'w', encoding='utf8') as f_x_test_dict:
json.dump(x_test_dict, f_x_test_dict)
if len(y_test) > 0:
with io.open(test_target_file, 'w', encoding='utf8') as f_y_test:
for line in y_test:
f_y_test.write(line + '\n')
# Reference file for calculating metrics for test predictions
with io.open(test_reference_file, 'w', encoding='utf8') as f_y_test:
for i, line in enumerate(y_test):
if i > 0 and x_test[i] != x_test[i - 1]:
f_y_test.write('\n')
f_y_test.write(line + '\n')
def generate_vocab_file(token_sequences, vocab_filename, vocab_size=10000):
vocab_file = os.path.join(config.DATA_DIR, vocab_filename)
distr = FreqDist(token_sequences)
vocab = distr.most_common(min(len(distr), vocab_size - 3)) # cap the vocabulary size
vocab_with_reserved_tokens = ['<pad>', '<EOS>'] + list(map(lambda tup: tup[0], vocab)) + ['UNK']
with io.open(vocab_file, 'w', encoding='utf8') as f_vocab:
for token in vocab_with_reserved_tokens:
f_vocab.write('{}\n'.format(token))
def get_vocabulary(token_sequences, vocab_size=10000):
distr = FreqDist(token_sequences)
vocab = distr.most_common(min(len(distr), vocab_size)) # cap the vocabulary size
vocab_set = set(map(lambda tup: tup[0], vocab))
return vocab_set
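# Hedged usage sketch (tokens are illustrative): get_vocabulary keeps only the
# vocab_size most frequent tokens, so rarer tokens fall out of the returned set.
def _example_get_vocabulary():
    tokens = ['name', 'the', 'eagle', 'the', 'food', 'french', 'the']
    return get_vocabulary(tokens, vocab_size=3)  # e.g. {'the', 'name', 'eagle'}, ties broken arbitrarily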
# TODO: generalize and utilize in the loading functions
def tokenize_mr(mr):
"""Produces a (delexicalized) sequence of tokens from the input MR.
Method used in the client to preprocess a single MR that is sent to the service for utterance generation.
"""
slot_sep = ','
val_sep = '['
val_sep_end = ']'
mr_seq = []
slot_ctr = 0
emph_idxs = set()
mr_dict = OrderedDict()
mr_dict_cased = OrderedDict()
# Extract the slot-value pairs into a dictionary
for slot_value in mr.split(slot_sep):
slot, value, _, value_orig = parse_slot_and_value(slot_value, val_sep, val_sep_end)
if slot == EMPH_TOKEN:
emph_idxs.add(slot_ctr)
else:
mr_dict[slot] = value
mr_dict_cased[slot] = value_orig
slot_ctr += 1
# Delexicalize the MR
delex_sample(mr_dict, mr_only=True)
slot_ctr = 0
# Convert the dictionary to a list
for key, val in mr_dict.items():
# Insert the emphasis token where appropriate
if slot_ctr in emph_idxs:
mr_seq.append(EMPH_TOKEN)
if len(val) > 0:
mr_seq.extend([key] + val.split())
else:
mr_seq.append(key)
slot_ctr += 1
return mr_seq, mr_dict_cased
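# Hedged usage sketch: an E2E-style MR in the 'slot[value]' format that tokenize_mr
# expects. The MR text below is made up, and the call relies on the module's
# parse_slot_and_value and delex_sample helpers (defined elsewhere in this module).
def _example_tokenize_mr():
    mr = 'name[The Eagle], eatType[coffee shop], food[French]'
    mr_seq, mr_dict_cased = tokenize_mr(mr)
    return mr_seq, mr_dict_cased  # delexicalized token sequence plus original-cased slot values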
def load_training_data_for_eval(data_trainset, data_model_outputs_train, vocab_size, max_input_seq_len, max_output_seq_len, delex=False):
dataset_name = ''
slot_sep = ''
val_sep = ''
val_sep_end = None
if '/rest_e2e/' in data_trainset or '\\rest_e2e\\' in data_trainset:
x_train, y_train_1 = read_rest_e2e_dataset_train(data_trainset)
y_train_2 = read_predictions(data_model_outputs_train)
dataset_name = 'rest_e2e'
slot_sep = ','
val_sep = '['
val_sep_end = ']'
elif '/tv/' in data_trainset or '\\tv\\' in data_trainset:
x_train, y_train_1, y_train_2 = read_tv_dataset_train(data_trainset)
if data_model_outputs_train is not None:
y_train_2 = read_predictions(data_model_outputs_train)
dataset_name = 'tv'
slot_sep = ';'
val_sep = '='
elif '/laptop/' in data_trainset or '\\laptop\\' in data_trainset:
x_train, y_train_1, y_train_2 = read_laptop_dataset_train(data_trainset)
if data_model_outputs_train is not None:
y_train_2 = read_predictions(data_model_outputs_train)
dataset_name = 'laptop'
slot_sep = ';'
val_sep = '='
else:
raise FileNotFoundError
# parse the utterances into lists of words
y_train_1 = [preprocess_utterance(y) for y in y_train_1]
y_train_2 = [preprocess_utterance(y) for y in y_train_2]
# produce sequences of extracted words from the meaning representations (MRs) in the trainset
x_train_seq = []
for i, mr in enumerate(x_train):
mr_dict = OrderedDict()
for slot_value in mr.split(slot_sep):
slot, value, _, _ = parse_slot_and_value(slot_value, val_sep, val_sep_end)
mr_dict[slot] = value
if delex:
# delexicalize the MR and the utterance
y_train_1[i] = delex_sample(mr_dict, y_train_1[i], dataset=dataset_name, utterance_only=True)
y_train_2[i] = delex_sample(mr_dict, y_train_2[i], dataset=dataset_name)
# convert the dictionary to a list
x_train_seq.append([])
for key, val in mr_dict.items():
if len(val) > 0:
x_train_seq[i].extend([key, val])
else:
x_train_seq[i].append(key)
# create source vocabulary
if os.path.isfile('data/eval_vocab_source.json'):
with io.open('data/eval_vocab_source.json', 'r', encoding='utf8') as f_x_vocab:
x_vocab = json.load(f_x_vocab)
else:
x_distr = FreqDist([x_token for x in x_train_seq for x_token in x])
x_vocab = x_distr.most_common(min(len(x_distr), vocab_size - 2)) # cap the vocabulary size
with io.open('data/eval_vocab_source.json', 'w', encoding='utf8') as f_x_vocab:
json.dump(x_vocab, f_x_vocab, ensure_ascii=False)
x_idx2word = [word[0] for word in x_vocab]
x_idx2word.insert(0, '<PADDING>')
x_idx2word.append('<NA>')
x_word2idx = {word: idx for idx, word in enumerate(x_idx2word)}
# create target vocabulary
if os.path.isfile('data/eval_vocab_target.json'):
with io.open('data/eval_vocab_target.json', 'r', encoding='utf8') as f_y_vocab:
y_vocab = json.load(f_y_vocab)
else:
y_distr = FreqDist([y_token for y in y_train_1 for y_token in y] + [y_token for y in y_train_2 for y_token in y])
y_vocab = y_distr.most_common(min(len(y_distr), vocab_size - 2)) # cap the vocabulary size
with io.open('data/eval_vocab_target.json', 'w', encoding='utf8') as f_y_vocab:
json.dump(y_vocab, f_y_vocab, ensure_ascii=False)
y_idx2word = [word[0] for word in y_vocab]
y_idx2word.insert(0, '<PADDING>')
y_idx2word.append('<NA>')
y_word2idx = {token: idx for idx, token in enumerate(y_idx2word)}
# produce sequences of indexes from the MRs in the training set
x_train_enc = token_seq_to_idx_seq(x_train_seq, x_word2idx, max_input_seq_len)
# produce sequences of indexes from the utterances in the training set
y_train_1_enc = token_seq_to_idx_seq(y_train_1, y_word2idx, max_output_seq_len)
# produce sequences of indexes from the utterances in the training set
y_train_2_enc = token_seq_to_idx_seq(y_train_2, y_word2idx, max_output_seq_len)
# produce the list of the target labels in the training set
labels_train = np.concatenate((np.ones(len(y_train_1_enc)), np.zeros(len(y_train_2_enc))))
return (np.concatenate((np.array(x_train_enc), np.array(x_train_enc))),
np.concatenate((np.array(y_train_1_enc), np.array(y_train_2_enc))),
labels_train)
def load_dev_data_for_eval(data_devset, data_model_outputs_dev, vocab_size, max_input_seq_len, max_output_seq_len, delex=True):
dataset_name = ''
slot_sep = ''
val_sep = ''
val_sep_end = None
if '/rest_e2e/' in data_devset or '\\rest_e2e\\' in data_devset:
x_dev, y_dev_1 = read_rest_e2e_dataset_dev(data_devset)
y_dev_2 = read_predictions(data_model_outputs_dev)
dataset_name = 'rest_e2e'
slot_sep = ','
val_sep = '['
val_sep_end = ']'
elif '/tv/' in data_devset or '\\tv\\' in data_devset:
x_dev, y_dev_1, y_dev_2 = read_tv_dataset_dev(data_devset)
if data_model_outputs_dev is not None:
y_dev_2 = read_predictions(data_model_outputs_dev)
dataset_name = 'tv'
slot_sep = ';'
val_sep = '='
elif '/laptop/' in data_devset or '\\laptop\\' in data_devset:
x_dev, y_dev_1, y_dev_2 = read_laptop_dataset_dev(data_devset)
if data_model_outputs_dev is not None:
y_dev_2 = read_predictions(data_model_outputs_dev)
dataset_name = 'laptop'
slot_sep = ';'
val_sep = '='
else:
raise FileNotFoundError
# parse the utterances into lists of words
y_dev_1 = [preprocess_utterance(y) for y in y_dev_1]
y_dev_2 = [preprocess_utterance(y) for y in y_dev_2]
# produce sequences of extracted words from the meaning representations (MRs) in the devset
x_dev_seq = []
for i, mr in enumerate(x_dev):
mr_dict = OrderedDict()
for slot_value in mr.split(slot_sep):
slot, value, _, _ = parse_slot_and_value(slot_value, val_sep, val_sep_end)
mr_dict[slot] = value
if delex:
# delexicalize the MR and the utterance
y_dev_1[i] = delex_sample(mr_dict, y_dev_1[i], dataset=dataset_name, utterance_only=True)
y_dev_2[i] = delex_sample(mr_dict, y_dev_2[i], dataset=dataset_name)
# convert the dictionary to a list
x_dev_seq.append([])
for key, val in mr_dict.items():
if len(val) > 0:
x_dev_seq[i].extend([key, val])
else:
x_dev_seq[i].append(key)
# load the source vocabulary
with io.open('data/eval_vocab_source.json', 'r', encoding='utf8') as f_x_vocab:
x_vocab = json.load(f_x_vocab)
x_idx2word = [word[0] for word in x_vocab]
x_idx2word.insert(0, '<PADDING>')
x_idx2word.append('<NA>')
x_word2idx = {word: idx for idx, word in enumerate(x_idx2word)}
# load the target vocabulary
with io.open('data/eval_vocab_target.json', 'r', encoding='utf8') as f_y_vocab:
y_vocab = json.load(f_y_vocab)
y_idx2word = [word[0] for word in y_vocab]
y_idx2word.insert(0, '<PADDING>')
y_idx2word.append('<NA>')
y_word2idx = {token: idx for idx, token in enumerate(y_idx2word)}
# produce sequences of indexes from the MRs in the devset
x_dev_enc = token_seq_to_idx_seq(x_dev_seq, x_word2idx, max_input_seq_len)
# produce sequences of indexes from the utterances in the devset
y_dev_1_enc = token_seq_to_idx_seq(y_dev_1, y_word2idx, max_output_seq_len)
# produce sequences of indexes from the utterances in the devset
y_dev_2_enc = token_seq_to_idx_seq(y_dev_2, y_word2idx, max_output_seq_len)
# produce the list of the target labels in the devset
labels_dev = np.concatenate((np.ones(len(y_dev_1_enc)), np.zeros(len(y_dev_2_enc))))
return (np.concatenate((np.array(x_dev_enc), np.array(x_dev_enc))),
np.concatenate((np.array(y_dev_1_enc), np.array(y_dev_2_enc))),
labels_dev)
def load_test_data_for_eval(data_testset, data_model_outputs_test, vocab_size, max_input_seq_len, max_output_seq_len, delex=False):
dataset_name = ''
slot_sep = ''
val_sep = ''
val_sep_end = None
if '/rest_e2e/' in data_testset or '\\rest_e2e\\' in data_testset:
x_test, _ = read_rest_e2e_dataset_test(data_testset)
y_test = read_predictions(data_model_outputs_test)
dataset_name = 'rest_e2e'
slot_sep = ','
val_sep = '['
val_sep_end = ']'
elif '/tv/' in data_testset or '\\tv\\' in data_testset:
x_test, _, y_test = read_tv_dataset_test(data_testset)
if data_model_outputs_test is not None:
y_test = read_predictions(data_model_outputs_test)
dataset_name = 'tv'
slot_sep = ';'
val_sep = '='
elif '/laptop/' in data_testset or '\\laptop\\' in data_testset:
x_test, _, y_test = read_laptop_dataset_test(data_testset)
if data_model_outputs_test is not None:
y_test = read_predictions(data_model_outputs_test)
dataset_name = 'laptop'
slot_sep = ';'
val_sep = '='
else:
raise FileNotFoundError
# parse the utterances into lists of words
y_test = [preprocess_utterance(y) for y in y_test]
#y_test_1 = [preprocess_utterance(y) for y in y_test_1]
#y_test_2 = [preprocess_utterance(y) for y in y_test_2]
# produce sequences of extracted words from the meaning representations (MRs) in the testset
x_test_seq = []
for i, mr in enumerate(x_test):
mr_dict = OrderedDict()
for slot_value in mr.split(slot_sep):
slot, value, _, _ = parse_slot_and_value(slot_value, val_sep, val_sep_end)
mr_dict[slot] = value
if delex:
# delexicalize the MR and the utterance
y_test[i] = delex_sample(mr_dict, y_test[i], dataset=dataset_name)
#y_test_1[i] = delex_sample(mr_dict, y_test_1[i], dataset=dataset_name, utterance_only=True)
#y_test_2[i] = delex_sample(mr_dict, y_test_2[i], dataset=dataset_name)
# convert the dictionary to a list
x_test_seq.append([])
for key, val in mr_dict.items():
if len(val) > 0:
x_test_seq[i].extend([key, val])
else:
x_test_seq[i].append(key)
# load the source vocabulary
with io.open('data/eval_vocab_source.json', 'r', encoding='utf8') as f_x_vocab:
x_vocab = json.load(f_x_vocab)
x_idx2word = [word[0] for word in x_vocab]
x_idx2word.insert(0, '<PADDING>')
x_idx2word.append('<NA>')
x_word2idx = {word: idx for idx, word in enumerate(x_idx2word)}
# load the target vocabulary
with io.open('data/eval_vocab_target.json', 'r', encoding='utf8') as f_y_vocab:
y_vocab = json.load(f_y_vocab)
y_idx2word = [word[0] for word in y_vocab]
y_idx2word.insert(0, '<PADDING>')
y_idx2word.append('<NA>')
y_word2idx = {token: idx for idx, token in enumerate(y_idx2word)}
# produce sequences of indexes from the MRs in the test set
x_test_enc = token_seq_to_idx_seq(x_test_seq, x_word2idx, max_input_seq_len)
# produce sequences of indexes from the utterances in the test set
y_test_enc = token_seq_to_idx_seq(y_test, y_word2idx, max_output_seq_len)
#y_test_1_enc = token_seq_to_idx_seq(y_test_1, y_word2idx, max_output_seq_len)
#y_test_2_enc = token_seq_to_idx_seq(y_test_2, y_word2idx, max_output_seq_len)
# produce the list of the target labels in the test set
labels_test = np.ones(len(y_test_enc))
#labels_test = np.concatenate((np.ones(len(y_test_1_enc)), np.zeros(len(y_test_2_enc))))
return (np.array(x_test_enc),
np.array(y_test_enc),
labels_test,
x_idx2word,
y_idx2word)
#return (np.concatenate((np.array(x_test_enc), np.array(x_test_enc))),
# np.concatenate((np.array(y_test_1_enc), np.array(y_test_2_enc))),
# labels_test,
# x_idx2word,
# y_idx2word)
# ---- AUXILIARY FUNCTIONS ----
def init_training_data(data_trainset, data_devset):
if 'rest_e2e' in data_trainset and 'rest_e2e' in data_devset:
x_train, y_train = read_rest_e2e_dataset_train(data_trainset)
x_dev, y_dev = read_rest_e2e_dataset_dev(data_devset)
dataset_name = 'rest_e2e'
da_sep = '('
da_sep_end = ')'
slot_sep = ', '
val_sep = '['
val_sep_end = ']'
elif 'video_game' in data_trainset and 'video_game' in data_devset:
x_train, y_train = read_video_game_dataset_train(data_trainset)
x_dev, y_dev = read_video_game_dataset_dev(data_devset)
dataset_name = 'video_game'
da_sep = '('
da_sep_end = ')'
slot_sep = ', '
val_sep = '['
val_sep_end = ']'
elif 'tv' in data_trainset and 'tv' in data_devset:
x_train, y_train, _ = read_tv_dataset_train(data_trainset)
x_dev, y_dev, _ = read_tv_dataset_dev(data_devset)
dataset_name = 'tv'
da_sep = '('
da_sep_end = ')'
slot_sep = ';'
val_sep = '='
val_sep_end = None
elif 'laptop' in data_trainset and 'laptop' in data_devset:
x_train, y_train, _ = read_laptop_dataset_train(data_trainset)
x_dev, y_dev, _ = read_laptop_dataset_dev(data_devset)
dataset_name = 'laptop'
da_sep = '('
da_sep_end = ')'
slot_sep = ';'
val_sep = '='
val_sep_end = None
elif 'hotel' in data_trainset and 'hotel' in data_devset:
x_train, y_train, _ = read_hotel_dataset_train(data_trainset)
x_dev, y_dev, _ = read_hotel_dataset_dev(data_devset)
dataset_name = 'hotel'
da_sep = '('
da_sep_end = ')'
slot_sep = ';'
val_sep = '='
val_sep_end = None
else:
raise ValueError('Unexpected file name or path: {0}, {1}'.format(data_trainset, data_devset))
return {
'dataset_name': dataset_name,
'data': (x_train, y_train, x_dev, y_dev),
'separators': (da_sep, da_sep_end, slot_sep, val_sep, val_sep_end)
}
def init_test_data(data_testset):
if 'rest_e2e' in data_testset:
x_test, y_test = read_rest_e2e_dataset_test(data_testset)
dataset_name = 'rest_e2e'
da_sep = '('
da_sep_end = ')'
slot_sep = ', '
val_sep = '['
val_sep_end = ']'
elif 'video_game' in data_testset:
x_test, y_test = read_video_game_dataset_test(data_testset)
dataset_name = 'video_game'
da_sep = '('
da_sep_end = ')'
slot_sep = ', '
val_sep = '['
val_sep_end = ']'
elif 'tv' in data_testset:
x_test, y_test, _ = read_tv_dataset_test(data_testset)
dataset_name = 'tv'
da_sep = '('
da_sep_end = ')'
slot_sep = ';'
val_sep = '='
val_sep_end = None
elif 'laptop' in data_testset:
x_test, y_test, _ = read_laptop_dataset_test(data_testset)
dataset_name = 'laptop'
da_sep = '('
da_sep_end = ')'
slot_sep = ';'
val_sep = '='
val_sep_end = None
elif 'hotel' in data_testset:
x_test, y_test, _ = read_hotel_dataset_test(data_testset)
dataset_name = 'hotel'
da_sep = '('
da_sep_end = ')'
slot_sep = ';'
val_sep = '='
val_sep_end = None
else:
raise ValueError('Unexpected file name or path: {0}'.format(data_testset))
return {
'dataset_name': dataset_name,
'data': (x_test, y_test),
'separators': (da_sep, da_sep_end, slot_sep, val_sep, val_sep_end)
}
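# Illustrative usage sketch (the file path below is assumed, not part of the original module):
# for a CSV test file from the E2E restaurant dataset, init_test_data returns the raw MR/utterance
# lists together with the separator conventions of that domain, e.g.:
#   data_cont = init_test_data('data/rest_e2e/testset.csv')
#   mrs, utterances = data_cont['data']
#   da_sep, da_sep_end, slot_sep, val_sep, val_sep_end = data_cont['separators']
#   # -> dataset_name == 'rest_e2e' and separators == ('(', ')', ', ', '[', ']')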
def read_rest_e2e_dataset_train(data_trainset):
# read the training data from file
df_train = pd.read_csv(data_trainset, header=0, encoding='utf8') # names=['mr', 'ref']
x_train = df_train.mr.tolist()
y_train = df_train.ref.tolist()
return x_train, y_train
def read_rest_e2e_dataset_dev(data_devset):
# read the development data from file
df_dev = pd.read_csv(data_devset, header=0, encoding='utf8') # names=['mr', 'ref']
x_dev = df_dev.mr.tolist()
y_dev = df_dev.ref.tolist()
return x_dev, y_dev
def read_rest_e2e_dataset_test(data_testset):
# read the test data from file
df_test = pd.read_csv(data_testset, header=0, encoding='utf8') # names=['mr', 'ref']
x_test = df_test.iloc[:, 0].tolist()
y_test = []
if df_test.shape[1] > 1:
y_test = df_test.iloc[:, 1].tolist()
return x_test, y_test
def read_video_game_dataset_train(data_trainset):
# read the training data from file
df_train = pd.read_csv(data_trainset, header=0, encoding='utf8') # names=['mr', 'ref']
x_train = df_train.mr.tolist()
y_train = df_train.ref.tolist()
return x_train, y_train
def read_video_game_dataset_dev(data_devset):
# read the development data from file
df_dev = pd.read_csv(data_devset, header=0, encoding='utf8') # names=['mr', 'ref']
x_dev = df_dev.mr.tolist()
y_dev = df_dev.ref.tolist()
return x_dev, y_dev
def read_video_game_dataset_test(data_testset):
# read the test data from file
df_test = pd.read_csv(data_testset, header=0, encoding='utf8') # names=['mr', 'ref']
x_test = df_test.iloc[:, 0].tolist()
y_test = []
if df_test.shape[1] > 1:
y_test = df_test.iloc[:, 1].tolist()
return x_test, y_test
def read_tv_dataset_train(path_to_trainset):
with io.open(path_to_trainset, encoding='utf8') as f_trainset:
# Skip the comment block at the beginning of the file
f_trainset, _ = skip_comment_block(f_trainset, '#')
# read the training data from file
df_train = pd.read_json(f_trainset, encoding='utf8')
x_train = df_train.iloc[:, 0].tolist()
y_train = df_train.iloc[:, 1].tolist()
y_train_alt = df_train.iloc[:, 2].tolist()
# TODO: remove from here and use the universal DA extraction instead
# transform the MR to contain the DA type as the first slot
for i, mr in enumerate(x_train):
x_train[i] = preprocess_mr_for_tv_laptop(mr, '(', ';', '=')
# convert plural nouns to "[noun] -s" or "[noun] -es" form
for i, utt in enumerate(y_train):
y_train[i] = replace_plural_nouns(utt)
for i, utt in enumerate(y_train_alt):
y_train_alt[i] = replace_plural_nouns(utt)
return x_train, y_train, y_train_alt
def read_tv_dataset_dev(path_to_devset):
with io.open(path_to_devset, encoding='utf8') as f_devset:
# Skip the comment block at the beginning of the file
f_devset, _ = skip_comment_block(f_devset, '#')
# read the development data from file
df_dev = pd.read_json(f_devset, encoding='utf8')
x_dev = df_dev.iloc[:, 0].tolist()
y_dev = df_dev.iloc[:, 1].tolist()
y_dev_alt = df_dev.iloc[:, 2].tolist()
# TODO: remove from here and use the universal DA extraction instead
# transform the MR to contain the DA type as the first slot
for i, mr in enumerate(x_dev):
x_dev[i] = preprocess_mr_for_tv_laptop(mr, '(', ';', '=')
# convert plural nouns to "[noun] -s" or "[noun] -es" form
for i, utt in enumerate(y_dev):
y_dev[i] = replace_plural_nouns(utt)
for i, utt in enumerate(y_dev_alt):
y_dev_alt[i] = replace_plural_nouns(utt)
return x_dev, y_dev, y_dev_alt
def read_tv_dataset_test(path_to_testset):
with io.open(path_to_testset, encoding='utf8') as f_testset:
# Skip the comment block at the beginning of the file
f_testset, _ = skip_comment_block(f_testset, '#')
# read the test data from file
df_test = pd.read_json(f_testset, encoding='utf8')
x_test = df_test.iloc[:, 0].tolist()
y_test = df_test.iloc[:, 1].tolist()
y_test_alt = df_test.iloc[:, 2].tolist()
# TODO: remove from here and use the universal DA extraction instead
# transform the MR to contain the DA type as the first slot
for i, mr in enumerate(x_test):
x_test[i] = preprocess_mr_for_tv_laptop(mr, '(', ';', '=')
return x_test, y_test, y_test_alt
def read_laptop_dataset_train(path_to_trainset):
with io.open(path_to_trainset, encoding='utf8') as f_trainset:
# Skip the comment block at the beginning of the file
f_trainset, _ = skip_comment_block(f_trainset, '#')
# read the training data from file
df_train = pd.read_json(f_trainset, encoding='utf8')
x_train = df_train.iloc[:, 0].tolist()
y_train = df_train.iloc[:, 1].tolist()
y_train_alt = df_train.iloc[:, 2].tolist()
# TODO: remove from here and use the universal DA extraction instead
# transform the MR to contain the DA type as the first slot
for i, mr in enumerate(x_train):
x_train[i] = preprocess_mr_for_tv_laptop(mr, '(', ';', '=')
return x_train, y_train, y_train_alt
def read_laptop_dataset_dev(path_to_devset):
with io.open(path_to_devset, encoding='utf8') as f_devset:
# Skip the comment block at the beginning of the file
f_devset, _ = skip_comment_block(f_devset, '#')
# read the development data from file
df_dev = pd.read_json(f_devset, encoding='utf8')
x_dev = df_dev.iloc[:, 0].tolist()
y_dev = df_dev.iloc[:, 1].tolist()
y_dev_alt = df_dev.iloc[:, 2].tolist()
# TODO: remove from here and use the universal DA extraction instead
# transform the MR to contain the DA type as the first slot
for i, mr in enumerate(x_dev):
x_dev[i] = preprocess_mr_for_tv_laptop(mr, '(', ';', '=')
return x_dev, y_dev, y_dev_alt
def read_laptop_dataset_test(path_to_testset):
with io.open(path_to_testset, encoding='utf8') as f_testset:
# Skip the comment block at the beginning of the file
f_testset, _ = skip_comment_block(f_testset, '#')
# read the test data from file
df_test = pd.read_json(f_testset, encoding='utf8')
x_test = df_test.iloc[:, 0].tolist()
y_test = df_test.iloc[:, 1].tolist()
y_test_alt = df_test.iloc[:, 2].tolist()
# TODO: remove from here and use the universal DA extraction instead
# transform the MR to contain the DA type as the first slot
for i, mr in enumerate(x_test):
x_test[i] = preprocess_mr_for_tv_laptop(mr, '(', ';', '=')
return x_test, y_test, y_test_alt
def read_hotel_dataset_train(path_to_trainset):
with io.open(path_to_trainset, encoding='utf8') as f_trainset:
# Skip the comment block at the beginning of the file
f_trainset, _ = skip_comment_block(f_trainset, '#')
# read the training data from file
df_train = pd.read_json(f_trainset, encoding='utf8')
x_train = df_train.iloc[:, 0].tolist()
y_train = df_train.iloc[:, 1].tolist()
y_train_alt = df_train.iloc[:, 2].tolist()
# TODO: remove from here and use the universal DA extraction instead
# transform the MR to contain the DA type as the first slot
for i, mr in enumerate(x_train):
x_train[i] = preprocess_mr_for_tv_laptop(mr, '(', ';', '=')
return x_train, y_train, y_train_alt
def read_hotel_dataset_dev(path_to_devset):
with io.open(path_to_devset, encoding='utf8') as f_devset:
# Skip the comment block at the beginning of the file
f_devset, _ = skip_comment_block(f_devset, '#')
# read the development data from file
df_dev = pd.read_json(f_devset, encoding='utf8')
x_dev = df_dev.iloc[:, 0].tolist()
y_dev = df_dev.iloc[:, 1].tolist()
y_dev_alt = df_dev.iloc[:, 2].tolist()
# TODO: remove from here and use the universal DA extraction instead
# transform the MR to contain the DA type as the first slot
for i, mr in enumerate(x_dev):
x_dev[i] = preprocess_mr_for_tv_laptop(mr, '(', ';', '=')
return x_dev, y_dev, y_dev_alt
def read_hotel_dataset_test(path_to_testset):
with io.open(path_to_testset, encoding='utf8') as f_testset:
# Skip the comment block at the beginning of the file
f_testset, _ = skip_comment_block(f_testset, '#')
# read the test data from file
df_test = pd.read_json(f_testset, encoding='utf8')
x_test = df_test.iloc[:, 0].tolist()
y_test = df_test.iloc[:, 1].tolist()
y_test_alt = df_test.iloc[:, 2].tolist()
# TODO: remove from here and use the universal DA extraction instead
# transform the MR to contain the DA type as the first slot
for i, mr in enumerate(x_test):
x_test[i] = preprocess_mr_for_tv_laptop(mr, '(', ';', '=')
return x_test, y_test, y_test_alt
def read_predictions(path_to_predictions):
# read the test data from file
with io.open(path_to_predictions, encoding='utf8') as f_predictions:
y_pred = f_predictions.readlines()
return y_pred
def skip_comment_block(fd, comment_symbol):
"""Reads the initial lines of the file (represented by the file descriptor) corresponding to a comment block.
All consecutive lines starting with the given symbol are considered to be part of the comment block.
"""
comment_block = ''
line_beg = fd.tell()
line = fd.readline()
while line != '':
if not line.startswith(comment_symbol):
fd.seek(line_beg)
break
comment_block += line
line_beg = fd.tell()
line = fd.readline()
return fd, comment_block
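# Example of the expected behaviour (hypothetical file contents, for illustration only):
# given a file that starts with
#   # This is a comment line
#   # Another comment line
#   [ ["inform(...)", "reference", "alternative reference"], ... ]
# skip_comment_block(fd, '#') leaves the file position at the first non-comment line and
# returns the two comment lines (including their newlines) concatenated in comment_block.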
def replace_plural_nouns(utt):
stemmer = WordNetLemmatizer()
pos_tags = nltk.pos_tag(nltk.word_tokenize(utt))
tokens_to_replace = []
tokens_new = []
for token, tag in pos_tags:
#if tag == 'NNS':
if token in ['inches', 'watts']:
tokens_to_replace.append(token)
tokens_new.append(split_plural_noun(token, stemmer))
for token_to_replace, token_new in zip(tokens_to_replace, tokens_new):
utt = utt.replace(token_to_replace, token_new)
return utt
def split_plural_noun(word, stemmer):
stem = stemmer.lemmatize(word)
if stem not in word or stem == word:
return word
suffix = word.replace(stem, '')
return stem + ' -' + suffix
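# Illustration (assuming NLTK's WordNetLemmatizer reduces 'inches' -> 'inch' and 'watts' -> 'watt'):
#   split_plural_noun('inches', WordNetLemmatizer())  ->  'inch -es'
#   split_plural_noun('watts', WordNetLemmatizer())   ->  'watt -s'
# replace_plural_nouns only applies this split to the whitelisted tokens 'inches' and 'watts'.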
def replace_commas_in_mr_values(mr, val_sep, val_sep_end):
mr_new = ''
val_beg_cnt = 0
val_end_cnt = 0
for c in mr:
# If comma inside a value, replace the comma with placeholder
if c == ',' and val_beg_cnt > val_end_cnt:
mr_new += config.COMMA_PLACEHOLDER
continue
# Keep track of value beginning and end
if c == val_sep:
val_beg_cnt += 1
elif c == val_sep_end:
val_end_cnt += 1
mr_new += c
return mr_new
def put_back_commas_in_mr_values(mrs):
return [mr.replace(config.COMMA_PLACEHOLDER, ',') for mr in mrs]
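# Example of the comma masking (the literal placeholder token is taken from config.COMMA_PLACEHOLDER):
#   replace_commas_in_mr_values('name[Aromi], food[Italian, Pizza]', '[', ']')
#   -> 'name[Aromi], food[Italian<COMMA_PLACEHOLDER> Pizza]'
# Only the comma *inside* the bracketed value is masked, so the slot separator ', ' can be split on
# safely; put_back_commas_in_mr_values later restores the original commas.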
def preprocess_da_in_mr(mr, separators):
# Unpack separators
da_sep, da_sep_end, slot_sep, val_sep, val_sep_end = separators
# If no DA indication is expected in the data, return the MR unchanged
if da_sep is None:
return mr
# Verify if DA type is indicated at the beginning of the MR
da_sep_idx = mr.find(da_sep)
slot_sep_idx = mr.find(slot_sep)
val_sep_idx = mr.find(val_sep)
if da_sep_idx < 0 or 0 <= slot_sep_idx < da_sep_idx or 0 <= val_sep_idx < da_sep_idx:
return mr
# Extract the DA type from the beginning of the MR
da_type = mr[:da_sep_idx].lstrip('?') # Strip the '?' symbol present in Laptop and TV datasets
slot_value_pairs = mr[da_sep_idx + 1:]
if da_sep_end is not None:
slot_value_pairs = slot_value_pairs.rstrip(da_sep_end)
# Convert the extracted DA to the slot-value form and prepend it to the remainder of the MR
mr_new = 'da' + val_sep + da_type
if val_sep_end is not None:
mr_new += val_sep_end
if len(slot_value_pairs) > 0:
mr_new += slot_sep + slot_value_pairs
return mr_new
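# Example with the E2E-style separators ('(', ')', ', ', '[', ']'):
#   preprocess_da_in_mr('inform(name[Aromi], food[Italian])', ('(', ')', ', ', '[', ']'))
#   -> 'da[inform], name[Aromi], food[Italian]'
# The DA type becomes an ordinary slot-value pair at the front of the MR.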
# TODO: merge with the above function
def preprocess_mr_for_tv_laptop(mr, da_sep, slot_sep, val_sep):
sep_idx = mr.find(da_sep)
da_type = mr[:sep_idx].lstrip('?')
slot_value_pairs = mr[sep_idx:].strip('()')
mr_new = 'da=' + da_type
if len(slot_value_pairs) > 0:
mr_new += slot_sep + slot_value_pairs
mr_modified = ''
for slot_value in mr_new.split(slot_sep):
slot, _, _, value_orig = parse_slot_and_value(slot_value, val_sep)
# If the value is enclosed in apostrophes, remove them
if value_orig.startswith('\'') and value_orig.endswith('\''):
value_orig = value_orig[1:-1]
mr_modified += slot + val_sep + value_orig + slot_sep
mr_new = mr_modified[:-1]
if da_type in ['compare', 'suggest']:
slot_counts = {}
mr_modified = ''
for slot_value in mr_new.split(slot_sep):
slot, _, _, value_orig = parse_slot_and_value(slot_value, val_sep)
if slot in ['da', 'position']:
mr_modified += slot
else:
slot_counts[slot] = slot_counts.get(slot, 0) + 1
mr_modified += slot + str(slot_counts[slot])
mr_modified += val_sep + value_orig + slot_sep
mr_new = mr_modified[:-1]
return mr_new
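# Example with the TV/Laptop-style separators ('(', ';', '='):
#   preprocess_mr_for_tv_laptop("?inform(name='typhon 45';type=television)", '(', ';', '=')
#   -> 'da=inform;name=typhon 45;type=television'
# For 'compare' and 'suggest' DAs, repeated slots are additionally numbered (e.g. name1, name2).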
def preprocess_mr(mr, separators):
# Transform the MR to list the DA type as the first slot, if its indication is present in the MR
mr_new = preprocess_da_in_mr(mr, separators)
# Replace commas in values if comma is the slot separator
if separators[2].strip() == ',' and separators[4] is not None:
mr_new = replace_commas_in_mr_values(mr_new, separators[3], separators[4])
return mr_new
def preprocess_utterance(utt):
return ' '.join(word_tokenize(utt.lower()))
def parse_slot_and_value(slot_value, val_sep, val_sep_end=None):
sep_idx = slot_value.find(val_sep)
if sep_idx > -1:
# Parse the slot
slot = slot_value[:sep_idx].strip()
# Parse the value
if val_sep_end is not None:
value = slot_value[sep_idx + 1:-1].strip()
else:
value = slot_value[sep_idx + 1:].strip()
else:
# Parse the slot
if val_sep_end is not None:
slot = slot_value[:-1].strip()
else:
slot = slot_value.strip()
# Set the value to the empty string
value = ''
slot_processed = slot.replace(' ', '').lower()
if not slot_processed.startswith('__'):
slot_processed = slot_processed.replace('_', '')
value = value.replace(config.COMMA_PLACEHOLDER, ',')
# TODO: fix the cases where a period is in the value
# TODO: (e.g., the suggest DA file (2 slots) or verify_attribute DA file (4 slots) in the video game dataset)
value_processed = ' '.join(word_tokenize(value.lower()))
return slot_processed, value_processed, slot, value
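# Example (E2E-style value delimiters):
#   parse_slot_and_value('customer rating[5 out of 5]', '[', ']')
#   -> ('customerrating', '5 out of 5', 'customer rating', '5 out of 5')
# The first two items are the normalized slot and value, the last two preserve the original spelling.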
def delex_sample(mr, utterance=None, dataset=None, slots_to_delex=None, mr_only=False, input_concat=False, utterance_only=False):
"""Delexicalizes a single sample (MR and the corresponding utterance).
By default, the slots 'name', 'near' and 'food' are delexicalized (for the E2E dataset).
All fields (E2E): name, near, area, food, customer rating, familyFriendly, eatType, priceRange
"""
if not mr_only and utterance is None:
raise ValueError('the \'utterance\' argument must be provided when \'mr_only\' is False.')
if slots_to_delex is not None:
delex_slots = slots_to_delex
else:
if dataset == 'rest_e2e':
delex_slots = ['name', 'near', 'food']
elif dataset == 'video_game':
delex_slots = ['name', 'releaseyear', 'expreleasedate', 'developer']
elif dataset == 'tv':
delex_slots = ['name', 'family', 'hdmiport', 'screensize', 'price', 'audio', 'resolution', 'powerconsumption', 'color', 'count']
elif dataset == 'laptop':
delex_slots = ['name', 'family', 'processor', 'memory', 'drive', 'battery', 'weight', 'dimension', 'design', 'platform', 'warranty', 'count']
elif dataset == 'hotel':
delex_slots = ['name', 'address', 'postcode', 'area', 'near', 'phone', 'count']
else:
# By default, assume the dataset is 'rest_e2e'
delex_slots = ['name', 'near', 'food']
# Sort the slots to be delexed in a descending order of their value's length (to avoid delexing of a value that is
# a substring of another value to be delexed)
delex_slots_sorted = [(s, v) for s, v in mr.items()
if s.rstrip(string.digits) in delex_slots and v not in ['dontcare', 'none', '']]
delex_slots_sorted = [s for s, v in sorted(delex_slots_sorted, key=lambda x: len(x[1]), reverse=True)]
mr_update = {}
# for slot, value in mr.items():
for slot in delex_slots_sorted:
value = mr[slot]
if value not in ['dontcare', 'none', '']:
# Assemble a placeholder token for the value
placeholder = create_placeholder(slot, value)
values_alt = [value]
# Specify special rules for individual slots, including alternative representations of the values
if slot == 'address':
if 'street' in value:
values_alt.append(re.sub(r'\b{}\b'.format('street'), 'st', value))
elif 'avenue' in value:
values_alt.append(re.sub(r'\b{}\b'.format('avenue'), 'ave', value))
elif slot == 'name':
# If name is contained in the developer slot value, delexicalize the developer slot first
if not mr_only and 'developer' in mr and value in mr['developer']:
dev_placeholder = create_placeholder('developer', mr['developer'])
dev_val_preproc = ' '.join(word_tokenize(mr['developer']))
utterance = re.sub(r'\b{}\b'.format(dev_val_preproc), dev_placeholder, utterance)
mr_update['developer'] = dev_placeholder
elif slot in ['developer', 'expreleasedate']:
values_alt = [value.replace(';', ',')]
utterance_delexed = utterance
if not mr_only:
for val in values_alt:
# Replace the value (whole-word matches only) with the placeholder
utterance_delexed = re.sub(r'\b{}\b'.format(val), placeholder, utterance)
if utterance_delexed != utterance:
break
# Do not replace value with a placeholder token unless there is an exact match in the utterance
if slot not in mr_update and (mr_only or utterance_delexed != utterance or slot == 'name'):
mr_update[slot] = placeholder
utterance = utterance_delexed
else:
if input_concat:
mr_update[slot] = value.replace(' ', '_')
if not utterance_only:
for slot, new_value in mr_update.items():
mr[slot] = new_value
if not mr_only:
# Tokenize punctuation missed by tokenizer (such as after years and numbers in titles) before delexicalization
utterance = utterance.replace(config.DELEX_SUFFIX + ',', config.DELEX_SUFFIX + ' ,')
utterance = utterance.replace(config.DELEX_SUFFIX + '.', config.DELEX_SUFFIX + ' .')
return utterance
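# Minimal sketch of the delexicalization effect (placeholder tokens depend on config.DELEX_PREFIX and
# config.DELEX_SUFFIX, so the names below are illustrative only):
#   mr = OrderedDict([('name', 'aromi'), ('food', 'italian')])
#   delex_sample(mr, 'aromi serves italian food', dataset='rest_e2e')
#   -> '<name placeholder> serves <food placeholder> food'
# with mr updated in place so that both slot values are replaced by their placeholders.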
def counterfeit_sample(mr, utt, target_dataset=None, slots_to_replace=None, slot_value_dict=None):
"""Counterfeits a single E2E sample (MR and the corresponding utterance).
"""
mr_counterfeit = {}
utt_counterfeit = utt
if slots_to_replace is None:
if target_dataset == 'rest_e2e':
slots_to_replace = ['name', 'near', 'food']
elif target_dataset == 'video_game':
slots_to_replace = ['name', 'releaseyear', 'expreleasedate', 'developer']
elif target_dataset == 'tv':
slots_to_replace = ['name', 'family', 'hdmiport', 'screensize', 'price', 'audio', 'resolution', 'powerconsumption', 'color', 'count']
elif target_dataset == 'laptop':
slots_to_replace = ['name', 'family', 'processor', 'memory', 'drive', 'battery', 'weight', 'dimension', 'design', 'platform', 'warranty', 'count']
elif target_dataset == 'hotel':
slots_to_replace = ['name', 'address', 'postcode', 'area', 'near', 'phone', 'count']
else:
slots_to_replace = []
if target_dataset == 'video_game':
for slot_orig, value_orig in mr.items():
slot_counterfeit = slot_orig
value_counterfeit = value_orig
if slot_orig.rstrip(string.digits) in slots_to_replace:
# Substitute the slot with the corresponding slot from the target domain
slot_counterfeit = e2e_slot_to_video_game_slot(slot_orig)
while slot_counterfeit in mr_counterfeit:
slot_counterfeit = e2e_slot_to_video_game_slot(slot_orig)
if slot_orig == 'food':
# If value mentioned in the MR verbatim, replace with a sampled value from the target domain
if value_orig in utt_counterfeit:
value_counterfeit = random.choice(slot_value_dict[slot_counterfeit])
value_realization = value_counterfeit
utt_counterfeit = re.sub(value_orig, value_realization, utt_counterfeit)
# Replace related keywords/phrases with alternatives matching the target domain
if slot_counterfeit == 'releaseyear':
phrase_counterfeit1 = random.choice(['was released in', 'came out in'])
phrase_counterfeit2 = random.choice(['released in', 'from'])
elif slot_counterfeit == 'expreleasedate':
phrase_counterfeit1 = random.choice(['will be released on', 'is expected to come out', 'is coming out on'])
phrase_counterfeit2 = random.choice(['to be released on', 'expected to be released on', 'slated for release on'])
else:
phrase_counterfeit1 = ''
phrase_counterfeit2 = ''
utt_counterfeit = re.sub(r'\bserves\b', phrase_counterfeit1, utt_counterfeit)
utt_counterfeit = re.sub(r'\bserving\b', phrase_counterfeit2, utt_counterfeit)
utt_counterfeit = re.sub(r'\bprovides\b', phrase_counterfeit1, utt_counterfeit)
utt_counterfeit = re.sub(r'\bproviding\b', phrase_counterfeit2, utt_counterfeit)
utt_counterfeit = re.sub(r'\bfood\b', '', utt_counterfeit)
elif slot_orig == 'customerrating':
# If value mentioned in the MR verbatim, replace with a sampled value from the target domain
if value_orig in utt_counterfeit:
value_counterfeit = random.choice(slot_value_dict[slot_counterfeit])
value_realization = value_counterfeit
utt_counterfeit = re.sub(value_orig, value_realization, utt_counterfeit)
# Replace related keywords/phrases with alternatives matching the target domain
if slot_counterfeit == 'rating':
phrase_counterfeit = 'rating'
elif slot_counterfeit == 'esrb':
phrase_counterfeit = 'esrb rating'
else:
phrase_counterfeit = ''
for w in ['customer ratings', 'customer rating', 'ratings', 'rating']:
utt_counterfeit_sub = re.sub(r'\b{}\b'.format(w), phrase_counterfeit, utt_counterfeit)
if utt_counterfeit_sub != utt_counterfeit:
utt_counterfeit = utt_counterfeit_sub
break
elif slot_orig == 'pricerange':
# If value mentioned in the MR verbatim, replace with a sampled value from the target domain
if value_orig in utt_counterfeit:
value_counterfeit = random.choice(slot_value_dict[slot_counterfeit])
if ',' in value_counterfeit:
value_items = [val.strip() for val in value_counterfeit.split(',')]
value_items_shuffled = random.sample(value_items, len(value_items))
value_realization = ', '.join(value_items_shuffled[:-1]) + ' and ' + value_items_shuffled[-1]
else:
value_realization = value_counterfeit
utt_counterfeit = re.sub(value_orig, value_realization, utt_counterfeit)
# Replace related keywords/phrases with alternatives matching the target domain
if slot_counterfeit == 'playerperspective':
phrase_counterfeit = 'perspective'
else:
phrase_counterfeit = ''
for w in ['price range', 'priced', 'prices', 'price']:
utt_counterfeit_sub = re.sub(r'\b{}\b'.format(w), phrase_counterfeit, utt_counterfeit)
if utt_counterfeit_sub != utt_counterfeit:
utt_counterfeit = utt_counterfeit_sub
break
elif slot_orig == 'familyfriendly':
if slot_counterfeit == 'hasmultiplayer':
phrase_counterfeit = 'multiplayer'
elif slot_counterfeit == 'availableonsteam':
phrase_counterfeit = 'steam'
elif slot_counterfeit == 'haslinuxrelease':
phrase_counterfeit = 'linux'
elif slot_counterfeit == 'hasmacrelease':
phrase_counterfeit = 'mac'
else:
phrase_counterfeit = ''
for w in ['families', 'children', 'kids', 'family', 'child', 'kid']:
utt_counterfeit_sub = re.sub(r'\b{}\b'.format(w), phrase_counterfeit, utt_counterfeit)
if utt_counterfeit_sub != utt_counterfeit:
utt_counterfeit = utt_counterfeit_sub
break
for w in ['-friendly', ' friendly']:
utt_counterfeit = re.sub(r'\b{}\b'.format(w), ' supporting', utt_counterfeit)
utt_counterfeit = re.sub(r'\ballow', 'offer', utt_counterfeit)
elif slot_orig == 'area':
# If value mentioned in the MR verbatim, replace with a sampled value from the target domain
if value_orig in utt_counterfeit:
value_counterfeit = random.choice(slot_value_dict[slot_counterfeit])
if ',' in value_counterfeit:
value_items = [val.strip() for val in value_counterfeit.split(',')]
value_items_shuffled = random.sample(value_items, len(value_items))
value_realization = ', '.join(value_items_shuffled[:-1]) + ' and ' + value_items_shuffled[-1]
else:
value_realization = value_counterfeit
utt_counterfeit = re.sub(value_orig, value_realization, utt_counterfeit)
# Replace related keywords/phrases with alternatives matching the target domain
if slot_counterfeit == 'platforms':
phrase_counterfeit = random.choice(['available for', 'available on', 'released for', 'released on'])
else:
phrase_counterfeit = ''
for w in ['located in']:
utt_counterfeit_sub = re.sub(r'\b{}\b'.format(w), phrase_counterfeit, utt_counterfeit)
if utt_counterfeit_sub != utt_counterfeit:
utt_counterfeit = utt_counterfeit_sub
break
for w in ['area']:
phrase_counterfeit = 'platform' + ('s' if ',' in value_counterfeit else '')
utt_counterfeit = re.sub(r'\b{}\b'.format(w), phrase_counterfeit, utt_counterfeit)
elif slot_orig == 'eattype':
# If value mentioned in the MR verbatim, replace with a sampled value from the target domain
if value_orig in utt_counterfeit:
value_counterfeit = random.choice(slot_value_dict[slot_counterfeit])
if ',' in value_counterfeit:
value_items = [val.strip() for val in value_counterfeit.split(',')]
value_items_shuffled = random.sample(value_items, len(value_items))
value_realization = ' '.join(value_items_shuffled) + ' game'
else:
value_realization = value_counterfeit + ' game'
utt_counterfeit = re.sub(value_orig, value_realization, utt_counterfeit)
elif slot_orig == 'near':
if slot_counterfeit == 'developer':
phrase_counterfeit = random.choice(['developed by', 'made by'])
else:
phrase_counterfeit = ''
for w in ['located near', 'situated by']:
utt_counterfeit_sub = re.sub(r'\b{}\b'.format(w), phrase_counterfeit, utt_counterfeit)
if utt_counterfeit_sub != utt_counterfeit:
utt_counterfeit = utt_counterfeit_sub
break
utt_counterfeit = re.sub(r'\bnear\b', random.choice(['by', 'from']), utt_counterfeit)
mr_counterfeit[slot_counterfeit] = value_counterfeit
# Replace general keywords/phrases with alternatives matching the target domain
for w in ['place', 'venue', 'establishment', 'eatery', 'restaurant']:
utt_counterfeit = re.sub(r'\b{}\b'.format(w), 'game', utt_counterfeit)
utt_counterfeit = re.sub(r'\bnear\b', 'for', utt_counterfeit)
elif target_dataset == 'hotel':
for slot_orig, value_orig in mr.items():
slot_counterfeit = slot_orig
value_counterfeit = value_orig
if slot_orig.rstrip(string.digits) in slots_to_replace:
# Substitute the slot with the corresponding slot from the target domain
slot_counterfeit = e2e_slot_to_hotel_slot(slot_orig)
while slot_counterfeit in mr_counterfeit:
slot_counterfeit = e2e_slot_to_hotel_slot(slot_orig)
if slot_orig == 'familyfriendly':
if slot_counterfeit == 'acceptscreditcards':
phrase_counterfeit = 'credit card'
elif slot_counterfeit == 'dogsallowed':
phrase_counterfeit = 'dog'
elif slot_counterfeit == 'hasinternet':
phrase_counterfeit = 'internet'
else:
phrase_counterfeit = ''
for w in ['families', 'children', 'kids']:
utt_counterfeit = re.sub(r'\b{}\b'.format(w),
phrase_counterfeit + 's' if phrase_counterfeit != 'internet' else phrase_counterfeit,
utt)
if utt_counterfeit != utt:
break
if utt_counterfeit == utt:
for w in ['family', 'child', 'kid']:
utt_counterfeit_sub = re.sub(r'\b{}\b'.format(w), phrase_counterfeit, utt_counterfeit)
if utt_counterfeit_sub != utt_counterfeit:
utt_counterfeit = utt_counterfeit_sub
break
elif slot_orig == 'customerrating' or slot_orig == 'food':
if slot_counterfeit == 'address':
phrase_counterfeit = 'address'
elif slot_counterfeit == 'phone':
phrase_counterfeit = 'phone number'
elif slot_counterfeit == 'postcode':
phrase_counterfeit = 'postcode'
else:
phrase_counterfeit = ''
if slot_orig == 'customerrating':
for w in ['customer rating of', 'customer ratings', 'customer rating', 'ratings', 'rating']:
utt_counterfeit_sub = re.sub(r'\b{}\b'.format(w), phrase_counterfeit, utt_counterfeit)
if utt_counterfeit_sub != utt_counterfeit:
utt_counterfeit = utt_counterfeit_sub
break
elif slot_orig == 'food':
utt_counterfeit = re.sub(r'\b{}\b'.format('food'), phrase_counterfeit, utt_counterfeit)
else:
raise AttributeError('provided domain does not exist')
mr_counterfeit[slot_counterfeit] = value_counterfeit
return mr_counterfeit, utt_counterfeit
def create_placeholder(slot, value):
"""Assemble a placeholder token for the given slot value."""
vowels = 'aeiou'
placeholder = config.DELEX_PREFIX
value = value.lower()
if value[0] in vowels:
placeholder += 'vow_'
else:
placeholder += 'con_'
if slot in ['name', 'developer']:
if value.startswith(('the ', 'a ', 'an ')):
placeholder += 'det_'
elif slot == 'food':
if 'food' not in value:
placeholder += 'cuisine_'
placeholder += (slot + config.DELEX_SUFFIX)
return placeholder
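# Examples of the placeholder naming scheme (the surrounding DELEX_PREFIX/DELEX_SUFFIX come from config):
#   create_placeholder('name', 'The Eagle')  ->  <prefix> + 'con_det_name' + <suffix>
#   create_placeholder('food', 'Italian')    ->  <prefix> + 'vow_cuisine_food' + <suffix>
# 'vow'/'con' records whether the value starts with a vowel, and 'det' whether it carries a determiner.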
def e2e_slot_to_hotel_slot(slot):
"""Map an E2E slot onto a slot in the Hotel domain. If there are multiple tokens in the corresponding category
in the Hotel domain, randomly pick one from that category.
"""
slot_map = {
'food': ['address', 'phone', 'postcode'],
'customerrating': ['address', 'phone', 'postcode'],
'familyfriendly': ['acceptscreditcards', 'dogsallowed', 'hasinternet'],
'eattype': ['type']
}
if slot in slot_map:
if len(slot_map[slot]) == 1:
return slot_map[slot][0]
else:
return random.choice(slot_map[slot])
else:
return slot
def e2e_slot_to_video_game_slot(slot):
"""Map an E2E slot onto a slot in the Video Game domain. If there are multiple tokens in the corresponding category
in the Video Game domain, randomly pick one from that category.
"""
slot_map = {
'food': ['releaseyear', 'expreleasedate'], # delexed
'customerrating': ['rating', 'esrb'],
'pricerange': ['playerperspective'],
'familyfriendly': ['hasmultiplayer', 'availableonsteam', 'haslinuxrelease', 'hasmacrelease'], # boolean
'area': ['platforms'],
'eattype': ['genres'],
'near': ['developer'] # delexed
}
if slot in slot_map:
if len(slot_map[slot]) == 1:
return slot_map[slot][0]
else:
return random.choice(slot_map[slot])
else:
return slot
def token_seq_to_idx_seq(token_sequences, token2idx, max_output_seq_len):
# produce sequences of indices from the given token sequences
idx_sequences = np.zeros((len(token_sequences), max_output_seq_len), dtype=np.int32) # padding implicitly present, as the index of the padding token is 0
for i, token_seq in enumerate(token_sequences):
for j, token in enumerate(token_seq):
# truncate long utterances
if j >= max_output_seq_len:
break
# represent each token with the corresponding index
if token in token2idx:
idx_sequences[i][j] = token2idx[token]
else:
idx_sequences[i][j] = token2idx['<NA>']
return idx_sequences
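# Example (hypothetical vocabulary; index 0 is reserved for the '<PADDING>' token):
#   token2idx = {'<PADDING>': 0, 'the': 1, 'eagle': 2, '<NA>': 3}
#   token_seq_to_idx_seq([['the', 'eagle', 'serves']], token2idx, 5)
#   -> array([[1, 2, 3, 0, 0]], dtype=int32)   # unknown token 'serves' maps to '<NA>', the rest is padding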
# ---- SCRIPTS ----
def count_unique_mrs(dataset, filename):
"""Counts unique MRs in the datasets and prints the statistics. (Requires the initial comment blocks in
the TV and Laptop data files to be manually removed first.)
"""
if filename.lower().endswith('.csv'):
df = pd.read_csv(os.path.join(config.DATA_DIR, dataset, filename), header=0, encoding='utf8')
elif filename.lower().endswith('.json'):
df = pd.read_json(os.path.join(config.DATA_DIR, dataset, filename), encoding='utf8')
else:
raise ValueError('Unexpected file type. Please provide a CSV or a JSON file as input.')
# Remove delexicalized placeholders, if present
df.iloc[:, 0] = df.iloc[:, 0].replace(r'__.*?__', '', regex=True)
print('Unique MRs (' + dataset + ' -> ' + filename + '): ', end='')
print(len(df.iloc[:, 0].unique()), '/', len(df.iloc[:, 0]))
def count_mr_overlap(dataset, filename1, filename2):
"""Counts unique MRs in the datasets and prints the statistics. (Requires the initial comment blocks in
the TV and Laptop data files to be manually removed first.)
"""
if filename1.lower().endswith('.csv') and filename2.lower().endswith('.csv'):
df1 = pd.read_csv(os.path.join(config.DATA_DIR, dataset, filename1), header=0, encoding='utf8')
df2 = pd.read_csv(os.path.join(config.DATA_DIR, dataset, filename2), header=0, encoding='utf8')
elif filename1.lower().endswith('.json') and filename2.lower().endswith('.json'):
df1 = pd.read_json(os.path.join(config.DATA_DIR, dataset, filename1), encoding='utf8')
df2 = pd.read_json(os.path.join(config.DATA_DIR, dataset, filename2), encoding='utf8')
else:
raise ValueError('Unexpected file type. Please provide two CSV or two JSON files as input.')
# Remove delexicalized placeholders, if present
df1.iloc[:, 0] = df1.iloc[:, 0].replace(r'__.*?__', '', regex=True)
df2.iloc[:, 0] = df2.iloc[:, 0].replace(r'__.*?__', '', regex=True)
# Identify the samples whose MR matches one in the other file
df1_overlap = df1[df1.mr.isin(df2.mr)]
df2_overlap = df2[df2.mr.isin(df1.mr)]
print('Overlapping MRs (' + dataset + '):')
print('-> ' + filename1 + ':\t' + str(len(df1_overlap)) + ' out of ' + str(len(df1)))
print('-> ' + filename2 + ':\t' + str(len(df2_overlap)) + ' out of ' + str(len(df2)))
print()
def verify_slot_order(dataset, filename):
"""Verifies whether the slot order in all MRs corresponds to the desired order.
"""
slots_ordered = ['name', 'eattype', 'food', 'pricerange', 'customerrating', 'area', 'familyfriendly', 'near']
mrs_dicts = []
# Read in the data
data_cont = init_test_data(os.path.join(config.DATA_DIR, dataset, filename))
mrs, utterances = data_cont['data']
_, _, slot_sep, val_sep, val_sep_end = data_cont['separators']
for i, mr in enumerate(mrs):
mr_dict = OrderedDict()
# Extract the slot-value pairs into a dictionary
for slot_value in mr.split(slot_sep):
slot, _, _, value_orig = parse_slot_and_value(slot_value, val_sep, val_sep_end)
mr_dict[slot] = value_orig
mrs_dicts.append(mr_dict)
for mr_dict in mrs_dicts:
slots = list(mr_dict.keys())
cur_idx = 0
for slot in slots:
if slot in slots_ordered:
slot_idx = slots.index(slot)
rightmost_idx = slots_ordered.index(slot)
if slot_idx <= rightmost_idx and rightmost_idx >= cur_idx:
cur_idx = rightmost_idx
else:
print('TEST FAILED: {0} has index {1} in the MR, but the order requires index {2}.'.format(
slot, slot_idx, slots_ordered.index(slot)))
def filter_samples_by_da_type_csv(dataset, filename, das_to_keep):
"""Create a new CSV data file by filtering only those samples in the given dataset that contain an MR
with one of the desired DA types.
"""
if not filename.lower().endswith('.csv'):
raise ValueError('Unexpected file type. Please provide a CSV file as input.')
data_filtered = []
# Read in the data
data_cont = init_test_data(os.path.join(config.DATA_DIR, dataset, filename))
mrs, utterances = data_cont['data']
_, _, slot_sep, val_sep, val_sep_end = data_cont['separators']
# Append the opening parenthesis to the DA names, so as to avoid matching DAs whose names have these as prefixes
das_to_keep = tuple(da + '(' for da in das_to_keep)
# Filter MRs with the desired DA types only
for mr, utt in zip(mrs, utterances):
if mr.startswith(das_to_keep):
data_filtered.append([mr, utt])
# Save the filtered dataset to a new file
filename_out = os.path.splitext(filename)[0] + ' [filtered].csv'
pd.DataFrame(data_filtered).to_csv(os.path.join(config.DATA_DIR, dataset, filename_out),
header=['mr', 'ref'],
index=False,
encoding='utf8')
def filter_samples_by_da_type_json(dataset, filename, das_to_keep):
"""Create a new JSON data file by filtering only those samples in the given dataset that contain an MR
with one of the desired DA types.
"""
if not filename.lower().endswith('.json'):
raise ValueError('Unexpected file type. Please provide a JSON file as input.')
data_filtered = []
with io.open(os.path.join(config.DATA_DIR, dataset, filename), encoding='utf8') as f_dataset:
# Skip and store the comment at the beginning of the file
f_dataset, comment_block = skip_comment_block(f_dataset, '#')
# Read the dataset from file
data = json.load(f_dataset, encoding='utf8')
# Append the opening parenthesis to the DA names, so as to avoid matching DAs whose names have these as prefixes
das_to_keep = tuple(da + '(' for da in das_to_keep)
# Filter MRs with the desired DA types only
for sample in data:
mr = sample[0]
if mr.startswith(das_to_keep):
data_filtered.append(sample)
# Save the filtered dataset to a new file
filename_out = os.path.splitext(filename)[0] + ' [filtered].json'
with io.open(os.path.join(config.DATA_DIR, dataset, filename_out), 'w', encoding='utf8') as f_dataset_filtered:
f_dataset_filtered.write(comment_block)
json.dump(data_filtered, f_dataset_filtered, indent=4, ensure_ascii=False)
def filter_samples_by_slot_count_csv(dataset, filename, min_count=None, max_count=None, eliminate_position_slot=True):
"""Create a new CSV data file by filtering only those samples in the given dataset that contain an MR
with the number of slots in the desired range.
"""
if not filename.lower().endswith('.csv'):
raise ValueError('Unexpected file type. Please provide a CSV file as input.')
data_filtered = []
# Read in the data
data_cont = init_test_data(os.path.join(config.DATA_DIR, dataset, filename))
mrs, utterances = data_cont['data']
_, _, slot_sep, val_sep, val_sep_end = data_cont['separators']
for mr, utt in zip(mrs, utterances):
mr_dict = OrderedDict()
cur_min_count = min_count or 0
cur_max_count = max_count or 20
# Extract the slot-value pairs into a dictionary
for slot_value in mr.split(slot_sep):
_, _, slot_orig, value_orig = parse_slot_and_value(slot_value, val_sep, val_sep_end)
mr_dict[slot_orig] = value_orig
if 'da' in mr_dict:
cur_min_count += 1
cur_max_count += 1
if 'position' in mr_dict:
if eliminate_position_slot:
if mr_dict['position'] == 'inner':
continue
elif mr_dict['position'] == 'outer':
mr = mr.replace(', position[outer]', '')
cur_min_count += 1
cur_max_count += 1
if min_count is not None and len(mr_dict) < cur_min_count or \
max_count is not None and len(mr_dict) > cur_max_count:
continue
data_filtered.append([mr, utt])
# Save the filtered dataset to a new file
filename_out = ''.join(filename.split('.')[:-1])
if min_count is not None:
filename_out += '_min{}'.format(min_count)
if max_count is not None:
filename_out += '_max{}'.format(max_count)
filename_out += '_slots.csv'
pd.DataFrame(data_filtered).to_csv(os.path.join(config.DATA_DIR, dataset, filename_out),
header=['mr', 'ref'],
index=False,
encoding='utf8')
def filter_samples_by_slot_count_json(dataset, filename, min_count=None, max_count=None, eliminate_position_slot=True):
"""Create a new JSON data file by filtering only those samples in the given dataset that contain an MR
with the number of slots in the desired range.
"""
if not filename.lower().endswith('.json'):
raise ValueError('Unexpected file type. Please provide a JSON file as input.')
data_filtered = []
with io.open(os.path.join(config.DATA_DIR, dataset, filename), encoding='utf8') as f_dataset:
# Skip and store the comment at the beginning of the file
f_dataset, comment_block = skip_comment_block(f_dataset, '#')
# Read the dataset from file
data = json.load(f_dataset, encoding='utf8')
data_cont = init_test_data(os.path.join(config.DATA_DIR, dataset, filename))
_, _, slot_sep, val_sep, val_sep_end = data_cont['separators']
# Filter MRs with a number of slots in the desired range only
for sample in data:
mr = sample[0]
mr_dict = OrderedDict()
cur_min_count = min_count or 0
cur_max_count = max_count or 20
# Extract the slot-value pairs into a dictionary
for slot_value in mr.split(slot_sep):
_, _, slot_orig, value_orig = parse_slot_and_value(slot_value, val_sep, val_sep_end)
mr_dict[slot_orig] = value_orig
if 'da' in mr_dict:
cur_min_count += 1
cur_max_count += 1
if 'position' in mr_dict:
if eliminate_position_slot:
if mr_dict['position'] == 'inner':
continue
elif mr_dict['position'] == 'outer':
mr = mr.replace(';position=outer', '')
cur_min_count += 1
cur_max_count += 1
if min_count is not None and len(mr_dict) < cur_min_count or \
max_count is not None and len(mr_dict) > cur_max_count:
continue
data_filtered.append([mr, sample[1], sample[2]])
# Save the filtered dataset to a new file
filename_out = ''.join(filename.split('.')[:-1])
if min_count is not None:
filename_out += '_min{}'.format(min_count)
if max_count is not None:
filename_out += '_max{}'.format(max_count)
filename_out += '_slots.json'
with io.open(os.path.join(config.DATA_DIR, dataset, filename_out), 'w', encoding='utf8') as f_dataset_filtered:
f_dataset_filtered.write(comment_block)
json.dump(data_filtered, f_dataset_filtered, indent=4, ensure_ascii=False)
def counterfeit_dataset_from_e2e(filename, target_dataset, out_type='csv', slot_value_dict_path=None):
"""Creates a counterfeit target dataset from the E2E restaurant dataset by mapping the E2E slots onto similar
slots in the target domain. Boolean slots are handled by heuristically replacing the corresponding mention
in the reference utterance to reflect the slot from the target domain that replaced the original E2E one.
The counterfeit dataset is stored in a JSON format.
"""
source_slots = ['name', 'eattype', 'food', 'pricerange', 'customerrating', 'area', 'familyfriendly', 'near']
data_counterfeit = []
data_out = []
# Read in the data
data_cont = init_test_data(os.path.join(config.E2E_DATA_DIR, filename))
mrs, utterances = data_cont['data']
_, _, slot_sep, val_sep, val_sep_end = data_cont['separators']
# Preprocess the utterances
utterances = [preprocess_utterance(utt) for utt in utterances]
if slot_value_dict_path is not None:
with open(slot_value_dict_path, 'r', encoding='utf8') as f_slot_values:
slot_value_dict = json.load(f_slot_values)
else:
slot_value_dict = None
for mr, utt in zip(mrs, utterances):
mr_dict = OrderedDict()
# Extract the slot-value pairs into a dictionary
for slot_value in mr.split(slot_sep):
slot, value, _, _ = parse_slot_and_value(slot_value, val_sep, val_sep_end)
mr_dict[slot] = value
# Delexicalize the MR and the utterance
data_counterfeit.append(counterfeit_sample(mr_dict, utt,
target_dataset=target_dataset,
slots_to_replace=source_slots,
slot_value_dict=slot_value_dict))
if target_dataset in ['video_game']:
for mr, utt in data_counterfeit:
mr_str = mr_to_string(mr, da='inform')
data_out.append([mr_str, utt])
elif target_dataset in ['laptop', 'tv', 'hotel']:
for mr, utt in data_counterfeit:
mr_str = 'inform('
for slot, val in mr.items():
mr_str += slot + '=\'' + val + '\';'
mr_str = mr_str[:-1] + ')'
data_out.append([mr_str, utt, utt])
# Save the counterfeit dataset to a new file
if out_type == 'csv':
filename_out = os.path.splitext(filename)[0] + ' [counterfeit {}].csv'.format(target_dataset)
df_out = pd.DataFrame(data_out, columns=['mr', 'ref'])
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import pytest
import re
from numpy import nan as NA
import numpy as np
from numpy.random import randint
from pandas.compat import range, u
import pandas.compat as compat
from pandas import Index, Series, DataFrame, isna, MultiIndex, notna
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
import pandas.core.strings as strings
class TestStringMethods(object):
def test_api(self):
# GH 6106, GH 9322
assert Series.str is strings.StringMethods
assert isinstance(Series(['']).str, strings.StringMethods)
# GH 9184
invalid = Series([1])
with tm.assert_raises_regex(AttributeError,
"only use .str accessor"):
invalid.str
assert not hasattr(invalid, 'str')
def test_iter(self):
# GH3638
strs = 'google', 'wikimedia', 'wikipedia', 'wikitravel'
ds = Series(strs)
for s in ds.str:
# iter must yield a Series
assert isinstance(s, Series)
# indices of each yielded Series should be equal to the index of
# the original Series
tm.assert_index_equal(s.index, ds.index)
for el in s:
# each element of the series is either a basestring/str or nan
assert isinstance(el, compat.string_types) or isna(el)
# desired behavior is to iterate until everything would be nan on the
# next iter so make sure the last element of the iterator was 'l' in
# this case since 'wikitravel' is the longest string
assert s.dropna().values.item() == 'l'
def test_iter_empty(self):
ds = Series([], dtype=object)
i, s = 100, 1
for i, s in enumerate(ds.str):
pass
# nothing to iterate over, so nothing gets defined; the previously
# assigned values should remain unchanged
assert i == 100
assert s == 1
def test_iter_single_element(self):
ds = Series(['a'])
for i, s in enumerate(ds.str):
pass
assert not i
assert_series_equal(ds, s)
def test_iter_object_try_string(self):
ds = Series([slice(None, randint(10), randint(10, 20)) for _ in range(
4)])
i, s = 100, 'h'
for i, s in enumerate(ds.str):
pass
assert i == 100
assert s == 'h'
def test_cat(self):
one = np.array(['a', 'a', 'b', 'b', 'c', NA], dtype=np.object_)
two = np.array(['a', NA, 'b', 'd', 'foo', NA], dtype=np.object_)
# single array
result = strings.str_cat(one)
exp = 'aabbc'
assert result == exp
result = strings.str_cat(one, na_rep='NA')
exp = 'aabbcNA'
assert result == exp
result = strings.str_cat(one, na_rep='-')
exp = 'aabbc-'
assert result == exp
result = strings.str_cat(one, sep='_', na_rep='NA')
exp = 'a_a_b_b_c_NA'
assert result == exp
result = strings.str_cat(two, sep='-')
exp = 'a-b-d-foo'
assert result == exp
# Multiple arrays
result = strings.str_cat(one, [two], na_rep='NA')
exp = np.array(['aa', 'aNA', 'bb', 'bd', 'cfoo', 'NANA'],
dtype=np.object_)
tm.assert_numpy_array_equal(result, exp)
result = strings.str_cat(one, two)
exp = np.array(['aa', NA, 'bb', 'bd', 'cfoo', NA], dtype=np.object_)
tm.assert_almost_equal(result, exp)
def test_count(self):
values = np.array(['foo', 'foofoo', NA, 'foooofooofommmfoo'],
dtype=np.object_)
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_count(mixed, 'a')
xp = np.array([1, NA, 0, NA, NA, 0, NA, NA, NA])
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.count('a')
xp = Series([1, NA, 0, NA, NA, 0, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = [u('foo'), u('foofoo'), NA, u('foooofooofommmfoo')]
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
def test_contains(self):
values = np.array(['foo', NA, 'fooommm__foo',
'mmm_', 'foommm[_]+bar'], dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, NA, True, True, False], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, regex=False)
expected = np.array([False, NA, False, False, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
values = ['foo', 'xyz', 'fooommm__foo', 'mmm_']
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# case insensitive using regex
values = ['Foo', 'xYz', 'fOOomMm__fOo', 'MMM_']
result = strings.str_contains(values, 'FOO|mmm', case=False)
expected = np.array([True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
# case insensitive without regex
result = strings.str_contains(values, 'foo', regex=False, case=False)
expected = np.array([True, False, True, False])
tm.assert_numpy_array_equal(result, expected)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_contains(mixed, 'o')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.contains('o')
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = np.array([u'foo', NA, u'fooommm__foo', u'mmm_'],
dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, np.nan, True, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, na=False)
expected = np.array([False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
values = np.array(['foo', 'xyz', 'fooommm__foo', 'mmm_'],
dtype=np.object_)
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# na
values = Series(['om', 'foo', np.nan])
res = values.str.contains('foo', na="foo")
assert res.loc[2] == "foo"
def test_startswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = np.array(['a', NA, 'b', True, datetime.today(),
'foo', None, 1, 2.], dtype=np.object_)
rs = strings.str_startswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.startswith('f')
assert isinstance(rs, Series)
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.startswith('foo', na=True)
tm.assert_series_equal(result, exp.fillna(True).astype(bool))
def test_endswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_endswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, False, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.endswith('f')
xp = Series([False, NA, False, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.endswith('foo', na=False)
tm.assert_series_equal(result, exp.fillna(False).astype(bool))
def test_title(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.title()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.title()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.title()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_lower_upper(self):
values = Series(['om', NA, 'nom', 'nom'])
result = values.str.upper()
exp = Series(['OM', NA, 'NOM', 'NOM'])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
mixed = mixed.str.upper()
rs = Series(mixed).str.lower()
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('nom'), u('nom')])
result = values.str.upper()
exp = Series([u('OM'), NA, u('NOM'), u('NOM')])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
def test_capitalize(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.capitalize()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.capitalize()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.capitalize()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_swapcase(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.swapcase()
exp = Series(["foo", "bar", NA, "bLAH", "BLURG"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "Blah", None,
1, 2.])
mixed = mixed.str.swapcase()
exp = Series(["foo", NA, "BAR", NA, NA, "bLAH", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.swapcase()
exp = Series([u("foo"), NA, u("BAR"), u("bLURG")])
tm.assert_series_equal(results, exp)
def test_casemethods(self):
values = ['aaa', 'bbb', 'CCC', 'Dddd', 'eEEE']
s = Series(values)
assert s.str.lower().tolist() == [v.lower() for v in values]
assert s.str.upper().tolist() == [v.upper() for v in values]
assert s.str.title().tolist() == [v.title() for v in values]
assert s.str.capitalize().tolist() == [v.capitalize() for v in values]
assert s.str.swapcase().tolist() == [v.swapcase() for v in values]
def test_replace(self):
values = Series(['fooBAD__barBAD', NA])
result = values.str.replace('BAD[_]*', '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series(['foobarBAD', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace('BAD[_]*', '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace('BAD[_]*', '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
result = values.str.replace(r"(?<=\w),(?=\w)", ", ", flags=re.UNICODE)
tm.assert_series_equal(result, exp)
# GH 13438
for klass in (Series, Index):
for repl in (None, 3, {'a': 'b'}):
for data in (['a', 'b', None], ['a', 'b', 'c', 'ad']):
values = klass(data)
pytest.raises(TypeError, values.str.replace, 'a', repl)
def test_replace_callable(self):
# GH 15055
values = Series(['fooBAD__barBAD', NA])
# test with callable
repl = lambda m: m.group(0).swapcase()
result = values.str.replace('[a-z][A-Z]{2}', repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
# test with wrong number of arguments, raising an error
if compat.PY2:
p_err = r'takes (no|(exactly|at (least|most)) ?\d+) arguments?'
else:
p_err = (r'((takes)|(missing)) (?(2)from \d+ to )?\d+ '
r'(?(3)required )positional arguments?')
repl = lambda: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x, y=None: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
# test regex named groups
values = Series(['Foo Bar Baz', NA])
pat = r"(?P<first>\w+) (?P<middle>\w+) (?P<last>\w+)"
repl = lambda m: m.group('middle').swapcase()
result = values.str.replace(pat, repl)
exp = Series(['bAR', NA])
tm.assert_series_equal(result, exp)
def test_replace_compiled_regex(self):
# GH 15446
values = Series(['fooBAD__barBAD', NA])
# test with compiled regex
pat = re.compile(r'BAD[_]*')
result = values.str.replace(pat, '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace(pat, '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace(pat, '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace(pat, '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
pat = re.compile(r"(?<=\w),(?=\w)", flags=re.UNICODE)
result = values.str.replace(pat, ", ")
tm.assert_series_equal(result, exp)
# case and flags provided to str.replace along with a compiled
# regex are not allowed and should raise an error
values = Series(['fooBAD__barBAD__bad', NA])
pat = re.compile(r'BAD[_]*')
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', flags=re.IGNORECASE)
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', case=False)
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', case=True)
# test with callable
values = Series(['fooBAD__barBAD', NA])
repl = lambda m: m.group(0).swapcase()
pat = re.compile('[a-z][A-Z]{2}')
result = values.str.replace(pat, repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
def test_repeat(self):
values = Series(['a', 'b', NA, 'c', NA, 'd'])
result = values.str.repeat(3)
exp = Series(['aaa', 'bbb', NA, 'ccc', NA, 'ddd'])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series(['a', 'bb', NA, 'cccc', NA, 'dddddd'])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
rs = Series(mixed).str.repeat(3)
xp = Series(['aaa', NA, 'bbb', NA, NA, 'foofoofoo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('d')])
result = values.str.repeat(3)
exp = Series([u('aaa'), u('bbb'), NA, u('ccc'), NA, u('ddd')])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series([u('a'), u('bb'), NA, u('cccc'), NA, u('dddddd')])
tm.assert_series_equal(result, exp)
def test_match(self):
# New match behavior introduced in 0.13
values = Series(['fooBAD__barBAD', NA, 'foo'])
result = values.str.match('.*(BAD[_]+).*(BAD)')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
values = Series(['fooBAD__barBAD', NA, 'foo'])
result = values.str.match('.*BAD[_]+.*BAD')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
# test passing as_indexer still works but is ignored
values = Series(['fooBAD__barBAD', NA, 'foo'])
exp = Series([True, NA, False])
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*BAD[_]+.*BAD', as_indexer=True)
tm.assert_series_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*BAD[_]+.*BAD', as_indexer=False)
tm.assert_series_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*(BAD[_]+).*(BAD)', as_indexer=True)
tm.assert_series_equal(result, exp)
pytest.raises(ValueError, values.str.match, '.*(BAD[_]+).*(BAD)',
as_indexer=False)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.match('.*(BAD[_]+).*(BAD)')
xp = Series([True, NA, True, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.match('.*(BAD[_]+).*(BAD)')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
# na GH #6609
res = Series(['a', 0, np.nan]).str.match('a', na=False)
exp = Series([True, False, False])
assert_series_equal(exp, res)
res = Series(['a', 0, np.nan]).str.match('a')
exp = Series([True, np.nan, np.nan])
assert_series_equal(exp, res)
def test_extract_expand_None(self):
values = Series(['fooBAD__barBAD', NA, 'foo'])
with tm.assert_produces_warning(FutureWarning):
values.str.extract('.*(BAD[_]+).*(BAD)', expand=None)
def test_extract_expand_unspecified(self):
values = Series(['fooBAD__barBAD', NA, 'foo'])
with tm.assert_produces_warning(FutureWarning):
values.str.extract('.*(BAD[_]+).*(BAD)')
def test_extract_expand_False(self):
# Contains tests like those in test_match and some others.
values = Series(['fooBAD__barBAD', NA, 'foo'])
er = [NA, NA] # empty row
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([['BAD__', 'BAD'], er, er])
tm.assert_frame_equal(result, exp)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([['BAD_', 'BAD'], er, ['BAD_', 'BAD'], er, er, er, er,
er, er])
tm.assert_frame_equal(rs, exp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([[u('BAD__'), u('BAD')], er, er])
tm.assert_frame_equal(result, exp)
# GH9980
# Index only works with one regex group since
# multi-group would expand to a frame
idx = Index(['A1', 'A2', 'A3', 'A4', 'B5'])
with tm.assert_raises_regex(ValueError, "supported"):
idx.str.extract('([AB])([123])', expand=False)
# these should work for both Series and Index
for klass in [Series, Index]:
# no groups
s_or_idx = klass(['A1', 'B2', 'C3'])
f = lambda: s_or_idx.str.extract('[ABC][123]', expand=False)
pytest.raises(ValueError, f)
# only non-capturing groups
f = lambda: s_or_idx.str.extract('(?:[AB]).*', expand=False)
pytest.raises(ValueError, f)
# single group renames series/index properly
s_or_idx = klass(['A1', 'A2'])
result = s_or_idx.str.extract(r'(?P<uno>A)\d', expand=False)
assert result.name == 'uno'
exp = klass(['A', 'A'], name='uno')
if klass == Series:
tm.assert_series_equal(result, exp)
else:
tm.assert_index_equal(result, exp)
s = Series(['A1', 'B2', 'C3'])
# one group, no matches
result = s.str.extract('(_)', expand=False)
exp = Series([NA, NA, NA], dtype=object)
tm.assert_series_equal(result, exp)
# two groups, no matches
result = s.str.extract('(_)(_)', expand=False)
exp = DataFrame([[NA, NA], [NA, NA], [NA, NA]], dtype=object)
tm.assert_frame_equal(result, exp)
# one group, some matches
result = s.str.extract('([AB])[123]', expand=False)
exp = Series(['A', 'B', NA])
tm.assert_series_equal(result, exp)
# two groups, some matches
result = s.str.extract('([AB])([123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one named group
result = s.str.extract('(?P<letter>[AB])', expand=False)
exp = Series(['A', 'B', NA], name='letter')
tm.assert_series_equal(result, exp)
# two named groups
result = s.str.extract('(?P<letter>[AB])(?P<number>[123])',
expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# mix named and unnamed groups
result = s.str.extract('([AB])(?P<number>[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]],
columns=[0, 'number'])
tm.assert_frame_equal(result, exp)
# one normal group, one non-capturing group
result = s.str.extract('([AB])(?:[123])', expand=False)
exp = Series(['A', 'B', NA])
tm.assert_series_equal(result, exp)
# two normal groups, one non-capturing group
result = Series(['A11', 'B22', 'C33']).str.extract(
'([AB])([123])(?:[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one optional group followed by one normal group
result = Series(['A1', 'B2', '3']).str.extract(
'(?P<letter>[AB])?(?P<number>[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, '3']],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# one normal group followed by one optional group
result = Series(['A1', 'B2', 'C']).str.extract(
'(?P<letter>[ABC])(?P<number>[123])?', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], ['C', NA]],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# GH6348
# not passing index to the extractor
def check_index(index):
data = ['A1', 'B2', 'C']
index = index[:len(data)]
s = Series(data, index=index)
result = s.str.extract(r'(\d)', expand=False)
exp = Series(['1', '2', NA], index=index)
tm.assert_series_equal(result, exp)
result = Series(data, index=index).str.extract(
r'(?P<letter>\D)(?P<number>\d)?', expand=False)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'], index=index)
tm.assert_frame_equal(result, exp)
i_funs = [
tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
tm.makeDateIndex, tm.makePeriodIndex, tm.makeRangeIndex
]
for index in i_funs:
check_index(index())
# single_series_name_is_preserved.
s = Series(['a3', 'b3', 'c2'], name='bob')
r = s.str.extract(r'(?P<sue>[a-z])', expand=False)
e = Series(['a', 'b', 'c'], name='sue')
tm.assert_series_equal(r, e)
assert r.name == e.name
def test_extract_expand_True(self):
# Contains tests like those in test_match and some others.
values = Series(['fooBAD__barBAD', NA, 'foo'])
er = [NA, NA] # empty row
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([['BAD__', 'BAD'], er, er])
tm.assert_frame_equal(result, exp)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([['BAD_', 'BAD'], er, ['BAD_', 'BAD'], er, er,
er, er, er, er])
tm.assert_frame_equal(rs, exp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([[u('BAD__'), u('BAD')], er, er])
tm.assert_frame_equal(result, exp)
# these should work for both Series and Index
for klass in [Series, Index]:
# no groups
s_or_idx = klass(['A1', 'B2', 'C3'])
f = lambda: s_or_idx.str.extract('[ABC][123]', expand=True)
pytest.raises(ValueError, f)
# only non-capturing groups
f = lambda: s_or_idx.str.extract('(?:[AB]).*', expand=True)
pytest.raises(ValueError, f)
# single group renames series/index properly
s_or_idx = klass(['A1', 'A2'])
result_df = s_or_idx.str.extract(r'(?P<uno>A)\d', expand=True)
assert isinstance(result_df, DataFrame)
result_series = result_df['uno']
assert_series_equal(result_series, Series(['A', 'A'], name='uno'))
def test_extract_series(self):
# extract should give the same result whether or not the
# series has a name.
for series_name in None, "series_name":
s = Series(['A1', 'B2', 'C3'], name=series_name)
# one group, no matches
result = s.str.extract('(_)', expand=True)
exp = DataFrame([NA, NA, NA], dtype=object)
tm.assert_frame_equal(result, exp)
# two groups, no matches
result = s.str.extract('(_)(_)', expand=True)
exp = DataFrame([[NA, NA], [NA, NA], [NA, NA]], dtype=object)
tm.assert_frame_equal(result, exp)
# one group, some matches
result = s.str.extract('([AB])[123]', expand=True)
exp = DataFrame(['A', 'B', NA])
tm.assert_frame_equal(result, exp)
# two groups, some matches
result = s.str.extract('([AB])([123])', expand=True)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one named group
result = s.str.extract('(?P<letter>[AB])', expand=True)
exp = DataFrame({"letter": ['A', 'B', NA]})
tm.assert_frame_equal(result, exp)
# two named groups
result = s.str.extract(
'(?P<letter>[AB])(?P<number>[123])',
expand=True)
e_list = [
['A', '1'],
['B', '2'],
[NA, NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# mix named and unnamed groups
result = s.str.extract('([AB])(?P<number>[123])', expand=True)
exp = DataFrame(e_list, columns=[0, 'number'])
tm.assert_frame_equal(result, exp)
# one normal group, one non-capturing group
result = s.str.extract('([AB])(?:[123])', expand=True)
exp = DataFrame(['A', 'B', NA])
tm.assert_frame_equal(result, exp)
def test_extract_optional_groups(self):
# two normal groups, one non-capturing group
result = Series(['A11', 'B22', 'C33']).str.extract(
'([AB])([123])(?:[123])', expand=True)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one optional group followed by one normal group
result = Series(['A1', 'B2', '3']).str.extract(
'(?P<letter>[AB])?(?P<number>[123])', expand=True)
e_list = [
['A', '1'],
['B', '2'],
[NA, '3']
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# one normal group followed by one optional group
result = Series(['A1', 'B2', 'C']).str.extract(
'(?P<letter>[ABC])(?P<number>[123])?', expand=True)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# GH6348
# not passing index to the extractor
def check_index(index):
data = ['A1', 'B2', 'C']
index = index[:len(data)]
result = Series(data, index=index).str.extract(
r'(\d)', expand=True)
exp = DataFrame(['1', '2', NA], index=index)
tm.assert_frame_equal(result, exp)
result = Series(data, index=index).str.extract(
r'(?P<letter>\D)(?P<number>\d)?', expand=True)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'], index=index)
tm.assert_frame_equal(result, exp)
i_funs = [
tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
tm.makeDateIndex, tm.makePeriodIndex, tm.makeRangeIndex
]
for index in i_funs:
check_index(index())
def test_extract_single_group_returns_frame(self):
# GH11386 extract should always return DataFrame, even when
# there is only one group. Prior to v0.18.0, extract returned
# Series when there was only one group in the regex.
s = Series(['a3', 'b3', 'c2'], name='series_name')
r = s.str.extract(r'(?P<letter>[a-z])', expand=True)
e = DataFrame({"letter": ['a', 'b', 'c']})
tm.assert_frame_equal(r, e)
def test_extractall(self):
subject_list = [
'dave@google.com',
'tdhock5@gmail.com',
'maudelaperriere@gmail.com',
'rob@gmail.com some text steve@gmail.com',
'a@b.com some text c@d.com and e@f.com',
np.nan,
"",
]
expected_tuples = [
("dave", "google", "com"),
("tdhock5", "gmail", "com"),
("maudelaperriere", "gmail", "com"),
("rob", "gmail", "com"), ("steve", "gmail", "com"),
("a", "b", "com"), ("c", "d", "com"), ("e", "f", "com"),
]
named_pattern = r"""
(?P<user>[a-z0-9]+)
@
(?P<domain>[a-z]+)
\.
(?P<tld>[a-z]{2,4})
"""
expected_columns = ["user", "domain", "tld"]
S = Series(subject_list)
# extractall should return a DataFrame with one row for each
# match, indexed by the subject from which the match came.
expected_index = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(3, 0),
(3, 1),
(4, 0),
(4, 1),
(4, 2),
], names=(None, "match"))
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = S.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# The index of the input Series should be used to construct
# the index of the output DataFrame:
series_index = MultiIndex.from_tuples([
("single", "Dave"),
("single", "Toby"),
("single", "Maude"),
("multiple", "robAndSteve"),
("multiple", "abcdef"),
("none", "missing"),
("none", "empty"),
])
Si = Series(subject_list, series_index)
expected_index = MultiIndex.from_tuples([
("single", "Dave", 0),
("single", "Toby", 0),
("single", "Maude", 0),
("multiple", "robAndSteve", 0),
("multiple", "robAndSteve", 1),
("multiple", "abcdef", 0),
("multiple", "abcdef", 1),
("multiple", "abcdef", 2),
], names=(None, None, "match"))
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = Si.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# MultiIndexed subject with names.
Sn = Series(subject_list, series_index)
Sn.index.names = ("matches", "description")
expected_index.names = ("matches", "description", "match")
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = Sn.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# optional groups.
subject_list = ['', 'A1', '32']
named_pattern = '(?P<letter>[AB])?(?P<number>[123])'
computed_df = Series(subject_list).str.extractall(named_pattern)
expected_index = MultiIndex.from_tuples([
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
expected_df = DataFrame([
('A', '1'),
(NA, '3'),
(NA, '2'),
], expected_index, columns=['letter', 'number'])
tm.assert_frame_equal(computed_df, expected_df)
# only one of two groups has a name.
pattern = '([AB])?(?P<number>[123])'
computed_df = Series(subject_list).str.extractall(pattern)
expected_df = DataFrame([
('A', '1'),
(NA, '3'),
(NA, '2'),
], expected_index, columns=[0, 'number'])
tm.assert_frame_equal(computed_df, expected_df)
def test_extractall_single_group(self):
# extractall(one named group) returns DataFrame with one named
# column.
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
r = s.str.extractall(r'(?P<letter>[a-z])')
i = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
e = DataFrame({"letter": ['a', 'b', 'd', 'c']}, i)
tm.assert_frame_equal(r, e)
# extractall(one un-named group) returns DataFrame with one
# un-named column.
r = s.str.extractall(r'([a-z])')
e = DataFrame(['a', 'b', 'd', 'c'], i)
tm.assert_frame_equal(r, e)
def test_extractall_single_group_with_quantifier(self):
# extractall(one un-named group with quantifier) returns
# DataFrame with one un-named column (GH13382).
s = Series(['ab3', 'abc3', 'd4cd2'], name='series_name')
r = s.str.extractall(r'([a-z]+)')
i = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
e = DataFrame(['ab', 'abc', 'd', 'cd'], i)
tm.assert_frame_equal(r, e)
def test_extractall_no_matches(self):
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
# one un-named group.
r = s.str.extractall('(z)')
e = DataFrame(columns=[0])
tm.assert_frame_equal(r, e)
# two un-named groups.
r = s.str.extractall('(z)(z)')
e = DataFrame(columns=[0, 1])
tm.assert_frame_equal(r, e)
# one named group.
r = s.str.extractall('(?P<first>z)')
e =
|
DataFrame(columns=["first"])
|
pandas.DataFrame
|
# Authors: JG, DC
# Date: 6/3/2021
# Purpose: cleans the csv resulting from fuzzy matching
# Filename: A2_post_fuzzy_cleaning.py
import pandas as pd
import numpy as np
import random
import re
import recordlinkage
import time
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
# uses a regex to check whether a pattern occurs in a string
def find_pattern(string, pat):
res = re.findall(pat, string)
if (len(res) > 0):
return True
else:
return False
# finds the most frequently occurring item in the column
def find_mode(col_of_interest):
list_version = list(col_of_interest.values)
values = sorted(list_version, key=list_version.count, reverse=True)  ## sort the list by frequency in descending order
values_no_dups = list(dict.fromkeys(values))  ## remove duplicates while preserving order
return values_no_dups[0]
# forms a representative application dataframe by using an assigned technique based on the column data type
def form_representative(df, col_to_groupby):
print('**** FORMING REPS ****')
list_of_reps = []
for one in df[col_to_groupby].unique():
temp_df = df.loc[df[col_to_groupby] == one].copy()
to_add = {}
for col in temp_df:
col_type = df.dtypes[col]
if (col_type == "int64"):
to_add[col] = temp_df[col].mean(skipna = True)
elif (col_type == "object"):
to_add[col] = find_mode(temp_df[col])
elif (col_type == "float64"):
to_add[col] = temp_df[col].mean(skipna = True)
elif (col_type == "datetime64[ns]"):
if (find_pattern(str(col),r'START')):
to_add[col] = temp_df[col].min()
elif (find_pattern(str(col),r'END')):
to_add[col] = temp_df[col].max()
else:
to_add[col] = temp_df[col].min()
else:
print("Other type")
list_of_reps.append(to_add)
res = pd.DataFrame(list_of_reps)
print("**** DONE FORMING REPS *****")
return res
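# Illustrative usage sketch; 'match_id', 'amount' and 'city' are hypothetical
# column names, not part of this pipeline. Rows that fuzzy-matched to the same
# id are collapsed into one representative row by the rules above
# (int/float -> mean, object -> mode, datetime -> min/max):
#
#   example_df = pd.DataFrame({
#       'match_id': [1, 1, 2],
#       'amount': [10.0, 12.0, 7.0],
#       'city': ['Ames', 'Ames', 'Des Moines'],
#   })
#   reps = form_representative(example_df, 'match_id')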
# -------- DRIVER CODE --------------
# Read in the fuzzy matching results csv from A1_fuzzy Matching
res =
|
pd.read_csv('../output/fuzzyMatchResult.csv')
|
pandas.read_csv
|
#!python
'''
Copyright 2017 - 2021 Vale
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import numpy as np
import pandas as pd
try:
import pyvista as pv
except:
# we use this base class in environments that don't support VTK
class pv(object):
class UniformGrid(object):
pass
def read(*argv):
pass
''' GetDataObjectType
PolyData == 0
VTK_STRUCTURED_GRID = 2
VTK_RECTILINEAR_GRID = 3
VTK_UNSTRUCTURED_GRID = 4
UniformGrid == 6
VTK_MULTIBLOCK_DATA_SET = 13
'''
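# The integer codes listed above can be used to branch on the kind of dataset a
# reader returns. Minimal illustrative sketch, assuming only the codes
# documented above; vtk_object_name() is a hypothetical helper, not part of the
# original module.
_VTK_TYPE_NAMES = {
  0: 'PolyData',
  2: 'StructuredGrid',
  3: 'RectilinearGrid',
  4: 'UnstructuredGrid',
  6: 'UniformGrid',
  13: 'MultiBlock',
}
def vtk_object_name(obj):
  ''' readable name for a vtk/pyvista data object type code (sketch) '''
  if obj is None:
    return None
  return _VTK_TYPE_NAMES.get(obj.GetDataObjectType(), 'Unknown')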
def pv_read(fp):
''' simple import safe pyvista reader '''
if pv is None: return
return pv.read(fp)
def pv_save(meshes, fp, binary=True):
''' simple import safe pyvista writer '''
if pv is None: return
if not isinstance(meshes, list):
meshes.save(fp, binary)
elif len(meshes) == 1:
meshes[0].save(fp, binary)
else:
pv.MultiBlock(meshes).save(fp, binary)
def vtk_cells_to_flat(cells):
r = []
p = 0
while p < len(cells):
n = cells[p]
r.extend(cells[p+1:p+1+n])
p += n + 1
return r
def vtk_cells_to_faces(cells):
faces = vtk_cells_to_flat(cells)
return np.reshape(faces, (len(faces) // 3, 3))
def vtk_flat_to_cells(flat, nodes = None):
#print(flat)
#print(nodes)
if nodes is None:
nodes = pd.Series(np.arange(len(flat)), flat.index)
n = 0
cells = []
for i in flat.index[::-1]:
n += 1
cells.insert(0, nodes[i])
if flat[i] == 0:
cells.insert(0, n)
n = 0
return np.array(cells)
def pd_detect_xyz(df, z = True):
xyz = None
dfcs = set(df.columns)
for s in [['x','y','z'], ['mid_x','mid_y','mid_z'], ['xworld','yworld','zworld'], ['xc','yc','zc']]:
if z == False:
s.pop()
for c in [str.lower, str.upper,str.capitalize]:
cs = list(map(c, s))
if dfcs.issuperset(cs):
xyz = cs
break
else:
continue
# break the outer loop too if the inner loop ended due to a break
break
if xyz is None and z:
return pd_detect_xyz(df, False)
return xyz
def vtk_nf_to_mesh(nodes, faces):
if len(nodes) == 0:
return pv.PolyData()
if len(faces) == 0:
return pv.PolyData(np.array(nodes))
meshfaces = np.hstack(np.concatenate((np.full((len(faces), 1), 3, dtype=np.int_), faces), 1))
return pv.PolyData(np.array(nodes), meshfaces)
def vtk_df_to_mesh(df, xyz = None):
if pv is None: return
if xyz is None:
xyz = pd_detect_xyz(df)
if xyz is None:
print('geometry/xyz information not found')
return None
print("xyz:",','.join(xyz))
if len(xyz) == 2:
xyz.append('z')
if 'z' not in df:
if '0' in df:
# geotiff first/only spectral channel
print('using first channel as Z value')
df['z'] = df['0']
else:
print('using 0 as Z value')
df['z'] = 0
#pdata = df[xyz].dropna(0, 'all')
#pdata.fillna(0, inplace=True)
pdata = df[xyz]
if 'n' in df and df['n'].max() > 0:
if 'node' in df:
cells = vtk_flat_to_cells(df['n'], df['node'])
nodes = df['node'].drop_duplicates().sort_values()
pdata = pdata.loc[nodes.index]
else:
cells = vtk_flat_to_cells(df['n'])
mesh = pv.PolyData(pdata.values.astype(np.float64), cells)
else:
mesh = pv.PolyData(pdata.values.astype(np.float64))
if 'colour' in df:
mesh.point_arrays['colour'] = df.loc[pdata.index, 'colour']
return mesh
# dmbm_to_vtk
def vtk_dmbm_to_ug(df):
''' datamine block model to uniform grid '''
df_min = df.min(0)
xyzc = ['XC','YC','ZC']
size = df_min[['XINC','YINC','ZINC']].astype(np.int_)
dims = np.add(df_min[['NX','NY','NZ']] ,1).astype(np.int_)
origin = df_min[['XMORIG','YMORIG','ZMORIG']]
grid = pv.UniformGrid(dims, size, origin)
n_predefined = 13
vl = [df.columns[_] for _ in range(n_predefined, df.shape[1])]
cv = [dict() for _ in range(grid.GetNumberOfCells())]
for i,row in df.iterrows():
cell = grid.find_closest_cell(row[xyzc].values)
if cell >= 0:
cv[cell] = row[vl].to_dict()
cvdf = pd.DataFrame.from_records(cv)
for v in vl:
grid.cell_arrays[v] = cvdf[v]
return grid
def vtk_plot_meshes(meshes, point_labels=False):
if pv is None: return
p = pv.Plotter()
c = 0
for mesh in meshes:
if mesh is not None:
p.add_mesh(mesh, opacity=0.5)
if point_labels:
p.add_point_labels(mesh.points, np.arange(mesh.n_points))
c += 1
if c:
print("display", c, "meshes")
p.show()
def vtk_mesh_to_df(mesh):
df =
|
pd.DataFrame()
|
pandas.DataFrame
|
import pytest
import numpy as np
import pandas
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
import matplotlib
import modin.pandas as pd
from modin.pandas.utils import to_pandas
from numpy.testing import assert_array_equal
from .utils import (
random_state,
RAND_LOW,
RAND_HIGH,
df_equals,
df_is_empty,
arg_keys,
name_contains,
test_data_values,
test_data_keys,
test_data_with_duplicates_values,
test_data_with_duplicates_keys,
numeric_dfs,
no_numeric_dfs,
test_func_keys,
test_func_values,
query_func_keys,
query_func_values,
agg_func_keys,
agg_func_values,
numeric_agg_funcs,
quantiles_keys,
quantiles_values,
indices_keys,
indices_values,
axis_keys,
axis_values,
bool_arg_keys,
bool_arg_values,
int_arg_keys,
int_arg_values,
)
# TODO remove once modin-project/modin#469 is resolved
agg_func_keys.remove("str")
agg_func_values.remove(str)
pd.DEFAULT_NPARTITIONS = 4
# Force matplotlib to not use any Xwindows backend.
matplotlib.use("Agg")
class TestDFPartOne:
# Test inter df math functions
def inter_df_math_helper(self, modin_df, pandas_df, op):
# Test dataframe to dataframe
try:
pandas_result = getattr(pandas_df, op)(pandas_df)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df)
else:
modin_result = getattr(modin_df, op)(modin_df)
df_equals(modin_result, pandas_result)
# Test dataframe to int
try:
pandas_result = getattr(pandas_df, op)(4)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4)
else:
modin_result = getattr(modin_df, op)(4)
df_equals(modin_result, pandas_result)
# Test dataframe to float
try:
pandas_result = getattr(pandas_df, op)(4.0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4.0)
else:
modin_result = getattr(modin_df, op)(4.0)
df_equals(modin_result, pandas_result)
# Test transposed dataframes to float
try:
pandas_result = getattr(pandas_df.T, op)(4.0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df.T, op)(4.0)
else:
modin_result = getattr(modin_df.T, op)(4.0)
df_equals(modin_result, pandas_result)
frame_data = {
"{}_other".format(modin_df.columns[0]): [0, 2],
modin_df.columns[0]: [0, 19],
modin_df.columns[1]: [1, 1],
}
modin_df2 = pd.DataFrame(frame_data)
pandas_df2 = pandas.DataFrame(frame_data)
# Test dataframe to different dataframe shape
try:
pandas_result = getattr(pandas_df, op)(pandas_df2)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df2)
else:
modin_result = getattr(modin_df, op)(modin_df2)
df_equals(modin_result, pandas_result)
# Test dataframe to list
list_test = random_state.randint(RAND_LOW, RAND_HIGH, size=(modin_df.shape[1]))
try:
pandas_result = getattr(pandas_df, op)(list_test, axis=1)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(list_test, axis=1)
else:
modin_result = getattr(modin_df, op)(list_test, axis=1)
df_equals(modin_result, pandas_result)
# Test dataframe to series
series_test_modin = modin_df[modin_df.columns[0]]
series_test_pandas = pandas_df[pandas_df.columns[0]]
try:
pandas_result = getattr(pandas_df, op)(series_test_pandas, axis=0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(series_test_modin, axis=0)
else:
modin_result = getattr(modin_df, op)(series_test_modin, axis=0)
df_equals(modin_result, pandas_result)
# Test dataframe to series with different index
series_test_modin = modin_df[modin_df.columns[0]].reset_index(drop=True)
series_test_pandas = pandas_df[pandas_df.columns[0]].reset_index(drop=True)
try:
pandas_result = getattr(pandas_df, op)(series_test_pandas, axis=0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(series_test_modin, axis=0)
else:
modin_result = getattr(modin_df, op)(series_test_modin, axis=0)
df_equals(modin_result, pandas_result)
# Level test
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in modin_df.index]
)
modin_df_multi_level = modin_df.copy()
modin_df_multi_level.index = new_idx
# Defaults to pandas
with pytest.warns(UserWarning):
# Operation against self for sanity check
getattr(modin_df_multi_level, op)(modin_df_multi_level, axis=0, level=1)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_add(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "add")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_div(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "div")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_divide(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "divide")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_floordiv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "floordiv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_mod(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "mod")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_mul(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "mul")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_multiply(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "multiply")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_pow(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# TODO: Revert to others once we have an efficient way of preprocessing for positive
# values
try:
pandas_df = pandas_df.abs()
except Exception:
pass
else:
modin_df = modin_df.abs()
self.inter_df_math_helper(modin_df, pandas_df, "pow")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_sub(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "sub")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_subtract(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "subtract")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_truediv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "truediv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___div__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__div__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___add__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__add__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___radd__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__radd__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___mul__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__mul__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rmul__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rmul__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___pow__(self, data):
modin_df = pd.DataFrame(data)
pandas_df =
|
pandas.DataFrame(data)
|
pandas.DataFrame
|
from datetime import timedelta
import pytest
from pandas import PeriodIndex, Series, Timedelta, date_range, period_range, to_datetime
import pandas._testing as tm
class TestToTimestamp:
def test_to_timestamp(self):
index = period_range(freq="A", start="1/1/2001", end="12/1/2009")
series = Series(1, index=index, name="foo")
exp_index = date_range("1/1/2001", end="12/31/2009", freq="A-DEC")
result = series.to_timestamp(how="end")
exp_index = exp_index + Timedelta(1, "D") - Timedelta(1, "ns")
tm.assert_index_equal(result.index, exp_index)
assert result.name == "foo"
exp_index = date_range("1/1/2001", end="1/1/2009", freq="AS-JAN")
result = series.to_timestamp(how="start")
tm.assert_index_equal(result.index, exp_index)
def _get_with_delta(delta, freq="A-DEC"):
return date_range(
to_datetime("1/1/2001") + delta,
to_datetime("12/31/2009") + delta,
freq=freq,
)
delta = timedelta(hours=23)
result = series.to_timestamp("H", "end")
exp_index = _get_with_delta(delta)
exp_index = exp_index + Timedelta(1, "h") - Timedelta(1, "ns")
tm.assert_index_equal(result.index, exp_index)
delta = timedelta(hours=23, minutes=59)
result = series.to_timestamp("T", "end")
exp_index = _get_with_delta(delta)
exp_index = exp_index + Timedelta(1, "m") - Timedelta(1, "ns")
tm.assert_index_equal(result.index, exp_index)
result = series.to_timestamp("S", "end")
delta = timedelta(hours=23, minutes=59, seconds=59)
exp_index = _get_with_delta(delta)
exp_index = exp_index + Timedelta(1, "s") - Timedelta(1, "ns")
tm.assert_index_equal(result.index, exp_index)
index = period_range(freq="H", start="1/1/2001", end="1/2/2001")
series = Series(1, index=index, name="foo")
exp_index = date_range("1/1/2001 00:59:59", end="1/2/2001 00:59:59", freq="H")
result = series.to_timestamp(how="end")
exp_index = exp_index +
|
Timedelta(1, "s")
|
pandas.Timedelta
|
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.cross_validation import train_test_split, cross_val_score
from imblearn.under_sampling import RandomUnderSampler
from process_loaded_data import check_if_many_relative_followers_to_friends
from datetime import datetime
from pymongo import MongoClient
from tweet_scrape_processor import process_tweet
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report, confusion_matrix
from dill import pickle
"""
This module is used to create the random forest ensemble that will classify
a user as real or fake based on a single tweet and the user data embedded
in that json object
Previous research in the area of classifying twitter users as real or fake
has done so by using class A (lightweight) and class B (costlier) features.
Lightweight features include everything that you can get from a single tweet
(total tweets, follower, likes, account creation date) as these are embedded
in the json object that one can get when downloading a tweet via twitter's
API. Costlier features include a user's tweet history,
meaning the tweets themselves.
The contribution to the research community of this lightweight classifier is
a classification method that relies solely on class A features. The approach
is as follows: a) create features from user's account history (total likes,
total tweets, total followers, total friends, etc) b) create features that
express relative volume (total likes divided by total number of followers,
total tweets divided by total number of friends, etc) as it was observed that
some accounts have hundreds and thousands of tweets but very few people in
their network c) create features that express behavior rate (total likes per
day, total tweets per day, total likes per friends per day) as the account
creation date is available in the json object and it was observed that fake
accounts do "machine gun" tweeting where they tweet very frequently in a small
period of time. These set of features was added in order to also make the
model less naive to new users
No features took the content or the words of the tweet into account (i.e. NLP
based prediction) as the premise is that a human is always behind the message
being artificially propagated. The behavior captured by the tweet was taken
into account by looking at hashtag usage, mentions, whether the tweet was
favorited by another person, etc.
The classification model is a random forest ensemble made up of three random
forest models.
Random Forest 1 (RF1) takes in account history features and relative volume
features
Random Forest 2 (RF2) takes in behavior rate features that look at account
history features per day and relative volume features per day
Random Forest 3 (RF3) takes in the predicted probabilities of Random Forest
1 and Random Forest 2, along with all of these models' features, and then
makes the final prediction.
The final Random Forest is able to balance out the work of the previous ones
by understanding the user patterns along the two major facets: account
history and account behavior rate.
The ten fold cross validated accuracy of RF1 is 97%, RF2 has 95%, and RF3
has 98%. Previous research using this dataset achieved these kinds of scores
as well. However, they did so with class A and class B features. The
contribution of this work is that this kind of performance was attained
using only class A features.
To run this, just run the function:
create_ensemble_model()
Todo:
* train the model with more samples from today's set of Twitter's
false negatives so that the model can understand the patterns of
the spammers of today
"""
def view_feature_importances(df, model):
"""
Args:
df (pandas dataframe): dataframe which has the original data
model (sklearn model): this is the sklearn classification model that
has already been fit (work with tree based models)
Returns:
nothing, this just prints the feature importances in descending order
"""
columns = df.columns
features = model.feature_importances_
featimps = []
for column, feature in zip(columns, features):
featimps.append([column, feature])
print(pd.DataFrame(featimps, columns=['Features',
'Importances']).sort_values(by='Importances',
ascending=False))
def evaluate_model(model, X_train, y_train):
"""
Args:
model (sklearn classification model): this model from sklearn that
will be used to fit the data and to compute the 10 fold cross val score
X_train (2d numpy array): this is the feature matrix
y_train (1d numpy array): this is the array of targets
Returns:
prints information about the model's accuracy using 10
fold cross validation
model (sklearn classification model): the model that has already been
fit to the data
"""
print(np.mean(cross_val_score(model, X_train, y_train,
cv=10, n_jobs=-1, verbose=10)))
model.fit(X_train, y_train)
return model
def write_model_to_pkl(model, model_name):
"""
Args:
model_name (str): this is the name of the model
model (sklearn fit model): the sklearn classification model
that will be saved to a pkl file
Returns:
nothing, saves the model to a pkl file
"""
with open('models/{}_model.pkl'.format(model_name), 'w+') as f:
pickle.dump(model, f)
def view_classification_report(model, X_test, y_test):
"""
Args
model (sklearn classification model): this model from sklearn that
has already been fit
X_test (2d numpy array): this is the feature matrix
y_test (1d numpy array): this is the array of targets
Returns
nothing, this is just a wrapper for the classification report
"""
print(classification_report(y_test, model.predict(X_test)))
def gridsearch(paramgrid, model, X_train, y_train):
"""
Args:
paramgrid (dictionary): a dictionary of lists where the keys are the
model's tunable parameters and the values are a list of the
different parameter values to search over
X_train (2d numpy array): this is the feature matrix
y_train (1d numpy array): this is the array of targets
Returns:
best_model (sklearn classifier): a fit sklearn classifier with the
best parameters from the gridsearch
gridsearch (gridsearch object): the gridsearch object that has
already been fit
"""
gridsearch = GridSearchCV(model,
paramgrid,
n_jobs=-1,
verbose=10,
cv=10)
gridsearch.fit(X_train, y_train)
best_model = gridsearch.best_estimator_
print('these are the parameters of the best model')
print(best_model)
print('\nthis is the best score')
print(gridsearch.best_score_)
return best_model, gridsearch
def balance_classes(sm, X, y):
"""
Args:
sm (imblearn class): this is an imbalance learn oversampling or
undersampling class
X (2d numpy array): this is the feature matrix
y (1d numpy array): this is the array of the targets
Returns:
X (2d numpy array): this is the balanced feature matrix
y (1d numpy array): this is the corresponding balanced target array
Returns X and y after being fit with the resampling method
"""
X, y = sm.fit_sample(X, y)
return X, y
def load_all_training_data():
"""
Args:
- none
Returns:
df (pandas dataframe): the training dataframe with the ff
things done to it:
a) protected accounts dropped
b) irrelevant columns removed
"""
df = pd.read_csv('data/all_user_data.csv')
df = df.query('protected != 1')
df.drop(['profile_image_url_https',
'profile_sidebar_fill_color',
'profile_text_color',
'profile_background_color',
'profile_link_color',
'profile_image_url',
'profile_background_image_url_https',
'profile_banner_url',
'profile_background_image_url',
'profile_background_tile',
'profile_sidebar_border_color',
'default_profile',
'file',
'time_zone',
'screen_name',
'utc_offset',
'protected'], axis=1, inplace=True)
return df
def get_most_recent_tweets_per_user():
"""
Args:
none
Returns
df (pandas dataframe): Returns a dataframe with only one tweet per
row which is the MOST recent tweet recorded for that user_id
"""
tweetdf = pd.read_csv('data/training_tweets.csv')
tweetdf.timestamp = pd.to_datetime(tweetdf.timestamp)
index = tweetdf.groupby('user_id').apply(lambda x: np.argmax(x.timestamp))
tweetdf = tweetdf.loc[index.values]
tweetdf = tweetdf.reset_index(drop=True)
tweetdf.drop('Unnamed: 0', axis=1, inplace=True)
return tweetdf
def load_master_training_df():
"""
Args:
none
Returns
df (pandas dataframe): Returns dataframe combining most recent tweet
info with user info. notes on the columns:
updated - when the account was last updated
geo_enabled - if the account is geo enabled
description - text which has the user input self-description
verified - if the account is verified or not
followers_count - number of followers
location - string, location
default_profile_image - binary, yes or no
listed_count - how many times the account was listed
statuses count - number of tweets posted
friends_count - number of accounts the user is following
name - user specified user name
lang - user specified user language (CANNOT BE USED)
favourites_count - number of items favourited
url - user specified url
created_at - date the account was created
user_id - twitter assigned user id (unique in the twittersphere)
favorite_count - times the tweet was favorited
num_hashtags - number of hashtags used in the tweet
text - the tweet contents
source - the device used to upload the tweet
num_mentions - number of users mentioned in the tweet
timestamp - timestamp of the tweet
geo - if the tweet was geo localized or not
place - user specified place of the tweet
retweet_count - number of times the tweet was retweeted
"""
df = load_all_training_data()
tweetdf = get_most_recent_tweets_per_user()
users_who_tweeted = set(tweetdf.user_id.apply(int))
df = df[df.id.isin(users_who_tweeted)]
df['user_id'] = df['id']
df = pd.merge(df, tweetdf, on='user_id')
df.drop(['id',
'label_x',
'reply_count',
'file'], axis=1, inplace=True)
df.updated = pd.to_datetime(df.updated)
df.created_at = df.created_at.apply(convert_created_time_to_datetime)
account_age = df.timestamp - df.created_at
account_age = list(map(get_account_age_in_days, account_age.values))
df['account_age'] = account_age
return df
def get_account_age_in_days(numpy_time_difference):
"""
Args
numpy_time_difference (numpy timedelta): a numpy timedelta object
that is the difference between the user's account creation date
and the date of their most recent tweet
Return
account_age (int)
"""
return int(numpy_time_difference/1000000000/60/60/24)+1
def convert_created_time_to_datetime(datestring):
"""
Args:
datestring (str): a string object either as a date or
a unix timestamp
Returns
datetime_object (pandas datetime object): the converted string as
a datetime object
"""
if len(datestring) == 30:
return
|
pd.to_datetime(datestring)
|
pandas.to_datetime
|
import Functions
import pandas as pd
import matplotlib.pyplot as plt
def group_sentiment(dfSentiment):
dfSentiment['datetime'] = pd.to_datetime(dfSentiment['created_utc'], unit='s')
dfSentiment['date'] = pd.DatetimeIndex(dfSentiment['datetime']).date
dfSentiment = dfSentiment[
['created_utc', 'negative_comment', 'neutral_comment', 'positive_comment', 'datetime', 'date']]
dfSentiment = dfSentiment.groupby(by=['date']).sum()
return dfSentiment
def cleaning(df):
# Importing Bot user names
bots = pd.read_csv(r'Data\Bots.csv', index_col=0, sep=';')
# Removing bots from the data
df = df[~df.author.isin(bots.bot_names)]
# Removing any NA's
df = df.dropna()
# Cleaning the text data (the clutter at the bottom tried each pattern individually before they were combined; delete if you are not interested)
keeplist = "?.!,'_-"
import re
Adj_comment = pd.DataFrame(
[re.sub(r'[\S]+\.(net|com|org|info|edu|gov|uk|de|ca|jp|fr|au|us|ru|ch|it|nel|se|no|es|mil)'
r'[\S]*\s?|(/u/|u/)\S+|(/r/|r/)\S+|[\x00-\x1f\x7f-\xff]|[0-9]+|(&g|&l)\S+'
r'|[^\s\w' + keeplist + ']', "", elem) for elem in df['body']], columns=['body'])
df['body'] = Adj_comment['body']
return df
period = ['2014', '2015_01', '2015_02', '2015_03', '2015_04', '2015_05', '2015_06', '2015_07', '2015_08', '2015_09',
'2015_10', '2015_11', '2015_12', '2016_01', '2016_02', '2016_03', '2016_04', '2016_05', '2016_06', '2016_07',
'2016_08', '2016_09', '2016_10',
'2016_11', '2016_12', '2017_01', '2017_02', '2017_03', '2017_04', '2017_05', '2017_06', '2017_07', '2017_08',
'2017_09',
'2017_10', '2017_11', '2017_12', '2018_01', '2018_02', '2018_03', '2018_04', '2018_05', '2018_06', '2018_07',
'2018_08',
'2018_09', '2018_10', '2018_11', '2018_12', '2019_01', '2019_02', '2019_03', '2019_04', '2019_05', '2019_06',
'2019_07',
'2019_08', '2019_09']
dfAllData = pd.DataFrame()
for sPeriod in period:
query = r"""
#standardSQL
SELECT author, subreddit, created_utc, score, controversiality, body
FROM `fh-bigquery.reddit_comments.{}`
WHERE REGEXP_CONTAINS(body, r'(?i)\b Dash\b')
""".format(sPeriod)
dfData = Functions.collect_big_query(sQuery=query)
print(sPeriod + ' Collected')
dfData = cleaning(dfData)
print(sPeriod + ' cleaned')
dfAllData = dfAllData.append(dfData)
del dfData
dfAllData.to_csv('Dash_sentiment.csv')
coin_list = ['BCH', 'Cardona', 'dogecoin', 'EOS', 'ETH', 'LTC', 'XRP', 'Monero', 'BNB', 'IOTA', 'TEZOS']
dfSubRed = pd.DataFrame()
for scoin in coin_list:
dfTemp =
|
pd.read_csv(scoin + '_sentiment.csv', index_col=0)
|
pandas.read_csv
|
"""
Base strategy class.
Gets inherited by specific strategies.
"""
from ast import Raise
from fractions import Fraction as frac
import pandas as pd
class LoopComplete(Exception):
"""
Raised to indicate that the loop has reached the end of the price_period csv.
"""
def unfrac(fraction, round_to=4):
"""Turn a fraction into a float rounded to the fourth decimal."""
float_num = round(float(fraction), round_to)
# # To avoid a DeprecationWarning from directly going from fractions to floats,
# # do the calculation explicitly.
# numerator = fraction.numerator
# denominator = fraction.denominator
# float_num = round(numerator/denominator, round_to)
return float_num
def price_period_results_path(csv):
"""Path to price_periods results csv files."""
# Make sure we have the file ending
if csv[-4:] != '.csv':
csv = csv + '.csv'
return f'results\\price_periods\\{csv}'
def returns_history_path(csv):
"""Path to returns_history results csv files."""
# Make sure we have the file ending
if csv[-4:] != '.csv':
csv = csv + '.csv'
return f'results\\returns_history\\{csv}'
def strategy_results_path(csv):
"""Path to strategy results csv files."""
# Make sure we have the file ending
if csv[-4:] != '.csv':
csv = csv + '.csv'
return f'results\\strategies\\{csv}'
def period_path(csv):
"""Path to price_period csv files."""
# Make sure we have the file ending
if csv[-4:] != '.csv':
csv = csv + '.csv'
return f'price_period_csv\\{csv}'
def full_path(csv):
"""Path to the csv_files. Used mainly for raw data."""
# Make sure we have the file ending
if csv[-4:] != '.csv':
csv = csv + '.csv'
return f'csv_files\\{csv}'
class Strategy:
"""Base strategy class, specific strategies should inherent this."""
def __init__(
self,
name,
starting_usd,
time_between_action,
price_period_name,
price_df =
|
pd.DataFrame()
|
pandas.DataFrame
|
"""
Example:
>>> from wms.gui._management import Management
>>> management = Management()
>>> management.show_search() # Show search section
>>> management.show_add() # Show add section
>>> management.show_remove() # Show remove section
"""
import sqlite3
from datetime import datetime
import pandas as pd
import streamlit as st
from wms import sesson_state
from wms.components import *
state = sesson_state.get()
class Management:
def __init__(self, connection):
self.connection = connection
self.current_option = ""
self.tables = [table[0] for table in self.connection.cursor().execute(
"SELECT name FROM sqlite_master WHERE type='table';").fetchall()]
try:
self.tables.remove("ImportDetail")
self.tables.remove("TransactionDetail")
except AttributeError as e:
print(e)
except ValueError:
pass
self.customer_columns = customer.columns_names(self.connection)
self.shop_columns = shop.columns_names(self.connection)
self.category_columns = item_category.columns_names(self.connection)
self.item_columns = item.columns_names(self.connection)
self.imports_columns = imports.columns_names(self.connection)
self.transactions_columns = transactions.columns_names(self.connection)
def show_search(self):
with st.beta_container():
self.current_option = st.selectbox("Select table to search: ", self.tables)
col1, col2 = st.beta_columns(2)
if self.current_option == "Customer":
with col1:
st.info("""
Input name to search for customer in the database.
If there is no input, all entries will be shown.\n
*Limit to 1000 rows.*
""")
choice = st.radio("Search by id/name: ", options=['id', 'name'])
if choice == "id":
customer_id = st.number_input("Input customer id: ", step=1, value=0, min_value=0,
max_value=customer.max_id(self.connection) or 0)
elif choice == "name":
customer_name = st.text_input("Input customer name (* to search all): ", value="")
columns = st.multiselect("Select columns to show: ", self.customer_columns)
if not columns:
columns = self.customer_columns
if choice == "id":
data = customer.search_by_id(self.connection, customer_id, columns)
elif choice == "name":
data = customer.search_by_name(self.connection, customer_name, columns)
df = pd.DataFrame.from_records(data, columns=columns)[:1000]
with col2:
with st.beta_expander("Show customer with selected column(s)", expanded=True):
st.dataframe(df)
elif self.current_option == "ItemCategory":
with col1:
st.info("""
Input name to search for category in the database.
If there is no input, all entries will be shown.\n
*Limit to 1000 rows.*
""")
choice = st.radio("Search by id/name: ", options=['id', 'name'])
if choice == "id":
category_id = st.number_input("Input category id: ", step=1, value=0, min_value=0,
max_value=item_category.max_id(self.connection) or 0)
elif choice == "name":
category_name = st.text_input("Input category name (* to search all): ", value="")
columns = st.multiselect("Select columns to search: ", self.category_columns)
if not columns:
columns = self.category_columns
if choice == "id":
data = item_category.search_by_id(self.connection, category_id, columns)
elif choice == "name":
data = item_category.search_by_name(self.connection, category_name, columns)
df = pd.DataFrame.from_records(data, columns=columns)[:1000]
with col2:
with st.beta_expander("Show category with selected column(s)", expanded=True):
st.dataframe(df)
elif self.current_option == "Buyer":
pass
elif self.current_option == "Shop":
with col1:
st.info("""
Input name to search for shop in the database.
If there is no input, all entries will be shown.\n
*Limit to 1000 rows.*
""")
choice = st.radio("Search by id/name: ", options=['id', 'name'])
if choice == "id":
shop_id = st.number_input("Input shop id: ", step=1, value=0, min_value=0,
max_value=shop.max_id(self.connection) or 0)
elif choice == "name":
shop_name = st.text_input("Input shop name (* to search all): ", value="")
columns = st.multiselect("Select columns to show: ", self.shop_columns)
if not columns:
columns = self.shop_columns
if choice == "id":
data = shop.search_by_id(self.connection, shop_id, columns)
elif choice == "name":
data = shop.search_by_name(self.connection, shop_name, columns)
df = pd.DataFrame.from_records(data, columns=columns)[:1000]
with col2:
with st.beta_expander("Show shop with selected column(s)", expanded=True):
st.dataframe(df)
elif self.current_option == "Imports":
with col1:
st.info("""
Input name to search for import record in the database.
If there is no input, all entries will be shown.\n
*Limit to 1000 rows.*
""")
choice = st.radio("Search by id/date/shop: ", options=['id', 'date', 'shop', 'get all records'])
if choice == "id":
import_id = st.number_input("Input import id: ", step=1, value=0, min_value=0,
max_value=imports.max_id(self.connection) or 0)
elif choice == "date":
import_date = datetime.fromordinal(st.date_input("Input date: ",
min_value=
imports.get_min_max_date(self.connection)[0],
max_value=
imports.get_min_max_date(self.connection)[1],
value=
imports.get_min_max_date(self.connection)[0],
).toordinal()).strftime('%Y-%m-%d')
elif choice == "shop":
try:
shop_id = st.selectbox("Input shop id: ",
options=[i for i in range(shop.max_id(self.connection))] or [0])
except TypeError:
shop_id = None
st.warning("There is no shop in the database yet!")
columns = st.multiselect("Select columns to show: ", self.imports_columns)
if not columns:
columns = self.imports_columns
if choice == "id":
data = imports.search_by_id(self.connection, import_id, columns)
elif choice == "date":
data = imports.search_by_date(self.connection, import_date, columns)
elif choice == "shop":
data = imports.search_by_shop_id(self.connection, shop_id, columns)
elif choice == "get all records":
data = imports.search_all(self.connection, columns)
df = pd.DataFrame.from_records(data, columns=columns)[:1000]
with col2:
with st.beta_expander("Show import with selected column(s)", expanded=True):
st.dataframe(df)
with st.beta_expander("Search for import details"):
try:
selected_id = int(st.selectbox("Choose which import record to search: ",
options=df["importID"].unique().tolist()))
except TypeError:
selected_id = None
data = import_detail.search_by_import_id(self.connection, selected_id)
st.dataframe(pd.DataFrame.from_records(data, columns=import_detail.columns_names(
self.connection)))
elif self.current_option == "Transactions":
with col1:
st.info("""
Input name to search for transaction record in the database.
If there is no input, all entries will be shown.\n
*Limit to 1000 rows.*
""")
choice = st.radio("Search by id/date/status/customer/shop: ",
options=['id', 'date', 'status', 'customer', 'shop', 'get all records'])
if choice == "id":
transaction_id = st.number_input("Input transaction id: ", step=1, value=0, min_value=0,
max_value=transactions.max_id(self.connection) or 0)
elif choice == "date":
transaction_date = datetime.fromordinal(st.date_input("Input date: ",
min_value=
transactions.get_min_max_date(
self.connection)[0],
max_value=
transactions.get_min_max_date(
self.connection)[1],
value=
transactions.get_min_max_date(
self.connection)[0],
).toordinal()).strftime('%Y-%m-%d')
elif choice == "status":
transaction_status = st.radio("Transaction status: ", options=['Pending', 'Completed']).upper()
elif choice == "customer":
customer_id = st.number_input("Input customer id: ", step=1, value=0, min_value=0,
max_value=customer.max_id(self.connection) or 0)
elif choice == "shop":
try:
shop_id = st.selectbox("Input shop id: ",
options=[i for i in range(shop.max_id(self.connection))] or [0])
except TypeError:
shop_id = None
st.warning("There is no shop in the database yet!")
columns = st.multiselect("Select columns to show: ", self.transactions_columns)
if not columns:
columns = self.transactions_columns
if choice == "id":
data = transactions.search_by_id(self.connection, transaction_id, columns)
elif choice == "date":
data = transactions.search_by_date(self.connection, transaction_date, columns)
elif choice == "status":
data = transactions.search_by_status(self.connection, transaction_status, columns)
elif choice == "customer":
data = transactions.search_by_customer_id(self.connection, customer_id, columns)
elif choice == "shop":
data = transactions.search_by_shop_id(self.connection, shop_id, columns)
elif choice == "get all records":
data = transactions.search_all(self.connection, columns)
df = pd.DataFrame.from_records(data, columns=columns)[:1000]
with col2:
with st.beta_expander("Show transaction with selected column(s)", expanded=True):
st.dataframe(df)
with st.beta_expander("Search for transaction details"):
try:
selected_id = int(st.selectbox("Choose which transaction record to search: ",
options=df["transactionID"].unique().tolist()))
except TypeError:
selected_id = None
data = transaction_detail.search_by_transaction_id(self.connection, selected_id)
st.dataframe(pd.DataFrame.from_records(data, columns=transaction_detail.columns_names(
self.connection)))
elif self.current_option == "Item":
with col1:
st.info("""
                    Select a search option to look up items in the database.
                    If there is no input, all entries will be shown.\n
*Limit to 1000 rows.*
""")
choice = st.radio("Search by id/name/category/shop: ", options=['id', 'name', 'category', 'shop'])
if choice == "id":
item_id = st.number_input("Input item id: ", step=1, value=0, min_value=0,
max_value=item.max_id(self.connection) or 0)
elif choice == "name":
item_name = st.text_input("Input item name (* to search all): ", value="")
elif choice == "category":
category_id = st.number_input("Input category id: ", step=1, value=0, min_value=0,
max_value=item_category.max_id(self.connection) or 0)
elif choice == "shop":
shop_id = st.number_input("Input shop id: ", step=1, value=0, min_value=0,
max_value=shop.max_id(self.connection) or 0)
columns = st.multiselect("Select columns to show: ", self.item_columns)
if not columns:
columns = self.item_columns
if choice == "id":
data = item.search_by_id(self.connection, item_id, columns)
elif choice == "name":
data = item.search_by_name(self.connection, item_name, columns)
elif choice == "category":
data = item.search_by_category_id(self.connection, category_id, columns)
elif choice == "shop":
data = item.search_by_shop_id(self.connection, shop_id, columns)
df = pd.DataFrame.from_records(data, columns=columns)[:1000]
with col2:
with st.beta_expander("Show item with selected column(s)", expanded=True):
st.dataframe(df)
def show_add(self):
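        """Render the 'Add' view: pick a table and insert a new record with an auto-incremented id, then display the inserted row."""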
with st.beta_container():
self.current_option = st.selectbox("Select table to add: ", self.tables)
if self.current_option == "Customer":
customer_name = st.text_input("Input customer name: ", value="")
_last_customer_id = customer.max_id(self.connection) or -1
customer_id = _last_customer_id + 1
if st.button("Add customer"):
check = customer.insert(self.connection, customer_id, customer_name)
with st.spinner("Adding customer..."):
if check is None:
st.exception("Error when adding customer!")
st.stop()
else:
st.success("Customer was added successfully!")
data = customer.search_by_id(self.connection, customer_id)
st.dataframe(pd.DataFrame.from_records(data, columns=self.customer_columns))
elif self.current_option == "ItemCategory":
category_name = st.text_input("Input category name: ", value="")
_last_category_id = item_category.max_id(self.connection) or -1
category_id = _last_category_id + 1
if st.button("Add item category"):
check = item_category.insert(self.connection, category_id, category_name)
with st.spinner("Adding item category..."):
if check is None:
st.exception("Error when adding category!")
st.stop()
else:
st.success("Item category was added successfully!")
data = item_category.search_by_id(self.connection, category_id)
st.dataframe(pd.DataFrame.from_records(data, columns=self.category_columns))
elif self.current_option == "Buyer":
pass
elif self.current_option == "Shop":
shop_name = st.text_input("Input shop name: ", value="")
_last_shop_id = shop.max_id(self.connection) or -1
shop_id = _last_shop_id + 1
if st.button("Add shop"):
check = shop.insert(self.connection, shop_id, shop_name)
with st.spinner("Adding shop..."):
if check is None:
st.exception("Error when adding shop!")
st.stop()
else:
                            st.success("Shop was added successfully!")
data = shop.search_by_id(self.connection, shop_id)
st.dataframe(pd.DataFrame.from_records(data, columns=self.shop_columns))
elif self.current_option == "Imports":
st.warning("Not yet implemented.")
st.stop()
elif self.current_option == "Transactions":
st.warning("Not yet implemented.")
st.stop()
elif self.current_option == "Item":
item_name = st.text_input("Input item name: ", value="")
_last_item_id = item.max_id(self.connection) or -1
item_id = _last_item_id + 1
quantity = st.number_input("Input item quantity: ", step=1, value=0, min_value=0)
categories = {}
for category in item_category.search_all(self.connection):
categories[category[0]] = category[1]
category_name = st.selectbox("Input item category: ", list(categories.values()))
category_id = None
for key, value in categories.items():
if value == category_name:
category_id = key
st.write(f"Category ID currently: {category_id}")
shops = {}
                for shop_record in shop.search_all(self.connection):  # avoid shadowing the shop module
                    shops[shop_record[0]] = shop_record[1]
shop_name = st.selectbox("Input shop name: ", list(shops.values()))
shop_id = None
for key, value in shops.items():
if value == shop_name:
shop_id = key
st.write(f"Shop ID currently: {shop_id}")
if st.button("Add item"):
check = item.insert(self.connection, item_id, item_name, quantity, category_id, shop_id)
with st.spinner("Adding item..."):
if check is None:
st.exception("Error when adding item!")
st.stop()
else:
                            st.success("Item was added successfully!")
data = item.search_by_id(self.connection, item_id)
st.dataframe(pd.DataFrame.from_records(data, columns=self.item_columns))
def show_remove(self):
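        """Render the 'Remove' view: search a table, select record id(s) and delete them unless other tables still reference them."""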
tables_for_remove = self.tables.copy()
try:
tables_for_remove.remove("Imports")
tables_for_remove.remove("Transactions")
except ValueError:
pass
with st.beta_container():
self.current_option = st.selectbox("Select table to remove: ", tables_for_remove)
if self.current_option == "Customer":
st.info("""
Input id or name to search for customer to remove from the database.
                If there is no input, all entries will be shown.
""")
choice = st.radio("Search by id/name: ", options=['id', 'name'])
if choice == "id":
customer_id = st.number_input("Input customer id: ", step=1, value=0, min_value=0,
max_value=customer.max_id(self.connection) or 0)
data = customer.search_by_id(self.connection, customer_id)
elif choice == "name":
customer_name = st.text_input("Input customer name: ", value="")
data = customer.search_by_name(self.connection, customer_name)
df = pd.DataFrame.from_records(data, columns=self.customer_columns)[:1000]
with st.beta_expander("Show all customers"):
st.dataframe(df)
with st.beta_expander("Remove customer(s)", expanded=True):
if choice == "id":
data = customer.search_by_id(self.connection, customer_id)
elif choice == "name":
selected_ids = st.multiselect("Select customer id(s): ", df["customerID"])
data = customer.search_by_name(self.connection, customer_name)
if len(df["customerID"]) == 1:
selected_ids = df["customerID"].values
try:
df = pd.concat([pd.DataFrame.from_records(data, columns=self.customer_columns).loc[
df["customerID"] == i] for i in selected_ids], ignore_index=True)
except ValueError:
pass
st.dataframe(df)
if st.button("Remove customer"):
for Cid in selected_ids:
if transactions.search_by_customer_id(self.connection, int(Cid)):
st.error(f"""
Customer {Cid} can't be removed. They have already made a transaction.
""")
else:
removed = customer.delete_by_id(self.connection, int(Cid))
st.experimental_rerun()
elif self.current_option == "ItemCategory":
st.info("""
Input id or name to search for item category to remove from the database.
                If there is no input, all entries will be shown.
""")
choice = st.radio("Search by id/name: ", options=['id', 'name'])
if choice == "id":
category_id = st.number_input("Input category id: ", step=1, value=0, min_value=0,
max_value=item_category.max_id(self.connection) or 0)
data = item_category.search_by_id(self.connection, category_id)
elif choice == "name":
category_name = st.text_input("Input category name: ", value="")
data = item_category.search_by_name(self.connection, category_name)
df = pd.DataFrame.from_records(data, columns=self.category_columns)[:1000]
with st.beta_expander("Show all item categories"):
st.dataframe(df)
with st.beta_expander("Remove item category(s)", expanded=True):
if choice == "id":
data = item_category.search_by_id(self.connection, category_id)
elif choice == "name":
selected_ids = st.multiselect("Select category id(s): ", df["categoryID"])
data = item_category.search_by_name(self.connection, category_name)
if len(df["categoryID"]) == 1:
selected_ids = df["categoryID"].values
try:
df = pd.concat([pd.DataFrame.from_records(data, columns=self.category_columns).loc[
df["categoryID"] == i] for i in selected_ids], ignore_index=True)
except ValueError:
pass
st.dataframe(df)
if st.button("Remove item category"):
for ICid in selected_ids:
if item.search_by_category_id(self.connection, int(ICid)):
st.error(f"""
Item category {ICid} can't be removed. There is at least an item assigned to it.
""")
else:
removed = item_category.delete_by_id(self.connection, int(ICid))
st.experimental_rerun()
elif self.current_option == "Buyer":
pass
elif self.current_option == "Shop":
st.info("""
Input id or name to search for shop to remove from the database.
                If there is no input, all entries will be shown.
""")
choice = st.radio("Search by id/name: ", options=['id', 'name'])
if choice == "id":
shop_id = st.number_input("Input shop id: ", step=1, value=0, min_value=0,
max_value=shop.max_id(self.connection) or 0)
data = shop.search_by_id(self.connection, shop_id)
elif choice == "name":
shop_name = st.text_input("Input shop name: ", value="")
data = shop.search_by_name(self.connection, shop_name)
df = pd.DataFrame.from_records(data, columns=self.shop_columns)[:1000]
with st.beta_expander("Show all shops"):
st.dataframe(df)
with st.beta_expander("Remove shop(s)"):
if choice == "id":
data = shop.search_by_id(self.connection, shop_id)
elif choice == "name":
selected_ids = st.multiselect("Select shop id(s): ", df["shopID"])
data = shop.search_by_name(self.connection, shop_name)
if len(df["shopID"]) == 1:
selected_ids = df["shopID"].values
try:
                        df = pd.concat([pd.DataFrame.from_records(data, columns=self.shop_columns).loc[
                            df["shopID"] == i] for i in selected_ids], ignore_index=True)
                    except ValueError:
                        pass
import os
import logging
import platform
import json
import warnings
import pandas as pd
from pandas.core.common import SettingWithCopyWarning
from io import StringIO
import alphatims.bruker
import alphatims.utils
# visualization
import panel as pn
import bokeh.server.views.ws
import plotly.express as px
import holoviews as hv
# local
import alphaviz
import alphaviz.utils
import alphaviz.io
import alphaviz.preprocessing
import alphaviz.plotting
warnings.simplefilter(action="ignore", category=SettingWithCopyWarning)
def get_css_style(
file_name="dashboard_style.css",
directory=alphaviz.utils.STYLE_PATH
):
file = os.path.join(
directory,
file_name
)
with open(file) as f:
return f.read()
def init_panel():
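    """Activate the Panel extensions used by the dashboard: the custom CSS, Bokeh (via HoloViews), Plotly and Tabulator."""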
pn.extension(raw_css=[get_css_style()])
hv.extension('bokeh')
pn.extension('plotly')
pn.extension('tabulator')
def update_config(filename, height=400, width=900, ext='svg'):
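    """Return a Plotly figure config dict that hides the logo, trims the mode bar and sets the filename, size and format used when the plot is downloaded as an image."""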
config = {
'displaylogo': False,
# 'responsive': True,
'toImageButtonOptions': {
'format': f'{ext}', # one of png, svg, jpeg, webp
'filename': f'{filename}',
'height': height,
'width': width,
'scale': 1 # Multiply title/legend/axis/canvas size by this factor
},
'modeBarButtonsToRemove': ['select2d', 'lasso2d', 'autoScale2d', 'toggleSpikelines'],
# 'scrollZoom': True,
}
return config
if platform.system() == 'Windows':
raw_folder_placeholder = r'D:\bruker\21min_HELA_proteomics'
output_folder_placeholder = r'D:\bruker\21min_HELA_proteomics\txt'
fasta_path_placeholder = r'D:\fasta_files\human.fasta'
else:
    # TODO: add Linux support
raw_folder_placeholder = '/Users/test/bruker/21min_HELA_proteomics'
output_folder_placeholder = '/Users/test/bruker/21min_HELA_proteomics/txt'
fasta_path_placeholder = '/Users/test/fasta_files/human.fasta'
class BaseWidget(object):
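    """Base class for widgets that need to notify dependants about updates.

    A hidden IntInput serves as an update counter: callbacks registered through
    ``self.depends``/``self.active_depends`` are re-evaluated every time
    ``trigger_dependancy`` increments its value.
    """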
def __init__(self, name):
self.name = name
self.__update_event = pn.widgets.IntInput(value=0)
self.depends = pn.depends(self.__update_event.param.value)
self.active_depends = pn.depends(
self.__update_event.param.value,
watch=True
)
def trigger_dependancy(self):
self.__update_event.value += 1
class HeaderWidget(object):
"""This class creates a layout for the header of the dashboard with the name of the tool and all links to the MPI website, the MPI Mann Lab page and the GitHub repo.
Parameters
----------
title : str
The name of the tool.
Attributes
----------
header_title : pn.pane.Markdown
A Panel Markdown pane that returns the title of the tool.
mpi_biochem_logo : pn.pane.PNG
        A Panel PNG pane that embeds a png image file of the MPI Biochemistry logo and makes the image clickable with the link to the official website.
    mpi_logo : pn.pane.JPG
        A Panel JPG pane that embeds a jpg image file of the Max Planck Gesellschaft logo and makes the image clickable with the link to the official website.
github_logo : pn.pane.PNG
A Panel PNG pane that embeds a png image file of the GitHub logo and makes the image clickable with the link to the GitHub repository of the project.
"""
def __init__(
self,
title,
img_folder_path,
github_url
):
self.layout = None
self.header_title = pn.pane.Markdown(
f'# {title}',
sizing_mode='stretch_width',
)
self.biochem_logo_path = os.path.join(
img_folder_path,
"mpi_logo.png"
)
self.mpi_logo_path = os.path.join(
img_folder_path,
"max-planck-gesellschaft.jpg"
)
self.github_logo_path = os.path.join(
img_folder_path,
"github.png"
)
self.mpi_biochem_logo = pn.pane.PNG(
self.biochem_logo_path,
link_url='https://www.biochem.mpg.de/mann',
width=60,
height=60,
align='start'
)
self.mpi_logo = pn.pane.JPG(
self.mpi_logo_path,
link_url='https://www.biochem.mpg.de/en',
height=62,
embed=True,
width=62,
margin=(5, 0, 0, 5),
css_classes=['opt']
)
self.github_logo = pn.pane.PNG(
self.github_logo_path,
link_url=github_url,
height=70,
align='end'
)
def create_layout(self):
self.layout = pn.Row(
self.mpi_biochem_logo,
self.mpi_logo,
self.header_title,
self.github_logo,
height=73,
sizing_mode='stretch_width'
)
return self.layout
class MainWidget(object):
    """This class creates a layout for the main part of the dashboard with the description of the tool and a button to download the manual for the project's GUI.
Parameters
----------
description : str
The short description of the tool.
manual_path : str
The path to the GUI manual.
Attributes
----------
project_description : pn.pane.Markdown
A Panel Markdown pane that shows the description of the project.
manual : pn.widgets.FileDownload
        A Panel FileDownload widget that allows downloading the GUI manual of the tool.
"""
def __init__(
self,
description,
manual_path
):
self.layout = None
self.project_description = pn.pane.Markdown(
description,
margin=(10, 0, 10, 0),
css_classes=['main-part'],
align='start',
width=460
)
self.manual = pn.widgets.FileDownload(
file=manual_path,
label='Download Manual',
button_type='default',
align='center',
auto=True,
height=31,
width=200,
margin=(0, 20, 0, 0)
)
self.download_new_version_button = pn.widgets.Button(
button_type="danger",
align='center',
height=31,
width=200,
margin=(20, 20, 0, 0)
)
def create_layout(self):
latest_github_version = alphaviz.utils.check_github_version(silent=False)
if latest_github_version and latest_github_version != alphaviz.__version__:
self.download_new_version_button.name = f"Download version {latest_github_version}"
download_new_version_button = self.download_new_version_button
download_new_version_button.js_on_click(
code="""window.open("https://github.com/MannLabs/alphaviz/releases/latest")"""
)
else:
download_new_version_button = None
self.layout = pn.Row(
self.project_description,
pn.layout.HSpacer(width=500),
pn.Column(
self.manual,
download_new_version_button,
align='center',
),
background='#eaeaea',
align='center',
sizing_mode='stretch_width',
height=190,
margin=(10, 8, 10, 8),
css_classes=['background']
)
return self.layout
class DataImportWidget(BaseWidget):
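    """Card collecting all data-import inputs (raw Bruker folder, raw file, analysis
    output folder, fasta file, prediction checkbox) together with the 'Load Data' button
    that reads the raw data with AlphaTims, the MaxQuant or DIA-NN output tables and,
    optionally, the peptdeep prediction models."""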
def __init__(self):
super().__init__(name="Data")
self.raw_data = None
self.mq_evidence = None
self.mq_all_peptides = None
self.mq_msms = None
self.mq_protein_groups = None
self.mq_summary = None
self.diann_proteins = None
self.diann_peptides = None
self.diann_statist = None
self.predlib = None
self.model_mgr = None
self.psm_df = pd.DataFrame()
self.fasta = None
self.layout = None
self.settings = {
'path_evidence_file': str(),
'analysis_software': str()
}
self.path_raw_folder = pn.widgets.TextInput(
name='Specify the full path to the folder with unprocessed Bruker files:\u002a',
placeholder=raw_folder_placeholder,
width=900,
sizing_mode='stretch_width',
margin=(5, 15, 0, 15)
)
self.ms_file_name = pn.widgets.Select(
name='Select the raw file:\u002a',
size=10,
width=900,
sizing_mode='stretch_width',
margin=(5, 15, 0, 15)
)
self.path_output_folder = pn.widgets.TextInput(
name='Specify the full path to the output folder of any supported software analysis tool:',
placeholder=output_folder_placeholder,
width=900,
sizing_mode='stretch_width',
margin=(15, 15, 0, 15)
)
self.path_fasta_file = pn.widgets.TextInput(
# TODO: remove the fixed fasta file before release
# value='/Users/eugeniavoytik/copied/Bruker/MaxQuant_output_tables/20210413_TIMS03_EVO03_PaSk_MA_HeLa_200ng_S1-A1_1_24848.d/txt/human.fasta',
name='Specify the full path to the fasta file:',
placeholder=fasta_path_placeholder,
width=900,
sizing_mode='stretch_width',
margin=(15, 15, 15, 15)
)
self.is_prediction = pn.widgets.Checkbox(
name='Activate the prediction',
margin=(5, 0, 5, 15),
)
# UPLOAD DATA
self.upload_button = pn.widgets.Button(
name='Load Data',
button_type='primary',
height=31,
width=250,
align='center',
margin=(0, 0, 0, 0)
)
self.upload_progress = pn.indicators.Progress(
max=1,
value=1,
active=False,
bar_color='light',
width=250,
align='center',
margin=(-10, 0, 30, 0)
)
self.import_error = pn.pane.Alert(
alert_type="danger",
object='',
sizing_mode='stretch_width',
margin=(10, 0, 5, 0),
)
self.mass_dict = alphaviz.utils.get_mass_dict(
modfile=os.path.join(
alphaviz.utils.DATA_PATH,
'modifications.tsv'
),
aasfile=os.path.join(
alphaviz.utils.DATA_PATH,
'amino_acids.tsv'
),
verbose=False,
)
def create_layout(self):
dependances = {
self.path_raw_folder: [self.update_file_names, 'value'],
self.ms_file_name: [self.update_output_folder_and_fasta, 'value'],
self.upload_button: [self.load_data, 'clicks'],
}
for k in dependances.keys():
k.param.watch(
dependances[k][0],
dependances[k][1]
)
self.layout = pn.Card(
pn.Row(
pn.Column(
self.path_raw_folder,
self.ms_file_name,
self.path_output_folder,
self.path_fasta_file,
self.is_prediction,
margin=(10, 30, 10, 10),
),
pn.Spacer(sizing_mode='stretch_width'),
pn.Column(
self.upload_button,
self.upload_progress,
self.import_error,
align='center',
margin=(100, 40, 0, 0),
)
),
title='Data Import',
collapsed=False,
collapsible=True,
header_background='#eaeaea',
header_color='#333',
align='center',
sizing_mode='stretch_width',
margin=(5, 8, 10, 8),
css_classes=['background']
)
return self.layout
def update_file_names(self, *args):
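        """Populate the raw-file selector with the .d/.hdf files found in the specified raw folder, sorted naturally."""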
try:
self.ms_file_name.options = alphaviz.preprocessing.sort_naturally(
alphaviz.io.get_filenames_from_directory(
self.path_raw_folder.value,
['d', 'hdf']
)
)
except OSError:
self.import_error.object = "#### The selected directory is not found."
def update_output_folder_and_fasta(self, *args):
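        """Prefill the output-folder and fasta-file fields by scanning the selected raw file folder (and, as a fallback, the raw folder itself) for evidence.txt and .fasta files."""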
try:
data_folder = os.path.join(
self.path_raw_folder.value,
self.ms_file_name.value
)
for dirpath, dirnames, filenames in os.walk(data_folder):
for filename in filenames:
if filename.endswith(".fasta"):
self.path_fasta_file.value = os.path.join(dirpath, filename)
elif filename == 'evidence.txt':
self.path_output_folder.value = dirpath
if not self.path_fasta_file.value:
for filename in os.listdir(self.path_raw_folder.value):
if filename.endswith(".fasta"):
self.path_fasta_file.value = os.path.join(self.path_raw_folder.value, filename)
except:
self.import_error.object = "#### The selected folder does not contain any .d or .hdf files."
def load_data(self, *args):
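        """Load the selected raw Bruker file, the fasta file and the MaxQuant or DIA-NN output tables; if prediction is activated, also load the peptdeep models and fine-tune the RT model on the identified peptides."""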
alphatims.utils.set_progress_callback(self.upload_progress)
self.settings['analysis_software'] = ''
self.model_mgr = None
self.psm_df = pd.DataFrame()
self.import_error.object = ''
self.upload_progress.value = 0
try:
self.raw_data = alphatims.bruker.TimsTOF(
os.path.join(
self.path_raw_folder.value,
self.ms_file_name.value
),
)
except:
            self.import_error.object += '\n#### The selected unprocessed Bruker file is corrupted and cannot be loaded. \n#### Please select another file.'
raise OSError('The selected unprocessed Bruker file is corrupted and cannot be loaded. Please select another file.')
alphatims.utils.set_progress_callback(True)
        # TODO: replace this workaround by simply setting active=True on self.upload_progress once the underlying bug is fixed
self.upload_progress = pn.indicators.Progress(
active=True,
bar_color='light',
width=250,
align='center',
margin=(-10, 0, 30, 0)
)
self.layout[0][2][1] = self.upload_progress
# read the fasta file if specified
if self.path_fasta_file.value:
try:
self.fasta = alphaviz.io.read_fasta(
self.path_fasta_file.value
)
except:
self.import_error.object += "\n#### The selected fasta file cannot be loaded."
else:
            self.import_error.object += "\n#### The fasta file has not been provided."
# read analysis output files (MQ, DIA-NN, etc.) if specified
# check all files in the analysis output folder
if self.path_output_folder.value:
files = alphaviz.io.get_filenames_from_directory(
directory=self.path_output_folder.value,
extensions_list=['txt', 'tsv', 'csv']
)
mq_files = ['allPeptides.txt', 'msms.txt', 'evidence.txt', 'proteinGroups.txt', 'summary.txt']
if any(file in files for file in mq_files):
print('Reading the MaxQuant output files...')
if all(file in files for file in mq_files):
self.mq_all_peptides, self.mq_msms, self.mq_evidence, self.mq_protein_groups, self.mq_summary = alphaviz.io.import_mq_output(
mq_files,
self.path_output_folder.value,
self.ms_file_name.value.split('.')[0]
)
self.settings['analysis_software'] = 'maxquant'
else:
self.import_error.object += "\n#### The MQ output files necessary for the visualization are not found."
else:
print('Reading the DIA-NN output files...')
try:
self.diann_proteins, self.diann_peptides, self.diann_statist, diann_output_file = alphaviz.io.import_diann_output(
self.path_output_folder.value,
self.ms_file_name.value.split('.')[0],
self.fasta
)
self.diann_peptides['m/z'] = self.diann_peptides.apply(
lambda x: alphaviz.utils.calculate_mz(
prec_mass=alphaviz.utils.get_precmass(
alphaviz.utils.parse(x['Sequence_AP_mod']), self.mass_dict),
charge=x['Charge']
),
axis=1
)
self.settings['analysis_software'] = 'diann'
except BaseException:
self.import_error.object += "\n#### The DIA-NN output files necessary for the visualization are not found."
else:
self.import_error.object += "\n#### The output files of the supported software tools have not been provided."
if self.is_prediction.value:
from peptdeep.pretrained_models import ModelManager
self.model_mgr = ModelManager()
self.model_mgr.load_installed_models()
if self.settings['analysis_software'] == 'maxquant':
from alphabase.io.psm_reader import psm_reader_provider
mq_reader = psm_reader_provider.get_reader('maxquant')
mq_reader.load(
os.path.join(self.path_output_folder.value, 'evidence.txt')
)
self.psm_df = mq_reader.psm_df.groupby(
['sequence', 'mods', 'mod_sites', 'nAA', 'charge',
'spec_idx', 'rt', 'rt_norm']
)['ccs'].median().reset_index()
elif self.settings['analysis_software'] == 'diann':
from alphabase.io.psm_reader import psm_reader_provider
diann_reader = psm_reader_provider.get_reader('diann')
diann_reader.load(
os.path.join(
self.path_output_folder.value,
diann_output_file
)
)
self.psm_df = diann_reader.psm_df.groupby(
['sequence', 'mods', 'mod_sites', 'nAA', 'charge',
'spec_idx', 'rt', 'rt_norm']
)['ccs'].median().reset_index()
self.psm_df['nce'] = 30
self.psm_df['instrument'] = 'timsTOF'
# trained on more Lumos files therefore should work better
# than 'timsTOF'
self.psm_df['spec_idx'] += 1
self.model_mgr.psm_num_to_tune_rt_ccs = 1000
self.model_mgr.fine_tune_rt_model(self.psm_df)
# self.model_mgr.fine_tune_ccs_model(self.psm_df)
self.trigger_dependancy()
self.upload_progress.active = False
self.upload_progress.value = 100
class OptionsWidget(object):
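    """Collapsible 'Settings' card that aggregates the option sub-cards added via ``add_option``."""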
def __init__(self, data):
self.data = data
self.layout = pn.Card(
title='Settings',
collapsed=True,
sizing_mode='stretch_width',
margin=(5, 8, 10, 8),
css_classes=['background']
)
def get_layout(self, *args):
return self.layout
def add_option(self, option):
self.layout.append(option)
class HeatmapOptionsWidget(object):
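    """Widgets controlling the heatmap appearance: axis labels, colormap, background color and the size/color of the precursor marker."""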
def __init__(self):
self.plot1_x_axis = pn.widgets.Select(
name='X-axis label',
value='m/z, Th',
options=['m/z, Th', 'Inversed IM, V·s·cm\u207B\u00B2'],
width=180,
margin=(20, 20, 20, 20),
)
self.plot1_y_axis = pn.widgets.Select(
name='Y-axis label',
value='Inversed IM, V·s·cm\u207B\u00B2',
options=['m/z, Th', 'Inversed IM, V·s·cm\u207B\u00B2'],
width=180,
margin=(20, 20, 20, 10),
)
self.heatmap_color = pn.widgets.Select(
name='Color scale',
value='fire',
options=sorted(hv.plotting.util.list_cmaps(reverse=True) + hv.plotting.util.list_cmaps(reverse=False)),
width=180,
margin=(20, 20, 20, 10),
)
self.heatmap_background = pn.widgets.ColorPicker(
name='Background color',
value='#000000',
width=180,
margin=(20, 20, 20, 10),
)
self.precursor_target_size = pn.widgets.IntInput(
name='Precursor target size',
value=20,
step=5,
start=0,
end=100,
width=180,
margin=(20, 20, 20, 10),
)
self.precursor_target_color = pn.widgets.ColorPicker(
name='Precursor target color',
value='#00008b',
width=180,
margin=(20, 20, 20, 10),
)
def create_layout(self, *args):
layout = pn.Card(
pn.Row(
self.plot1_x_axis,
self.plot1_y_axis,
self.heatmap_color,
self.heatmap_background,
self.precursor_target_size,
self.precursor_target_color
),
title='Heatmap options',
collapsed=False,
sizing_mode='stretch_width',
margin=(15, 8, 0, 8),
css_classes=['background']
)
return layout
class ToleranceOptionsWidget(object):
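    """Widgets for the m/z (ppm), ion mobility (1/K0) and retention time (sec) extraction tolerances."""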
def __init__(self):
self.mz_tolerance = pn.widgets.FloatInput(
name='m/z Tolerance (ppm)',
value=10,
step=5,
start=0,
end=1000,
width=150,
margin=(20, 20, 20, 10),
)
self.im_tolerance = pn.widgets.FloatInput(
name='IM Tolerance (1/K0)',
value=0.05,
step=0.1,
start=0,
end=2,
width=150,
margin=(20, 20, 20, 10),
)
self.rt_tolerance = pn.widgets.FloatInput(
name='RT Tolerance (sec)',
value=30,
step=5,
start=0,
end=1000,
width=150,
margin=(20, 20, 20, 10),
)
def create_layout(self, *args):
layout = pn.Card(
pn.Row(
self.mz_tolerance,
self.im_tolerance,
self.rt_tolerance
),
title='Tolerance settings',
collapsed=False,
sizing_mode='stretch_width',
margin=(15, 8, 0, 8),
css_classes=['background']
)
return layout
class CustomizationOptionsWidget(object):
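    """Widgets for the qualitative/sequential color scales and for the size and format of saved plot images."""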
def __init__(self):
self.colorscale_qualitative = pn.widgets.Select(
name='Qualitative color scale',
value='Pastel',
options=sorted(list(set([each['y'][0] for each in px.colors.qualitative.swatches()['data']]))),
width=180,
margin=(20, 20, 20, 10),
)
self.colorscale_sequential = pn.widgets.Select(
name='Sequential color scale',
value='Viridis',
options=sorted(list(set([each['y'][0] for each in px.colors.sequential.swatches()['data']]))),
width=190,
margin=(20, 20, 20, 10),
)
self.image_save_size = pn.widgets.LiteralInput(
type=list,
name='The size of the saved plot (h, w):',
value=[400, 900],
width=200,
margin=(20, 20, 20, 10),
)
self.image_save_format = pn.widgets.Select(
name='The format of the saved plot:',
value='svg',
options=['png', 'svg', 'jpeg', 'webp'],
width=190,
margin=(20, 20, 20, 10),
)
def create_layout(self, *args):
dependances = {
self.image_save_size: [self.set_image_settings, 'value'],
self.image_save_format: [self.set_image_settings, 'value'],
}
for k in dependances.keys():
k.param.watch(
dependances[k][0],
dependances[k][1]
)
layout = pn.Card(
pn.Row(
self.colorscale_qualitative,
self.colorscale_sequential,
self.image_save_size,
self.image_save_format,
),
title='Customization options',
collapsed=False,
sizing_mode='stretch_width',
margin=(15, 8, 15, 8),
css_classes=['background']
)
return layout
def set_image_settings(self, *args):
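        """Rebind the module-level ``update_config`` via ``functools.partial`` so that all subsequently created plot panes are exported with the selected image size and format."""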
global update_config
from functools import partial
update_config = partial(
update_config,
height=self.image_save_size.value[0], width=self.image_save_size.value[1],
ext=self.image_save_format.value
)
class TabsWidget(object):
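    """Builds the 'Main View', 'Quality Control' and 'Targeted Mode' tabs once raw data has been loaded; the layout is recreated whenever the data widget triggers an update."""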
def __init__(self, data, options):
self.layout = None
self.data = data
self.options = options
def create_layout(
self,
tab_list=None
):
self.tabs = tab_list
return self.data.depends(self.return_layout)
def return_layout(self, *args):
if self.data.raw_data is not None:
del self.layout
self.layout = pn.Tabs(
tabs_location='above',
margin=(10, 10, 5, 8),
sizing_mode='stretch_width',
)
self.layout += self.tabs
self.layout[0] = (
'Main View',
MainTab(self.data, self.options).create_layout()
)
self.layout[1] = (
'Quality Control',
QCTab(self.data, self.options).create_layout()
)
self.layout[2] = (
'Targeted Mode',
TargetModeTab(self.data, self.options).create_layout()
)
self.active = 0
# self.data.layout.collapsed = True
return self.layout
class MainTab(object):
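    """'Main View' tab: protein and peptide tables, sequence coverage, XIC/elution profiles, MS1/MS2 heatmaps and the (optionally mirrored) MS2 spectrum for the selected peptide."""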
def __init__(self, data, options):
self.data = data
self.analysis_software = ""
self.mz_tol = options.layout[0][0][0]
self.im_tol = options.layout[0][0][1]
self.rt_tol = options.layout[0][0][2]
self.heatmap_x_axis = options.layout[1][0][0]
self.heatmap_y_axis = options.layout[1][0][1]
self.heatmap_colormap = options.layout[1][0][2]
self.heatmap_background_color = options.layout[1][0][3]
self.heatmap_precursor_size = options.layout[1][0][4]
self.heatmap_precursor_color = options.layout[1][0][5]
self.colorscale_qualitative = options.layout[2][0][0]
self.colorscale_sequential = options.layout[2][0][1]
self.image_save_size = options.layout[2][0][2]
self.image_save_format = options.layout[2][0][3]
self.protein_seq = str()
self.gene_name = str()
self.ms1_ms2_frames = dict()
self.ms1_frame = None
self.merged_precursor_data = pd.DataFrame()
self.peptide = dict()
self.proteins_table = pn.widgets.Tabulator(
layout='fit_data_table',
name='Proteins table',
pagination='remote',
page_size=5,
disabled=True,
height=250,
show_index=False,
selectable=1,
formatters={
"Protein IDs": {
'type': 'link',
'urlPrefix': "https://www.uniprot.org/uniprot/",
'target': "_blank",
}
},
sizing_mode='stretch_width',
align='center',
text_align='center',
margin=(0, 5, 10, 5)
)
self.gene_name_filter = pn.widgets.AutocompleteInput(
name='Search the protein by its gene name:',
min_characters=3,
case_sensitive=False,
width=350,
margin=(0, 0, 10, 0),
)
self.gene_name_reset = pn.widgets.Button(
name='Reset proteins',
height=32,
button_type='default',
width=150,
margin=(18, 5, 0, 20),
)
self.protein_list_title = pn.pane.Markdown(
'Load a list of proteins:',
margin=(10, 5, 0, 20),
)
self.selected_peptides_reset = pn.widgets.Button(
name='Deselect peptides',
height=32,
button_type='default',
width=150,
margin=(18, 5, 0, 0),
)
self.protein_list = pn.widgets.FileInput(
accept='.txt',
margin=(22, 5, 0, 5),
width=250,
)
self.peptides_table = pn.widgets.Tabulator(
layout='fit_data_table',
pagination='remote',
page_size=8,
page=1,
disabled=True,
height=300,
show_index=False,
selectable=1,
sizing_mode='stretch_width',
align='center',
text_align='center',
margin=(0, 5, 10, 5)
)
self.protein_coverage_plot = None
self.chromatograms_plot = None
self.heatmap_ms1_plot = None
self.heatmap_ms2_plot = None
self.line_plot = None
self.x_axis_label_mq = pn.widgets.Select(
name='Select X label:',
value='Retention time',
options=['Retention time', 'Ion mobility', 'm/z'],
width=150,
align='center'
)
self.x_axis_label_diann = pn.widgets.Select(
name='Select dimension:',
value='RT dimension',
options=['RT dimension', 'RT/IM dimension'],
width=150,
align='center'
)
self.previous_frame = pn.widgets.Button(
button_type='default',
name='\u25c0 Previous frame',
sizing_mode='stretch_width',
margin=(10, 30, 30, 30)
)
self.next_frame = pn.widgets.Button(
button_type='default',
name='Next frame \u25b6',
sizing_mode='stretch_width',
margin=(10, 30, 30, 30)
)
self.plot_overlapped_frames = pn.widgets.Toggle(
name='Overlap frames',
button_type='default',
sizing_mode='stretch_width',
)
self.show_mirrored_plot = pn.widgets.Checkbox(
name='Show mirrored spectra',
disabled=False if self.data.model_mgr else True,
value=True,
margin=(20, 0, -20, 10),
)
self.export_svg_ms1_button = pn.widgets.Button(
name='Export as .svg',
button_type='default',
width=250,
align='center',
disabled=True,
margin=(25, 0, 0, 10),
)
self.export_svg_ms2_button = pn.widgets.Button(
name='Export as .svg',
button_type='default',
align='center',
disabled=True,
width=250,
margin=(25, 0, 0, 10),
)
self.export_svg_elprofiles_button = pn.widgets.Button(
name='Export as .svg',
button_type='default',
align='center',
disabled=True,
width=250,
margin=(25, 0, 0, 10),
)
self.layout = None
def create_layout(self):
self.analysis_software = self.data.settings.get('analysis_software')
self.update_gene_name_filter()
if self.analysis_software:
dependances = {
self.gene_name_reset: [self.reset_protein_table, 'clicks'],
self.selected_peptides_reset: [self.unselect_peptides, 'clicks'],
self.protein_list: [self.filter_protein_table, 'value'],
self.gene_name_filter: [self.run_after_gene_filter, 'value'],
self.proteins_table: [self.run_after_protein_selection, 'selection'],
self.peptides_table: [self.run_after_peptide_selection, 'selection'],
self.heatmap_x_axis: [self.display_heatmap_spectrum, 'value'],
self.heatmap_y_axis: [self.display_heatmap_spectrum, 'value'],
self.heatmap_colormap: [self.display_heatmap_spectrum, 'value'],
self.heatmap_background_color: [self.display_heatmap_spectrum, 'value'],
self.heatmap_precursor_size: [self.display_heatmap_spectrum, 'value'],
self.heatmap_precursor_color: [self.display_heatmap_spectrum, 'value'],
self.previous_frame: [self.display_previous_frame, 'clicks'],
self.next_frame: [self.display_next_frame, 'clicks'],
self.plot_overlapped_frames: [self.display_overlapped_frames, 'value'],
self.mz_tol: [self.display_line_spectra_plots, 'value'],
self.im_tol: [self.display_line_spectra_plots, 'value'],
self.rt_tol: [self.display_line_spectra_plots, 'value'],
self.x_axis_label_mq: [self.display_line_spectra_plots, 'value'],
self.x_axis_label_diann: [self.display_elution_profile_plots, 'value'],
self.colorscale_qualitative: [self.update_plots_color, 'value'],
self.colorscale_sequential: [self.update_plots_color, 'value'],
self.image_save_size: [self.update_plots_color, 'value'],
self.image_save_format: [self.update_plots_color, 'value'],
self.show_mirrored_plot: [self.display_mass_spectrum, 'value'],
self.export_svg_ms1_button: [self.export_svg_ms1, 'clicks'],
self.export_svg_ms2_button: [self.export_svg_ms2, 'clicks'],
self.export_svg_elprofiles_button: [self.export_svg_elprofiles, 'clicks'],
}
for k in dependances.keys():
k.param.watch(
dependances[k][0],
dependances[k][1]
)
            with open(os.path.join(
                alphaviz.utils.STYLE_PATH,
                'tables_formatting.json',
            )) as formatting_file:
                self.dictionary = json.load(formatting_file)
if self.analysis_software == 'maxquant':
self.proteins_table.value = self.data.mq_protein_groups
self.proteins_table.formatters = self.dictionary[self.analysis_software]['proteins_table']['formatters']
self.proteins_table.widths = self.dictionary[self.analysis_software]['proteins_table']['widths']
self.peptides_table.value = self.data.mq_evidence.iloc[0:0]
self.peptides_table.widths = self.dictionary[self.analysis_software]['peptides_table']['widths']
if '(EXP) Seq coverage, %' in self.data.mq_protein_groups.columns:
self.proteins_table.formatters['(EXP) Seq coverage, %'] = {"type": "progress", "max": 100, "legend": True}
elif self.analysis_software == 'diann':
self.proteins_table.selection = []
self.peptides_table.selection = []
self.proteins_table.value = self.data.diann_proteins
self.peptides_table.value = self.data.diann_peptides.iloc[0:0]
if self.analysis_software:
self.layout = pn.Column(
self.display_chromatogram(),
pn.Row(
self.gene_name_filter,
self.gene_name_reset,
self.protein_list_title,
self.protein_list,
self.selected_peptides_reset,
margin=(10, 0),
),
pn.panel(
"### Proteins table",
align='center',
margin=(-10, 10, -5, 10)
),
self.proteins_table,
pn.panel(
"### Peptides table",
align='center',
margin=(-10, 10, -5, 10)
),
self.peptides_table,
self.protein_coverage_plot,
None, # peptide description
None, # XIC plot
pn.Row(
None, # Previous frame button
None, # Next frame button
),
pn.Row(
pn.Column(
self.heatmap_ms1_plot,
None,
),
pn.Column(
self.heatmap_ms2_plot,
None,
),
sizing_mode='stretch_width',
align='center'
),
None, # Overlap frames button
None, # Show mirrored spectra checkbox
None, # Predicted peptide properties
None, # Summed MS2 spectrum
margin=(20, 10, 5, 10),
sizing_mode='stretch_width',
)
else:
self.layout = pn.Column(
self.display_chromatogram(),
margin=(20, 10, 5, 10),
sizing_mode='stretch_width',
)
return self.layout
def display_chromatogram(self, *args):
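        """Create the chromatogram pane for the loaded raw data; swap it into the first slot of the tab layout if the layout already exists, otherwise return the pane."""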
chromatograms = alphaviz.plotting.plot_chrom(
self.data.raw_data,
self.colorscale_qualitative.value,
)
chrom_widget = pn.Pane(
chromatograms,
config=update_config('Chromatograms'),
sizing_mode='stretch_width',
margin=(0, 10)
)
if self.layout:
self.layout[0] = chrom_widget
else:
return chrom_widget
def update_gene_name_filter(self):
self.proteins_table.selection = []
self.peptides_table.selection = []
self.layout = None
if self.analysis_software == 'maxquant':
self.gene_name_filter.options = self.data.mq_protein_groups['Gene names'].str.split(';').explode().unique().tolist()
elif self.analysis_software == 'diann':
self.gene_name_filter.options = self.data.diann_proteins['Gene names'].str.split(';').explode().unique().tolist()
def reset_protein_table(self, *args):
self.proteins_table.loading = True
self.peptides_table.loading = True
self.gene_name_filter.value = ''
if self.analysis_software == 'maxquant':
self.proteins_table.value = self.data.mq_protein_groups
elif self.analysis_software == 'diann':
self.proteins_table.value = self.data.diann_proteins
self.protein_list.value = b''
self.proteins_table.selection = []
self.peptides_table.loading = False
self.proteins_table.loading = False
def unselect_peptides(self, *args):
self.peptides_table.selection = []
def filter_protein_table(self, *args):
if self.protein_list.value != b'':
self.proteins_table.loading = True
self.peptides_table.loading = True
self.peptides_table.value = self.data.mq_evidence.iloc[0:0] if self.analysis_software == 'maxquant' else self.data.diann_peptides.iloc[0:0]
self.proteins_table.selection = []
predefined_list = []
for line in StringIO(str(self.protein_list.value, "utf-8")).readlines():
predefined_list.append(line.strip().upper())
if predefined_list:
if self.analysis_software == 'maxquant':
filtered_df = alphaviz.preprocessing.filter_df(
self.data.mq_protein_groups,
pattern='|'.join(predefined_list),
column='Gene names',
software='maxquant',
)
if filtered_df.empty:
self.proteins_table.value = self.data.mq_protein_groups.iloc[0:0, :]
else:
self.proteins_table.value = filtered_df
elif self.analysis_software == 'diann':
filtered_df = self.data.diann_proteins[self.data.diann_proteins['Gene names'].isin(predefined_list)]
if filtered_df.empty:
self.proteins_table.value = self.data.diann_proteins.iloc[0:0, :]
else:
self.proteins_table.value = filtered_df
else:
self.proteins_table.value = self.data.mq_protein_groups if self.analysis_software == 'maxquant' else self.data.diann_proteins
self.peptides_table.loading = False
self.proteins_table.loading = False
def run_after_gene_filter(self, *args):
self.proteins_table.loading = True
self.peptides_table.loading = True
self.proteins_table.selection = []
if self.analysis_software == 'maxquant':
self.proteins_table.value = alphaviz.preprocessing.filter_df(
self.data.mq_protein_groups,
pattern=self.gene_name_filter.value,
column='Gene names',
software='maxquant',
)
self.peptides_table.value = self.data.mq_evidence.iloc[0:0]
elif self.analysis_software == 'diann':
self.proteins_table.value = alphaviz.preprocessing.filter_df(
self.data.diann_proteins,
pattern=self.gene_name_filter.value,
column='Gene names',
software='diann',
)
self.peptides_table.value = self.data.diann_peptides.iloc[0:0]
self.peptides_table.loading = False
self.proteins_table.loading = False
def run_after_protein_selection(self, *args):
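        """React to a protein selection: fill the peptides table for the selected gene, reset the downstream panes and draw the protein sequence coverage plot; clearing the selection resets the table and plots."""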
if self.proteins_table.selection:
self.peptides_table.loading = True
self.peptides_table.selection = []
if self.analysis_software == 'maxquant':
self.gene_name = self.proteins_table.value.iloc[self.proteins_table.selection[0]]['Gene names']
self.peptides_table.value = self.data.mq_evidence[self.data.mq_evidence['Gene names'] == self.gene_name]
if self.peptides_table.value.empty:
self.peptides_table.value = self.data.mq_evidence[self.data.mq_evidence['Gene names'].str.contains(self.gene_name)]
self.curr_protein_ids = [val.replace('CON__', '') if "|" not in val else val.split('|')[1].replace('CON__', '') for val in self.peptides_table.value['Leading razor protein'].sort_values(ascending=False).values][0]
elif self.analysis_software == 'diann':
self.gene_name = self.proteins_table.value.iloc[self.proteins_table.selection[0]]['Gene names']
self.curr_protein_ids = self.proteins_table.value.iloc[self.proteins_table.selection[0]]['Protein IDs']
self.peptides_table.value = alphaviz.preprocessing.filter_df(
self.data.diann_peptides,
pattern=self.gene_name,
column='Gene names',
software='diann',
)
self.peptides_table.page = 1
self.layout[7:] = [
None, # peptide description
None, # XIC plot
pn.Row(
None, # Previous frame button
None, # Next frame button
),
pn.Row(
None,
None,
sizing_mode='stretch_width',
align='center'
),
None, # Overlap frames button
None, # Show mirrored spectra checkbox
None, # Predicted peptide properties
None, # Summed MS2 spectrum
]
self.protein_seq = alphaviz.preprocessing.get_aa_seq(
self.curr_protein_ids,
self.data.fasta,
)
self.protein_coverage_plot = alphaviz.plotting.plot_sequence_coverage(
self.protein_seq,
self.gene_name,
self.peptides_table.value['Modified.Sequence'].tolist() if self.analysis_software == 'diann' else self.peptides_table.value['Modified sequence'].tolist(),
self.colorscale_qualitative.value,
self.colorscale_sequential.value,
r"\[(.*?)\]|\((.*?)\)\)?",
self.curr_protein_ids
)
if not self.protein_coverage_plot and self.analysis_software == 'maxquant':
curr_protein_ids = sorted(self.peptides_table.value['Proteins'].values[0].split(';'), reverse=True)
for prot_id in curr_protein_ids:
if '|' in prot_id:
prot_id = prot_id.split('|')[1]
self.protein_seq = alphaviz.preprocessing.get_aa_seq(
prot_id,
self.data.fasta,
)
self.protein_coverage_plot = alphaviz.plotting.plot_sequence_coverage(
self.protein_seq,
self.gene_name,
self.peptides_table.value['Modified.Sequence'].tolist() if self.analysis_software == 'diann' else self.peptides_table.value['Modified sequence'].tolist(),
self.colorscale_qualitative.value,
self.colorscale_sequential.value,
r"\[(.*?)\]|\((.*?)\)\)?",
prot_id
)
if self.protein_coverage_plot:
self.curr_protein_ids = prot_id
break
self.layout[6] = pn.Pane(
self.protein_coverage_plot,
config=update_config(f"{self.gene_name}_coverage_plot"),
align='center',
sizing_mode='stretch_width',
)
self.peptides_table.loading = False
else:
self.peptides_table.loading = True
self.peptides_table.selection = []
self.peptides_table.value = self.data.mq_evidence.iloc[0:0] if self.analysis_software == 'maxquant' else self.data.diann_peptides.iloc[0:0]
self.layout[6] = None
self.layout[7:] = [
None, # peptide description
None, # XIC plot
pn.Row(
None, # Previous frame button
None, # Next frame button
),
pn.Row(
None,
None,
sizing_mode='stretch_width',
align='center'
),
None, # Overlap frames button
None, # Show mirrored spectra checkbox
None, # Predicted peptide properties
None, # Summed MS2 spectrum
]
self.peptides_table.loading = False
def run_after_peptide_selection(self, *args):
if self.proteins_table.selection:
self.peptides_table.loading = True
if self.peptides_table.selection:
one_peptide_coverage_plot = alphaviz.plotting.plot_sequence_coverage(
self.protein_seq,
self.gene_name,
[self.peptides_table.value.iloc[self.peptides_table.selection[0]]['Modified.Sequence']] if self.analysis_software == 'diann' else [self.peptides_table.value.iloc[self.peptides_table.selection[0]]['Modified sequence']],
self.colorscale_qualitative.value,
self.colorscale_sequential.value,
r"\[(.*?)\]|\((.*?)\)\)?",
self.curr_protein_ids
)
self.layout[6] = pn.Pane(
one_peptide_coverage_plot,
config=update_config(f"{self.gene_name}_coverage_plot"),
align='center',
sizing_mode='stretch_width',
)
self.scan_number = [int(self.peptides_table.value.iloc[self.peptides_table.selection[0]]['MS/MS scan number'])]
if 'dda' in self.data.raw_data.acquisition_mode:
pasef_ids = [int(pasef_id) for pasef_id in self.data.mq_all_peptides[self.data.mq_all_peptides['MS/MS scan number'].isin(self.scan_number)]['Pasef MS/MS IDs'].values[0]]
precursors = self.data.raw_data.fragment_frames[self.data.raw_data.fragment_frames.index.isin(pasef_ids)].copy()
                    # quick fix for the AlphaTims bug: Frame numbers in raw_data.fragment_frames differ between .d and .hdf files
if self.data.ms_file_name.value.split('.')[-1] == 'hdf':
precursors.loc[:, 'Frame'] -= 1
self.merged_precursor_data = pd.merge(
precursors, self.data.raw_data.precursors[self.data.raw_data.precursors.Id.isin(precursors.Precursor.values)],
left_on='Precursor',
right_on='Id'
)
self.merged_precursor_data['Frame_Prec'] = list(zip(self.merged_precursor_data.Frame, self.merged_precursor_data.Precursor))
self.ms1_ms2_frames = dict(zip(self.merged_precursor_data.Parent, self.merged_precursor_data.Frame_Prec))
self.current_frame = list(self.ms1_ms2_frames.keys())[0]
self.display_line_spectra_plots()
self.display_heatmap_spectrum()
else:
self.ms2_frame = self.data.raw_data.fragment_frames[self.data.raw_data.fragment_frames.index.isin(self.scan_number)].Frame.values[0]
if self.data.ms_file_name.value.split('.')[-1] == 'hdf':
self.ms2_frame -= 1
self.ms1_frame = self.data.raw_data.frames.loc[(self.data.raw_data.frames.MsMsType == 0) & (self.data.raw_data.frames.Id < self.ms2_frame), 'Id'].values[-1]
self.peptide = {
"sequence":
self.peptides_table.value.iloc[self.peptides_table.selection[0]]['Sequence_AP_mod'],
"charge":
self.peptides_table.value.iloc[self.peptides_table.selection[0]]['Charge'],
"im":
self.peptides_table.value.iloc[self.peptides_table.selection[0]]['IM'],
"rt": self.peptides_table.value.iloc[self.peptides_table.selection[0]]['RT'] * 60,
"mz": self.peptides_table.value.iloc[self.peptides_table.selection[0]]['m/z'],
}
self.display_elution_profile_plots()
if not self.data.psm_df.empty:
data_slice = self.data.psm_df.loc[(self.data.psm_df.spec_idx == self.peptides_table.value.iloc[self.peptides_table.selection[0]]['MS/MS scan number']) & (self.data.psm_df.sequence == self.peptides_table.value.iloc[self.peptides_table.selection[0]]['Sequence'])].copy()
predlib = self.data.model_mgr.predict_all(
data_slice,
predict_items=['rt', 'mobility'],
multiprocessing=False,
)
rt_pred = round(predlib['precursor_df']['rt_pred'].values[0] * self.data.raw_data.rt_max_value / 60, 3)
im_pred = round(predlib['precursor_df']['mobility_pred'].values[0], 3)
self.layout[9] = pn.panel(
f"## The predicted peptide properties: retention time = {rt_pred} min, ion mobility = {im_pred} V·s·cm\u207B\u00B2.",
css_classes=['main-part'],
sizing_mode='stretch_width',
margin=(10, 10, 20, -10),
align='center',
)
self.display_heatmap_spectrum()
else:
self.peptides_table.selection = []
self.layout[6] = pn.Pane(
self.protein_coverage_plot,
config=update_config(f"{self.gene_name}_coverage_plot"),
align='center',
sizing_mode='stretch_width',
)
self.layout[7:] = [
None, # peptide description
None, # XIC plot
pn.Row(
None, # Previous frame button
None, # Next frame button
),
pn.Row(
None,
None,
sizing_mode='stretch_width',
align='center'
),
None, # Overlap frames button
None, # Show mirrored spectra checkbox
None, # Predicted peptide properties
None, # Summed MS2 spectrum
]
self.peptides_table.loading = False
def display_line_spectra_plots(self, *args):
if self.analysis_software == 'maxquant' and not self.merged_precursor_data.empty:
try:
self.layout[8][1].loading = True
except IndexError:
pass
mz_tol_value = self.mz_tol.value
prec_mono_mz = self.merged_precursor_data.MonoisotopicMz.median()
prec_mono_low_mz = prec_mono_mz / (1 + mz_tol_value / 10**6)
prec_mono_high_mz = prec_mono_mz * (1 + mz_tol_value / 10**6)
prec_rt = float(self.peptides_table.value.iloc[self.peptides_table.selection[0]]['Retention time'])
prec_rt_start_sec = prec_rt*60 - self.rt_tol.value
prec_rt_end_sec = prec_rt*60 + self.rt_tol.value
if self.x_axis_label_mq.value == 'Retention time':
one_over_k0 = float(self.peptides_table.value.iloc[self.peptides_table.selection[0]]['1/K0'])
one_over_k0_low, one_over_k0_high = one_over_k0 - self.im_tol.value, one_over_k0 + self.im_tol.value
precursor_indices = self.data.raw_data[
:,
one_over_k0_low:one_over_k0_high,
:,
prec_mono_low_mz:prec_mono_high_mz,
'raw'
]
elif self.x_axis_label_mq.value == 'Ion mobility':
precursor_indices = self.data.raw_data[
prec_rt_start_sec:prec_rt_end_sec,
:,
:,
prec_mono_low_mz:prec_mono_high_mz,
'raw'
]
else:
precursor_indices = self.data.raw_data[
self.current_frame,
'raw'
]
self.layout[7] = pn.panel(
f"## The selected peptide has rt = {round(float(self.peptides_table.value.iloc[self.peptides_table.selection[0]]['Retention time']), 3)}, m/z = {round(float(self.peptides_table.value.iloc[self.peptides_table.selection[0]]['m/z']), 3)}, charge = {float(self.peptides_table.value.iloc[self.peptides_table.selection[0]]['Charge'])}, im = {round(float(self.peptides_table.value.iloc[self.peptides_table.selection[0]]['1/K0']), 3)}, andromeda score = {round(float(self.peptides_table.value.iloc[self.peptides_table.selection[0]]['Andromeda score']), 1)}.",
css_classes=['main-part'],
sizing_mode='stretch_width',
align='center',
margin=(0, 10, 0, -10)
)
conversion_dict = {
'Retention time': 'rt',
'Ion mobility': 'mobility',
'm/z': 'mz'
}
title_renaming = {
'Retention time': 'Extracted ion chromatogram',
'Ion mobility': 'Ion mobility line plot',
'm/z': 'MS1 spectrum'
}
self.layout[8] = pn.Row(
self.x_axis_label_mq,
pn.panel(
alphaviz.plotting.plot_line(
self.data.raw_data,
precursor_indices,
conversion_dict[self.x_axis_label_mq.value],
colorscale_qualitative=self.colorscale_qualitative.value,
),
sizing_mode='stretch_width',
config=update_config(title_renaming[self.x_axis_label_mq.value]),
loading=False
),
sizing_mode='stretch_width',
margin=(5, 10, 0, 10)
)
else:
self.display_elution_profile_plots()
def display_elution_profile_plots(self, *args):
if self.analysis_software == 'diann' and self.peptide:
self.layout[7] = pn.Row(
pn.panel(
f"## The selected peptide has rt = {round(self.peptide['rt']/60, 3)}, m/z = {round(self.peptide['mz'], 3)}, charge = {self.peptide['charge']}, im = {round(self.peptide['im'], 3)}, Quantity.Quality score = {round(float(self.peptides_table.value.iloc[self.peptides_table.selection[0]]['Quantity.Quality']), 2)}.",
css_classes=['main-part'],
sizing_mode='stretch_width',
align='center',
margin=(0, 10, 0, -10)
),
None
)
try:
self.layout[8][1][0].loading = True
except IndexError:
pass
if self.x_axis_label_diann.value == 'RT dimension':
self.layout[8] = pn.Row(
self.x_axis_label_diann,
pn.panel(
alphaviz.plotting.plot_elution_profile(
self.data.raw_data,
self.peptide,
self.data.mass_dict,
mz_tol=self.mz_tol.value,
rt_tol=self.rt_tol.value,
im_tol=self.im_tol.value,
title=f"Precursor and fragment elution profiles of {self.peptides_table.value.iloc[self.peptides_table.selection[0]]['Modified.Sequence']} in RT dimension ({self.peptide['rt'] / 60:.2f} min)",
colorscale_qualitative=self.colorscale_qualitative.value,
colorscale_sequential=self.colorscale_sequential.value,
),
sizing_mode='stretch_width',
config=update_config('Precursor&fragment elution profile plot in RT dimension'),
loading=False,
),
margin=(5, 10, 0, 10)
)
else:
self.layout[8] = pn.Row(
self.x_axis_label_diann,
pn.Column(
pn.pane.HoloViews(
alphaviz.plotting.plot_elution_profile_heatmap(
self.data.raw_data,
self.peptide,
self.data.mass_dict,
mz_tol=self.mz_tol.value,
rt_tol=self.rt_tol.value,
im_tol=self.im_tol.value,
n_cols=8,
width=180,
height=180,
colormap=self.heatmap_colormap.value,
background_color=self.heatmap_background_color.value,
),
sizing_mode='stretch_width',
linked_axes=True,
loading=False,
),
self.export_svg_elprofiles_button,
align='center',
),
sizing_mode='stretch_width',
margin=(5, 10, 0, 10)
)
def display_heatmap_spectrum(self, *args):
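        """Draw the MS1 and MS2 frame heatmaps for the current precursor and, for DDA (MaxQuant) data, enable the frame-navigation buttons and the combined MS2 spectrum."""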
if self.ms1_ms2_frames or self.ms1_frame:
if self.analysis_software == 'maxquant':
ms1_frame = self.current_frame
ms2_frame = self.ms1_ms2_frames[self.current_frame][0]
mz = float(self.peptides_table.value.iloc[self.peptides_table.selection[0]]['m/z'])
im = float(self.peptides_table.value.iloc[self.peptides_table.selection[0]]['1/K0'])
elif self.analysis_software == 'diann':
ms1_frame = self.ms1_frame
ms2_frame = self.ms2_frame
mz = self.peptide['mz']
im = self.peptide['im']
try:
self.heatmap_ms1_plot = alphaviz.plotting.plot_heatmap(
self.data.raw_data[ms1_frame],
mz=mz,
im=im,
x_axis_label=self.heatmap_x_axis.value,
y_axis_label=self.heatmap_y_axis.value,
title=f'MS1 frame(s) #{ms1_frame}',
colormap=self.heatmap_colormap.value,
background_color=self.heatmap_background_color.value,
precursor_size=self.heatmap_precursor_size.value,
precursor_color=self.heatmap_precursor_color.value,
width=570,
height=450,
margin=(0, 10, 10, 0),
)
self.heatmap_ms2_plot = alphaviz.plotting.plot_heatmap(
self.data.raw_data[ms2_frame],
x_axis_label=self.heatmap_x_axis.value,
y_axis_label=self.heatmap_y_axis.value,
title=f'MS2 frame(s) #{ms2_frame}',
colormap=self.heatmap_colormap.value,
background_color=self.heatmap_background_color.value,
width=570,
height=450,
margin=(0, 10, 10, 0),
)
self.layout[10] = pn.Row(
None,
None,
align='center',
sizing_mode='stretch_width'
)
self.layout[10][0] = pn.Column(
pn.pane.HoloViews(
self.heatmap_ms1_plot,
margin=(15, 0, 0, 0),
linked_axes=False if self.analysis_software == 'diann' else True,
loading=False
),
self.export_svg_ms1_button,
align='center',
)
self.layout[10][1] = pn.Column(
pn.pane.HoloViews(
self.heatmap_ms2_plot,
margin=(15, 0, 0, 0),
linked_axes=False if self.analysis_software == 'diann' else True,
loading=False
),
self.export_svg_ms2_button,
align='center',
)
except ValueError:
print('The x- and y-axis of the heatmaps should be different.')
except BaseException as x:
print('The heatmaps cannot be displayed.')
if self.analysis_software == 'diann':
if self.x_axis_label_diann.value == 'RT/IM dimension':
self.display_elution_profile_plots()
if self.analysis_software == 'maxquant':
for each in [self.previous_frame, self.next_frame, self.plot_overlapped_frames]:
if len(self.ms1_ms2_frames.keys()) < 2:
each.disabled = True
else:
each.disabled = False
if type(self.layout[9][0]) == pn.pane.markup.Str:
self.layout[9][0] = self.previous_frame
self.layout[9][1] = self.next_frame
self.layout[11] = self.plot_overlapped_frames
self.display_mass_spectrum()
def display_mass_spectrum(self, *args):
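        """Plot the MS2 spectrum of the selected precursor and, if prediction is available and enabled, mirror it against the peptdeep-predicted fragment intensities."""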
data_ions = alphaviz.preprocessing.get_mq_ms2_scan_data(
self.data.mq_msms,
self.scan_number[0],
self.data.raw_data,
self.ms1_ms2_frames[self.current_frame][1]
)
        predicted_df = pd.DataFrame(columns=['FragmentMz', 'RelativeIntensity', 'ions'])
rt_pred, im_pred = float(), float()
if not self.data.psm_df.empty and self.show_mirrored_plot.value:
data_slice = self.data.psm_df.loc[(self.data.psm_df.spec_idx == self.peptides_table.value.iloc[self.peptides_table.selection[0]]['MS/MS scan number']) & (self.data.psm_df.sequence == self.peptides_table.value.iloc[self.peptides_table.selection[0]]['Sequence'])].copy()
predlib = self.data.model_mgr.predict_all(
data_slice,
predict_items=['ms2', 'rt', 'mobility'],
frag_types=['b_z1', 'y_z1'],
multiprocessing=False,
)
rt_pred = round(predlib['precursor_df']['rt_pred'].values[0] * self.data.raw_data.rt_max_value / 60, 3)
im_pred = round(predlib['precursor_df']['mobility_pred'].values[0], 3)
mz_ions = predlib['fragment_mz_df']
intensities_ions = predlib['fragment_intensity_df']
intensities_ions *= -100
predicted_df['FragmentMz'] = mz_ions.b_z1.values.tolist() + mz_ions.y_z1.values.tolist()[::-1]
predicted_df['RelativeIntensity'] = intensities_ions.b_z1.values.tolist() + intensities_ions.y_z1.values.tolist()[::-1]
predicted_df['ions'] = [f"b{i}" for i in range(1, len(mz_ions.b_z1)+1)] + [f"y{i}" for i in range(1, len(mz_ions.y_z1)+1)]
self.ms_spectra_plot = alphaviz.plotting.plot_complex_ms_plot(
data_ions,
title=f'MS2 spectrum for Precursor: {self.ms1_ms2_frames[self.current_frame][1]}',
sequence=self.peptides_table.value.iloc[self.peptides_table.selection[0]]['Sequence'],
predicted=(predicted_df.FragmentMz, predicted_df.RelativeIntensity, predicted_df.ions) if not predicted_df.empty else ()
)
self.layout[12] = self.show_mirrored_plot
if rt_pred:
self.layout[13] = pn.panel(
f"## The predicted peptide properties: retention time = {rt_pred} min, ion mobility = {im_pred} 1/K0.",
css_classes=['main-part'],
sizing_mode='stretch_width',
margin=(-20, 10, 30, 10)
)
self.layout[14] = pn.Pane(
self.ms_spectra_plot,
config=update_config('Combined MS2 spectrum'),
margin=(30, 0, 0, 0),
sizing_mode='stretch_width',
loading=False,
height=600 if predicted_df.empty else 700
)
def display_previous_frame(self, *args):
try:
self.layout[10][0][0].loading = True
self.layout[10][1][0].loading = True
self.layout[14][0].loading = True
except IndexError:
pass
current_frame_index = list(self.ms1_ms2_frames.keys()).index(self.current_frame)
if current_frame_index == 0:
self.current_frame = list(self.ms1_ms2_frames.keys())[-1]
else:
self.current_frame = list(self.ms1_ms2_frames.keys())[current_frame_index - 1]
if self.plot_overlapped_frames.value:
self.plot_overlapped_frames.value = False
else:
self.display_heatmap_spectrum()
def display_next_frame(self, *args):
try:
self.layout[10][0][0].loading = True
self.layout[10][1][0].loading = True
self.layout[14][0].loading = True
except IndexError:
pass
current_frame_index = list(self.ms1_ms2_frames.keys()).index(self.current_frame)
if current_frame_index == len(self.ms1_ms2_frames.keys())-1:
self.current_frame = list(self.ms1_ms2_frames.keys())[0]
else:
self.current_frame = list(self.ms1_ms2_frames.keys())[current_frame_index + 1]
if self.plot_overlapped_frames.value:
self.plot_overlapped_frames.value = False
else:
self.display_heatmap_spectrum()
def display_overlapped_frames(self, *args):
try:
self.layout[10][0][0].loading = True
self.layout[10][1][0].loading = True
self.layout[14][0].loading = True
except IndexError:
pass
if self.plot_overlapped_frames.value is True:
self.layout[12] = None
self.layout[13] = None
self.layout[14] = None
mz = float(self.peptides_table.value.iloc[self.peptides_table.selection[0]]['m/z'])
im = float(self.peptides_table.value.iloc[self.peptides_table.selection[0]]['1/K0'])
try:
self.heatmap_ms1_plot = alphaviz.plotting.plot_heatmap(
self.data.raw_data[list(self.ms1_ms2_frames.keys())],
mz=mz,
im=im,
x_axis_label=self.heatmap_x_axis.value,
y_axis_label=self.heatmap_y_axis.value,
title=f'MS1 frame(s) #{list(self.ms1_ms2_frames.keys())}',
colormap=self.heatmap_colormap.value,
background_color=self.heatmap_background_color.value,
width=570,
height=450,
margin=(0, 10, 10, 0),
)
self.heatmap_ms2_plot = alphaviz.plotting.plot_heatmap(
self.data.raw_data[[val[0] for val in self.ms1_ms2_frames.values()]],
x_axis_label=self.heatmap_x_axis.value,
y_axis_label=self.heatmap_y_axis.value,
title=f'MS2 frame(s) #{[val[0] for val in self.ms1_ms2_frames.values()]}',
colormap=self.heatmap_colormap.value,
background_color=self.heatmap_background_color.value,
width=570,
height=450,
margin=(0, 10, 10, 0),
)
self.layout[10] = pn.Row(
None,
None,
align='center',
sizing_mode='stretch_width'
)
self.layout[10][0] = pn.Column(
pn.pane.HoloViews(
self.heatmap_ms1_plot,
margin=(15, 0, 0, 0),
linked_axes=False if self.analysis_software == 'diann' else True,
loading=False
),
self.export_svg_ms1_button,
align='center',
)
self.layout[10][1] = pn.Column(
pn.pane.HoloViews(
self.heatmap_ms2_plot,
margin=(15, 0, 0, 0),
linked_axes=False if self.analysis_software == 'diann' else True,
loading=False
),
self.export_svg_ms2_button,
align='center',
)
except ValueError:
print('The x- and y-axis of the heatmaps should be different.')
except Exception as error:
print(f'The heatmaps cannot be displayed: {error}')
else:
self.display_heatmap_spectrum()
def export_svg_ms1(self, *args):
return alphaviz.plotting.export_svg(
self.heatmap_ms1_plot,
filename=os.path.join(
self.data.path_raw_folder.value,
f'{self.gene_name}_ms1_heatmap.svg'
),
height=int(self.image_save_size.value[0]), width=int(self.image_save_size.value[1])
)
def export_svg_ms2(self, *args):
return alphaviz.plotting.export_svg(
self.heatmap_ms2_plot,
filename=os.path.join(
self.data.path_raw_folder.value,
f'{self.gene_name}_ms2_heatmap.svg'
),
height=int(self.image_save_size.value[0]), width=int(self.image_save_size.value[1])
)
def export_svg_elprofiles(self, *args):
for i, subplot in enumerate(self.layout[8][1][0].object):
alphaviz.plotting.export_svg(
subplot,
filename=os.path.join(
self.data.path_raw_folder.value,
f'{self.gene_name}_mzim_heatmap_{i}.svg'
),
height=int(self.image_save_size.value[0]), width=int(self.image_save_size.value[1])
)
def update_plots_color(self, *args):
self.display_chromatogram()
self.run_after_protein_selection()
class QCTab(object):
def __init__(self, data, options):
self.name = "Quality Control"
self.data = data
self.mz_tol = options.layout[0][0][0]
self.layout_qc = None
self.analysis_software = self.data.settings.get('analysis_software')
self.distribution_axis = pn.widgets.Select(
name='Select the variable:',
width=190,
margin=(0, 0, 0, 80),
)
self.mass_density_axis = pn.widgets.Select(
name='Select the variable:',
width=220,
margin=(0, 0, 0, 90),
)
def create_layout(self):
dependances = {
self.mz_tol: [self.display_mass_density_plot, 'value'],
self.distribution_axis: [self.display_distribution_plot, 'value'],
self.mass_density_axis: [self.display_mass_density_plot, 'value'],
}
for k in dependances.keys():
k.param.watch(
dependances[k][0],
dependances[k][1]
)
experiment = self.data.ms_file_name.value.split('.')[0]
if self.analysis_software == 'maxquant':
self.mass_density_axis.options = ['Uncalibrated mass error [ppm]', 'Mass error [ppm]']
self.distribution_axis.options = ['m/z', 'Charge', 'Length', 'Mass', '1/K0', 'CCS', 'K0 length', 'Missed cleavages', 'Andromeda score', 'Intensity', 'Mass error [ppm]', 'Mass error [Da]', 'Uncalibrated mass error [ppm]', 'Uncalibrated mass error [Da]', 'Score', '(EXP) # peptides']
self.distribution_axis.value = 'm/z'
self.layout_qc = pn.Column(
pn.widgets.Tabulator(
self.data.mq_summary,
sizing_mode='stretch_width',
layout='fit_data_table',
name='Overview table',
selection=list(self.data.mq_summary[self.data.mq_summary['Raw file'].str.contains(experiment)].index),
row_height=40,
disabled=True,
height=200,
show_index=False,
),
pn.panel(
"## Quality control of the entire sample",
align='center',
margin=(15, 10, -5, 10)
),
pn.Row(
pn.Column(
self.mass_density_axis,
self.display_mass_density_plot(),
align='start',
),
pn.Column(
self.distribution_axis,
self.display_distribution_plot(),
align='start',
),
align='center',
),
margin=(0, 10, 5, 10),
sizing_mode='stretch_width',
align='start',
)
elif self.analysis_software == 'diann':
self.distribution_axis.options = ['m/z', 'Charge', 'Length', 'IM', 'CScore', 'Decoy.CScore', 'Decoy.Evidence', 'Evidence', 'Global.Q.Value', 'Q.Value', 'Quantity.Quality', 'Spectrum.Similarity', '(EXP) # peptides', 'Global.PG.Q.Value', 'PG.Q.Value', 'PG.Quantity', 'Protein.Q.Value']
self.distribution_axis.value = 'm/z'
self.layout_qc = pn.Column(
pn.widgets.Tabulator(
self.data.diann_statist,
sizing_mode='stretch_width',
layout='fit_data_table',
name='Overview table',
selection=list(self.data.diann_statist[self.data.diann_statist['File.Name'].str.contains(experiment)].index),
row_height=40,
disabled=True,
show_index=False,
),
pn.panel(
"## Quality control of the entire sample",
align='center',
margin=(15, 10, -5, 10)
),
pn.Row(
None,
pn.Column(
self.distribution_axis,
self.display_distribution_plot(),
align='center',
),
align='center',
),
margin=(0, 10, 5, 10),
sizing_mode='stretch_width',
align='start',
)
else:
self.layout_qc = pn.pane.Markdown(
'To use this functionality, load the output data from any supported software analysis tool.',
margin=(5, 0, 0, 10),
)
return self.layout_qc
def display_mass_density_plot(self, *args):
if self.analysis_software == 'maxquant':
if self.layout_qc:
self.layout_qc[2][0][1].loading = True
mass_dens_plot_title = 'Uncalibrated mass density plot' if 'Uncalibrated' in self.mass_density_axis.value else 'Calibrated mass density plot'
if self.mass_density_axis.value == 'Uncalibrated mass error [ppm]':
mass_dens_plot = pn.Pane(
alphaviz.plotting.plot_mass_error(
self.data.mq_evidence,
'm/z',
self.mass_density_axis.value,
mass_dens_plot_title,
self.mz_tol.value,
),
loading=False,
config=update_config(f'{mass_dens_plot_title} plot'),
margin=(0, 0, 0, 30),
)
else:
mass_dens_plot = pn.Pane(
alphaviz.plotting.plot_mass_error(
self.data.mq_evidence,
'm/z',
self.mass_density_axis.value,
mass_dens_plot_title,
),
loading=False,
config=update_config(f'{mass_dens_plot_title} plot'),
margin=(0, 0, 0, 30),
)
if self.layout_qc:
self.layout_qc[2][0][1] = mass_dens_plot
else:
return mass_dens_plot
def display_distribution_plot(self, *args):
if self.layout_qc:
self.layout_qc[2][1][1].loading = True
if self.analysis_software == 'maxquant':
if self.distribution_axis.value in ['Score', '(EXP) # peptides']:
data = self.data.mq_protein_groups
else:
data = self.data.mq_evidence
elif self.analysis_software == 'diann':
if self.distribution_axis.value in ['(EXP) # peptides', 'Global.PG.Q.Value', 'PG.Q.Value', 'PG.Quantity', 'Protein.Q.Value']:
data = self.data.diann_proteins
else:
data = self.data.diann_peptides
if self.distribution_axis.value == 'Score':
title = f'Protein {self.distribution_axis.value.lower()} distribution'
elif self.distribution_axis.value == '(EXP) # peptides':
title = 'Number of peptides per protein'
elif self.distribution_axis.value in ['Global.PG.Q.Value', 'PG.Q.Value', 'PG.Quantity', 'Protein.Q.Value']:
title = f'{self.distribution_axis.value} distribution'
else:
title = f'Peptide {self.distribution_axis.value.lower()} distribution'
if self.distribution_axis.value == '(EXP) # peptides':
plot = pn.panel(
alphaviz.plotting.plot_pept_per_protein_barplot(
data,
self.distribution_axis.value,
title,
),
loading=False,
config=update_config(f'{title} plot'),
)
else:
plot = pn.panel(
alphaviz.plotting.plot_peptide_distr(
data,
self.distribution_axis.value,
title
),
loading=False,
config=update_config(title),
)
if self.layout_qc:
self.layout_qc[2][1][1] = plot
else:
return plot
class TargetModeTab(object):
def __init__(self, data, options):
self.name = "Scout Mode"
self.data = data
self.predicted_dict = None
self.peptide_manual = None
self.peptide_prediction = None
self.mz_tol = options.layout[0][0][0]
self.im_tol = options.layout[0][0][1]
self.rt_tol = options.layout[0][0][2]
self.heatmap_x_axis = options.layout[1][0][0]
self.heatmap_y_axis = options.layout[1][0][1]
self.heatmap_colormap = options.layout[1][0][2]
self.heatmap_background_color = options.layout[1][0][3]
self.heatmap_precursor_size = options.layout[1][0][4]
self.heatmap_precursor_color = options.layout[1][0][5]
self.colorscale_qualitative = options.layout[2][0][0]
self.colorscale_sequential = options.layout[2][0][1]
self.image_save_size = options.layout[2][0][2]
self.image_save_format = options.layout[2][0][3]
self.layout_target_mode_manual = None
self.layout_target_mode_predicted = None
self.analysis_software = self.data.settings.get('analysis_software')
self.targeted_peptides_table = pn.widgets.Tabulator(
value=pd.DataFrame(
columns=['name', 'sequence', 'charge', 'im', 'rt']
),
widths={'index': 70},
sizing_mode='stretch_width',
layout='fit_data_table',
selectable=1,
height=250,
show_index=True,
margin=(25, 12, 10, 18)
)
self.peptides_count = pn.widgets.IntInput(
name='Add N empty row(s)',
value=0,
step=1,
start=0,
end=1000
)
self.peptides_table_text = pn.pane.Markdown(
'Load a table of targeted peptides:',
margin=(5, 0, 0, 10),
)
self.peptides_table_file = pn.widgets.FileInput(
accept='.tsv,.csv,.txt',
margin=(-10, 0, 0, 10)
)
self.clear_peptides_table_button = pn.widgets.Button(
name='Clear table',
button_type='default',
width=300,
margin=(25, 0, 0, 10),
)
self.targeted_peptides_table_pred = pn.widgets.Tabulator(
value=pd.DataFrame(
columns=['sequence', 'mods', 'mod_sites', 'charge']
),
hidden_columns=['frag_end_idx', 'frag_start_idx'],
widths={'index': 70},
sizing_mode='stretch_width',
layout='fit_data_table',
selectable=1,
height=250,
show_index=True,
margin=(25, 12, 10, 18)
)
self.peptides_count_prediction = pn.widgets.IntInput(
name='Add N empty row(s)',
value=0,
step=1,
start=0,
end=1000
)
self.peptides_table_text_prediction = pn.pane.Markdown(
'Load a table of targeted peptides:',
margin=(5, 0, 0, 10),
)
self.peptides_table_file_prediction = pn.widgets.FileInput(
accept='.tsv,.csv,.txt',
margin=(-10, 0, 0, 10)
)
self.clear_peptides_table_button_prediction = pn.widgets.Button(
name='Clear table',
button_type='default',
width=300,
margin=(25, 0, 0, 10),
)
self.run_prediction_button = pn.widgets.Button(
name='Run prediction',
button_type='default',
width=250,
margin=(25, 0, 0, 10),
)
self.run_prediction_spinner = pn.indicators.LoadingSpinner(
value=False,
bgcolor='light',
color='secondary',
margin=(25, 0, 0, 15),
width=30,
height=30
)
self.export_svg_manual_button = pn.widgets.Button(
name='Export as .svg',
button_type='default',
align='center',
disabled=True,
width=250,
margin=(25, 0, 0, 10),
)
self.export_svg_prediction_button = pn.widgets.Button(
name='Export as .svg',
button_type='default',
align='center',
disabled=True,
width=250,
margin=(25, 0, 0, 10),
)
def create_layout(self):
dependances = {
self.peptides_table_file: [self.read_peptides_table, 'value'],
self.targeted_peptides_table: [self.visualize_elution_plots, ['selection', 'value']],
self.peptides_count: [self.update_row_count, 'value'],
self.clear_peptides_table_button: [self.clear_peptide_table, 'clicks'],
self.heatmap_colormap: [self.update_plots, 'value'],
self.heatmap_background_color: [self.update_plots, 'value'],
self.mz_tol: [self.update_plots, 'value'],
self.im_tol: [self.update_plots, 'value'],
self.rt_tol: [self.update_plots, 'value'],
self.colorscale_qualitative: [self.update_plots, 'value'],
self.colorscale_sequential: [self.update_plots, 'value'],
self.image_save_size: [self.update_row_count, 'value'],
self.image_save_format: [self.update_row_count, 'value'],
self.clear_peptides_table_button_prediction: [self.clear_peptide_table_prediction, 'clicks'],
self.peptides_count_prediction: [self.update_row_count_prediction, 'value'],
self.peptides_table_file_prediction: [self.read_peptides_table_prediction, 'value'],
self.run_prediction_button: [self.run_prediction, 'clicks'],
self.targeted_peptides_table_pred: [self.visualize_elution_plots_prediction, ['selection', 'value']],
self.export_svg_manual_button: [self.export_svg_manual, 'clicks'],
self.export_svg_prediction_button: [self.export_svg_prediction, 'clicks'],
}
for k in dependances.keys():
k.param.watch(
dependances[k][0],
dependances[k][1]
)
if 'dia' in self.data.raw_data.acquisition_mode:
self.layout_target_mode_manual = pn.Card(
pn.Row(
pn.Column(
self.peptides_count,
self.peptides_table_text,
self.peptides_table_file,
self.clear_peptides_table_button,
),
self.targeted_peptides_table,
),
None,
None,
None,
margin=(15, 10, 5, 10),
sizing_mode='stretch_width',
align='start',
title='Manual Input',
collapsed=True,
header_background='#dbf0fe',
)
self.layout_target_mode_predicted = pn.Card(
pn.Row(
pn.Column(
self.peptides_count_prediction,
self.peptides_table_text_prediction,
self.peptides_table_file_prediction,
pn.Row(
self.run_prediction_button,
self.run_prediction_spinner,
),
self.clear_peptides_table_button_prediction,
),
self.targeted_peptides_table_pred,
sizing_mode='stretch_width',
),
None,
None,
None,
margin=(15, 10, 5, 10),
sizing_mode='stretch_width',
align='start',
title='Prediction',
collapsed=True,
header_background='#dbf0fe',
# collapsed=True,
)
return pn.Column(
self.layout_target_mode_manual,
self.layout_target_mode_predicted,
sizing_mode='stretch_width',
)
else:
self.layout_target_mode_manual = pn.Column(
pn.pane.Markdown(
'To use this functionality please load DIA data.',
margin=(5, 0, 0, 10),
),
None,
None,
None,
)
return self.layout_target_mode_manual
def update_plots(self, *args):
if self.layout_target_mode_manual:
self.visualize_elution_plots()
if self.layout_target_mode_predicted:
self.visualize_elution_plots_prediction()
def clear_peptide_table(self, *args):
if not self.targeted_peptides_table.value.empty:
self.targeted_peptides_table.selection = []
self.targeted_peptides_table.value = pd.DataFrame(
columns=['name', 'sequence', 'charge', 'im', 'rt'],
)
def update_row_count(self, *args):
if self.targeted_peptides_table.value.empty:
self.targeted_peptides_table.selection = []
self.targeted_peptides_table.value = pd.DataFrame(
columns=['name', 'sequence', 'charge', 'im', 'rt'],
index=range(self.peptides_count.value),
)
else:
self.targeted_peptides_table.value = pd.concat(
[
self.targeted_peptides_table.value,
pd.DataFrame(
columns=self.targeted_peptides_table.value.columns,
index=range(self.peptides_count.value),
),
],
ignore_index=True
)
self.peptides_count.value = 0
def read_peptides_table(self, *args):
file_ext = os.path.splitext(self.peptides_table_file.filename)[-1]
if file_ext == '.csv':
sep = ';'
else:
sep = '\t'
self.targeted_peptides_table.selection = []
self.targeted_peptides_table.value = pd.read_csv(
StringIO(str(self.peptides_table_file.value, "utf-8")),
sep=sep
)
def visualize_elution_plots(self, *args):
if 'dia' in self.data.raw_data.acquisition_mode:
if self.targeted_peptides_table.selection:
try:
self.peptide_manual = self.targeted_peptides_table.value.iloc[self.targeted_peptides_table.selection[0]].to_dict()
except IndexError:
self.peptide_manual = {}
if self.peptide_manual and not any(
|
pd.isna(val)
|
pandas.isna
|
# This source code file is a part of SigProfilerTopography
# SigProfilerTopography is a tool included as part of the SigProfiler
# computational framework for comprehensive analysis of mutational
# signatures from next-generation sequencing of cancer genomes.
# SigProfilerTopography provides the downstream data analysis of
# mutations and extracted mutational signatures w.r.t.
# nucleosome occupancy, replication time, strand bias and processivity.
# Copyright (C) 2018-2020 <NAME>
# Version2
# This version use np.arrays
# Right now replication strand bias analysis works for single point mutations and signatures.
# This python code analyses the Replication Strand Bias
import multiprocessing
import numpy as np
import pandas as pd
import os
from SigProfilerTopography.source.commons.TopographyCommons import CHROM
from SigProfilerTopography.source.commons.TopographyCommons import START
from SigProfilerTopography.source.commons.TopographyCommons import END
from SigProfilerTopography.source.commons.TopographyCommons import SIGNAL
from SigProfilerTopography.source.commons.TopographyCommons import PYRAMIDINESTRAND
from SigProfilerTopography.source.commons.TopographyCommons import SAMPLE
from SigProfilerTopography.source.commons.TopographyCommons import TYPE
from SigProfilerTopography.source.commons.TopographyCommons import SUBS
from SigProfilerTopography.source.commons.TopographyCommons import INDELS
from SigProfilerTopography.source.commons.TopographyCommons import DINUCS
from SigProfilerTopography.source.commons.TopographyCommons import MUTATION
from SigProfilerTopography.source.commons.TopographyCommons import MUTATION_LONG
from SigProfilerTopography.source.commons.TopographyCommons import LENGTH
from SigProfilerTopography.source.commons.TopographyCommons import LEADING
from SigProfilerTopography.source.commons.TopographyCommons import LAGGING
from SigProfilerTopography.source.commons.TopographyCommons import REPLICATIONSTRANDBIAS
from SigProfilerTopography.source.commons.TopographyCommons import DATA
from SigProfilerTopography.source.commons.TopographyCommons import LIB
from SigProfilerTopography.source.commons.TopographyCommons import CHRBASED
from SigProfilerTopography.source.commons.TopographyCommons import readWig_with_fixedStep_variableStep
from SigProfilerTopography.source.occupancy.ChrBasedSignalArrays import readFileInBEDFormat
from SigProfilerTopography.source.commons.TopographyCommons import memory_usage
from SigProfilerTopography.source.commons.TopographyCommons import write_type_strand_bias_np_array_as_dataframe
from SigProfilerTopography.source.commons.TopographyCommons import write_signature_mutation_type_strand_bias_np_array_as_dataframe
from SigProfilerTopography.source.commons.TopographyCommons import write_sbs_signature_sbs96_mutation_type_replication_strand_bias
from SigProfilerTopography.source.commons.TopographyCommons import write_sample_based_strand1_strand2_as_dataframe
from SigProfilerTopography.source.commons.TopographyCommons import USING_APPLY_ASYNC_FOR_EACH_CHROM_AND_SIM
from SigProfilerTopography.source.commons.TopographyCommons import USING_APPLY_ASYNC_FOR_EACH_CHROM_AND_SIM_SPLIT
from SigProfilerTopography.source.commons.TopographyCommons import get_chrBased_simBased_combined_df_split
from SigProfilerTopography.source.commons.TopographyCommons import get_chrBased_simBased_combined_df
from SigProfilerTopography.source.commons.TopographyCommons import get_chrBased_simBased_dfs
from SigProfilerTopography.source.commons.TopographyCommons import decideFileType
from SigProfilerTopography.source.commons.TopographyCommons import C2A
from SigProfilerTopography.source.commons.TopographyCommons import C2G
from SigProfilerTopography.source.commons.TopographyCommons import C2T
from SigProfilerTopography.source.commons.TopographyCommons import T2A
from SigProfilerTopography.source.commons.TopographyCommons import T2C
from SigProfilerTopography.source.commons.TopographyCommons import T2G
#For Supp Fig2B
CHR10_THRESHOLD_START = 16400000
CHR10_THRESHOLD_END = 26400000
#For Supp Fig2A
CHR20_START = 36260000
CHR20_END = 36830000
# FOR FINDING TRANSITION ZONES (LEADING or LAGGING)
# THRESHOLD_CONSECUTIVE_LONG_STRETCH_LENGTH= 250000 #used in Supp Fig2B
# THRESHOLD_CONSECUTIVE_LONG_STRETCH_LENGTH= 150000
THRESHOLD_CONSECUTIVE_LONG_STRETCH_LENGTH= 10000
# THRESHOLD_DISCARD_LATEST_TRANSITION_ZONE = 100000 #used in Supp Fig2B
THRESHOLD_DISCARD_LATEST_TRANSITION_ZONE = 25000
# THRESHOLD_LATEST_TRANSITION_ZONE = 0
########################################################################
def checkForSameSignedSlopeBetweenConsecutivePeakandValley(chrLong,peakorValleyStart, peakorValleyEnd, chrBasedSmoothedWaveletReplicationTimeSignalDF):
transitionZoneList =[]
# print('################ checkForConsecutive starts ############ fromStart: %s toEnd: %s' %(peakorValleyStart,peakorValleyEnd))
subset_df = chrBasedSmoothedWaveletReplicationTimeSignalDF[(chrBasedSmoothedWaveletReplicationTimeSignalDF[START]>=peakorValleyStart) & (chrBasedSmoothedWaveletReplicationTimeSignalDF[END]<=peakorValleyEnd)]
consecutiveLength = 0
formerRow= None
formerSlopeDirection = None
start = peakorValleyStart
for index,row in subset_df.iterrows():
if formerRow is None:
#We read the row for the first time
formerRow = row
consecutiveLength += 1000
else:
slope = (row.get(SIGNAL) - formerRow.get(SIGNAL)) / 1000
formerRow = row
if (formerSlopeDirection is None):
formerSlopeDirection = np.sign(slope)
consecutiveLength += 1000
elif (formerSlopeDirection==np.sign(slope)):
consecutiveLength += 1000
else:
#They have different signs
if (consecutiveLength>=THRESHOLD_CONSECUTIVE_LONG_STRETCH_LENGTH):
# print('Slope sign changed -- Found one: from %d to %d with %d bases with slope sign %s' %(start,((row.get('start') + row.get('end'))//2), consecutiveLength, formerSlopeDirection))
transitionZoneList.append((chrLong,start,(row.get(START) + row.get(END))//2,formerSlopeDirection,consecutiveLength))
#initialize and start again
consecutiveLength = 1000
start = (row.get(START) + row.get(END))//2
formerRow= row
formerSlopeDirection= np.sign(slope)
continue
# print('slope: %f - np.sign(slope): %f - consecutiveLength: %d ' %(slope,np.sign(slope),consecutiveLength))
formerSlopeDirection = np.sign(slope)
#This is for the last probable transition zone.
if (consecutiveLength >= THRESHOLD_CONSECUTIVE_LONG_STRETCH_LENGTH):
# print('After for loop ends, found one: from %d to %s with %d bases with slope sign %s' % (start, (row.get('start') + row.get('end'))//2, consecutiveLength, formerSlopeDirection))
transitionZoneList.append((chrLong,start,(row.get(START) + row.get(END))//2,formerSlopeDirection,consecutiveLength))
# print('################ checkForConsecutive ends ############ fromStart: %s toEnd: %s' % (peakorValleyStart,peakorValleyEnd))
return transitionZoneList
########################################################################
########################################################################
# chr10_subset_wavelet_processed_df
# chr start end signal
# 265577 chr10 16400500 16401499 24.9438
# 265578 chr10 16401500 16402499 24.9585
# valleys_peaks_df
# chr start end type
# 415 chr10 16454500 16455500 Peak
# 415 chr10 16528500 16529500 Valley
def findLongStretchesofConsistentTransitionZones(chrLong,fromStart,toEnd,chrBasedSmoothedWaveletReplicationTimeSignalDF,valleys_peaks_df):
transitionZonesList =[]
for index,row in valleys_peaks_df.iterrows():
peakorValleyStart = row[START]
peakorValleyEnd = row[END]
peakorValleyMidpoint = (peakorValleyStart+peakorValleyEnd)//2
region_type = row['type']
if (region_type == 'Peak'):
if (peakorValleyMidpoint>fromStart):
# print('from: %d - to: %d - difference: %d' %(fromStart,peakorValleyMidpoint, (peakorValleyMidpoint-fromStart)))
found = checkForSameSignedSlopeBetweenConsecutivePeakandValley(chrLong,fromStart, peakorValleyMidpoint, chrBasedSmoothedWaveletReplicationTimeSignalDF)
transitionZonesList.extend(found)
# print('found %s' %found)
fromStart=peakorValleyMidpoint
elif (region_type == 'Valley'):
valleyStart =row[START]
valleyEnd = row[END]
valleyMidpoint = (valleyStart+valleyEnd)//2
# This is something special to valley
newValleyStart1 = valleyMidpoint - THRESHOLD_DISCARD_LATEST_TRANSITION_ZONE
newValleyStart2 = valleyMidpoint + THRESHOLD_DISCARD_LATEST_TRANSITION_ZONE
if (newValleyStart1>fromStart):
# print('from: %d - to: %d - difference: %d' % (fromStart, newValleyStart1, (newValleyStart1 - fromStart)))
found = checkForSameSignedSlopeBetweenConsecutivePeakandValley(chrLong,fromStart, newValleyStart1,chrBasedSmoothedWaveletReplicationTimeSignalDF)
transitionZonesList.extend(found)
# print('found %s' % found)
# bypass the genome region between newValleyStart1 and newValleyStart2
fromStart = newValleyStart2
#
#For the last interval
if (toEnd>fromStart):
# print('last one from: %d - to: %d -difference: %d' %(fromStart,toEnd,(toEnd-fromStart)))
found = checkForSameSignedSlopeBetweenConsecutivePeakandValley(chrLong,fromStart, toEnd, chrBasedSmoothedWaveletReplicationTimeSignalDF)
transitionZonesList.extend(found)
# print('found %s' %found)
return transitionZonesList
########################################################################
########################################################################
#TODO Is (replicationStrand_row['end']+1) okay?
# We assume that there are no overlapping intervals with positive and negative slopes.
# To test it, fill one array with 1s for positive-slope intervals
# and another array with -2s for negative-slope intervals;
# add them: any -1 in the sum means this assumption is violated.
def fillReplicationStrandArray(replicationStrand_row,chrBased_replication_array):
# e.g.: replicationStrand_row
# chr chrX
# start 154861998
# end 155096999
# slopeDirection 1 (1 means leading strand -1 means lagging strand on positive strand)
# length 235000
# labels = ['chr', 'start', 'end', 'slopeDirection', 'length']
chrBased_replication_array[replicationStrand_row['start']:replicationStrand_row['end']+1] = replicationStrand_row['slopeDirection']
########################################################################
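########################################################################
# A minimal sketch (not part of the original pipeline) of the consistency check
# suggested in the comment above fillReplicationStrandArray: fill one array with
# 1s for positive-slope intervals and another with -2s for negative-slope
# intervals; after summing, any -1 entry reveals a position covered by intervals
# of opposite slope. The helper name and the toy intervals are illustrative assumptions.
def check_no_opposite_slope_overlap(transition_zones_df, chrom_size):
    positive_array = np.zeros(chrom_size, dtype=np.int8)
    negative_array = np.zeros(chrom_size, dtype=np.int8)
    for _, row in transition_zones_df.iterrows():
        if row['slopeDirection'] > 0:
            positive_array[row['start']:row['end'] + 1] = 1
        else:
            negative_array[row['start']:row['end'] + 1] = -2
    combined = positive_array + negative_array
    # True means no position carries both a positive- and a negative-slope interval
    return not np.any(combined == -1)
# Example (hypothetical intervals):
# df = pd.DataFrame({'start': [0, 500], 'end': [400, 900], 'slopeDirection': [1, -1]})
# check_no_opposite_slope_overlap(df, 1000)  # -> True (no overlap)
########################################################################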
########################################################################
# July 28, 2020
# Using numpy arrays
# if mutationPyramidineStrand and slope have the same sign increase LEADING STRAND count
# else mutationPyramidineStrand and slope have the opposite sign increase LAGGING STRAND count
# sample_based for further usage
def searchAllMutationOnReplicationStrandArray_using_list_comprehension_using_numpy_array(
mutation_row,
my_type,
chrBasedReplicationArray,
SBS6_mutation_types_np_array,
SBS96_mutation_types_np_array,
ordered_signatures_cutoffs,
df_columns_signatures_mask_array,
SBS6_mutation_types_default_zeros_array,
SBS96_mutation_types_default_zeros_array,
subs_signatures_default_zeros_array,
dinucs_signatures_default_zeros_array,
indels_signatures_default_zeros_array,
subs_signatures_SBS6_mutation_types_default_zeros_array,
subs_signatures_SBS96_mutation_types_default_zeros_array,
all_types_leading_np_array,
all_types_lagging_np_array,
subs_signature_SBS6_mutation_type_leading_np_array,
subs_signature_SBS6_mutation_type_lagging_np_array,
subs_signature_SBS96_mutation_type_leading_np_array,
subs_signature_SBS96_mutation_type_lagging_np_array,
all_samples_all_types_leading_np_array,
all_samples_all_types_lagging_np_array,
all_samples_subs_signature_mutation_type_leading_np_array,
all_samples_subs_signature_mutation_type_lagging_np_array,
sample_based,
all_samples_np_array,
is_discreet,
df_columns):
if sample_based:
indexofSample = np.where(df_columns == SAMPLE)[0][0]
mutation_sample = mutation_row[indexofSample]
sample_index = np.where(all_samples_np_array == mutation_sample)[0][0]
indexofStart = np.where(df_columns == START)[0][0]
start = mutation_row[indexofStart]
indexofPyrimidineStrand = np.where(df_columns == PYRAMIDINESTRAND)[0][0]
pyramidineStrand = mutation_row[indexofPyrimidineStrand]
subs_signature_SBS6_mutation_type_mask_array = subs_signatures_SBS6_mutation_types_default_zeros_array
subs_signature_SBS96_mutation_type_mask_array = subs_signatures_SBS96_mutation_types_default_zeros_array
probabilities = mutation_row[df_columns_signatures_mask_array]
#############################################################################################################
if(my_type == SUBS):
end = start+1
# e.g.: C>A
indexofMutation = np.where(df_columns == MUTATION)[0][0]
SBS6_mutation_type = mutation_row[indexofMutation]
index_of_mutation_long = np.where(df_columns == MUTATION_LONG)[0][0]
# e.g.: T:AA[C>A]AA
mutation_type_long = mutation_row[index_of_mutation_long]
SBS96_mutation_type = mutation_type_long[3:10]
# six_mutation_types_mask_array.shape (6,)
SBS6_mutation_types_mask_array = np.where(SBS6_mutation_types_np_array == SBS6_mutation_type, 1, 0)
SBS96_mutation_types_mask_array = np.where(SBS96_mutation_types_np_array == SBS96_mutation_type, 1, 0)
if is_discreet:
# Convert True into 1, and False into 0
# subs_signatures_mask_array.shape (num_of_subs_signatures,)
threshold_mask_array = np.greater_equal(probabilities, ordered_signatures_cutoffs)
subs_signatures_mask_array = threshold_mask_array.astype(int)
else:
subs_signatures_mask_array = np.array(probabilities).astype(float)
# Concatenate
all_types_mask_array = np.concatenate((SBS6_mutation_types_mask_array,
SBS96_mutation_types_mask_array,
subs_signatures_mask_array, # SUBS
dinucs_signatures_default_zeros_array,
indels_signatures_default_zeros_array), axis=None)
# multiply subs_signatures_mask_array times six_mutation_types_mask_array
# Add one more dimension to subs_signatures_mask_array and six_mutation_types_mask_array
# subs_signatures_mask_array_2d.shape (1,num_of_subs_signatures)
subs_signatures_mask_array_2d = np.expand_dims(subs_signatures_mask_array, axis=0)
# six_mutation_types_mask_array_2d.shape (1,6)
SBS6_mutation_types_mask_array_2d = np.expand_dims(SBS6_mutation_types_mask_array, axis=0)
# SBS96_mutation_types_mask_array.shape (96,) --> (1,96)
SBS96_mutation_types_mask_array_2d = np.expand_dims(SBS96_mutation_types_mask_array, axis=0)
# to_be_accumulated_array.shape (num_of_subs_signatures,6)
subs_signature_SBS6_mutation_type_mask_array = subs_signatures_mask_array_2d.T * SBS6_mutation_types_mask_array_2d
# to_be_accumulated_array.shape (num_of_subs_signatures,96)
subs_signature_SBS96_mutation_type_mask_array = subs_signatures_mask_array_2d.T * SBS96_mutation_types_mask_array_2d
elif (my_type == DINUCS):
end = start+2
if is_discreet:
# Convert True into 1, and False into 0
threshold_mask_array = np.greater_equal(probabilities, ordered_signatures_cutoffs)
dinucs_signatures_mask_array = threshold_mask_array.astype(int)
else:
dinucs_signatures_mask_array = np.array(probabilities).astype(float)
# Concatenate
all_types_mask_array= np.concatenate((SBS6_mutation_types_default_zeros_array,
SBS96_mutation_types_default_zeros_array,
subs_signatures_default_zeros_array,
dinucs_signatures_mask_array, # DINUCS
indels_signatures_default_zeros_array), axis=None)
elif (my_type == INDELS):
indexofLength = np.where(df_columns == LENGTH)[0][0]
end = start + int(mutation_row[indexofLength])
if is_discreet:
# Convert True into 1, and False into 0
threshold_mask_array = np.greater_equal(probabilities, ordered_signatures_cutoffs)
indels_signatures_mask_array = threshold_mask_array.astype(int)
else:
indels_signatures_mask_array = np.array(probabilities).astype(float)
# Concatenate
all_types_mask_array= np.concatenate((SBS6_mutation_types_default_zeros_array,
SBS96_mutation_types_default_zeros_array,
subs_signatures_default_zeros_array,
dinucs_signatures_default_zeros_array,
indels_signatures_mask_array), axis=None)
#############################################################################################################
#############################################################################################################
#if there is overlap with chrBasedReplicationArray
slicedArray = chrBasedReplicationArray[int(start):int(end)]
if (np.any(slicedArray)):
#It must be full with at most -1 and +1
uniqueValueArray = np.unique(slicedArray[np.nonzero(slicedArray)])
# I expect the value of 1 (LEADING on the positive strand) or -1 (LAGGING on the positive strand) so size must be one.
if (uniqueValueArray.size == 1):
for uniqueValue in np.nditer(uniqueValueArray):
# uniqueValue is a 0-d numpy array yielded by np.nditer
slope = int(uniqueValue)
#They have the same sign, multiplication (1,1) (-1,-1) must be 1
if (slope*pyramidineStrand > 0):
all_types_leading_np_array += all_types_mask_array
subs_signature_SBS6_mutation_type_leading_np_array += subs_signature_SBS6_mutation_type_mask_array
subs_signature_SBS96_mutation_type_leading_np_array += subs_signature_SBS96_mutation_type_mask_array
# They have the opposite sign, multiplication (1,-1) (-1,1) must be -1
elif (slope*pyramidineStrand < 0):
all_types_lagging_np_array += all_types_mask_array
subs_signature_SBS6_mutation_type_lagging_np_array += subs_signature_SBS6_mutation_type_mask_array
subs_signature_SBS96_mutation_type_lagging_np_array += subs_signature_SBS96_mutation_type_mask_array
elif ((uniqueValueArray.size==2) and (pyramidineStrand!=0)):
# Increment both LEADING and LAGGING
all_types_leading_np_array += all_types_mask_array
all_types_lagging_np_array += all_types_mask_array
subs_signature_SBS6_mutation_type_leading_np_array += subs_signature_SBS6_mutation_type_mask_array
subs_signature_SBS6_mutation_type_lagging_np_array += subs_signature_SBS6_mutation_type_mask_array
subs_signature_SBS96_mutation_type_leading_np_array += subs_signature_SBS96_mutation_type_mask_array
subs_signature_SBS96_mutation_type_lagging_np_array += subs_signature_SBS96_mutation_type_mask_array
elif (uniqueValueArray.size>2):
print('There is a situation!!!')
else:
print('There is a situation!!!')
if sample_based:
if (np.any(slicedArray)):
# It must be full with at most -1 and +1
uniqueValueArray = np.unique(slicedArray[np.nonzero(slicedArray)])
# I expect the value of 1 (LEADING on the positive strand) or -1 (LAGGING on the positive strand) so size must be one.
if (uniqueValueArray.size == 1):
for uniqueValue in np.nditer(uniqueValueArray):
# uniqueValue is a 0-d numpy array yielded by np.nditer
slope = int(uniqueValue)
# They have the same sign, multiplication (1,1) (-1,-1) must be 1
if (slope * pyramidineStrand > 0):
all_samples_all_types_leading_np_array[sample_index] += all_types_mask_array
all_samples_subs_signature_mutation_type_leading_np_array[sample_index] += subs_signature_SBS6_mutation_type_mask_array
# They have the opposite sign, multiplication (1,-1) (-1,1) must be -1
elif (slope * pyramidineStrand < 0):
all_samples_all_types_lagging_np_array[sample_index] += all_types_mask_array
all_samples_subs_signature_mutation_type_lagging_np_array[sample_index] += subs_signature_SBS6_mutation_type_mask_array
elif ((uniqueValueArray.size == 2) and (pyramidineStrand != 0)):
# Increment both LEADING and LAGGING
all_samples_all_types_leading_np_array[sample_index] += all_types_mask_array
all_samples_all_types_lagging_np_array[sample_index] += all_types_mask_array
all_samples_subs_signature_mutation_type_leading_np_array[sample_index] += subs_signature_SBS6_mutation_type_mask_array
all_samples_subs_signature_mutation_type_lagging_np_array[sample_index] += subs_signature_SBS6_mutation_type_mask_array
elif (uniqueValueArray.size > 2):
print('There is a situation!!!')
else:
print('There is a situation!!!')
#############################################################################################################
########################################################################
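########################################################################
# A compact sketch (illustrative, not the original implementation) of the
# strand-assignment rule used above: the pyrimidine strand (+1/-1) and the
# replication slope (+1 LEADING, -1 LAGGING on the positive strand) agree in
# sign -> LEADING, disagree -> LAGGING; a product of 0 is left unassigned.
# The function name is an assumption.
def classify_replication_strand(pyrimidine_strand, slope):
    product = pyrimidine_strand * slope
    if product > 0:
        return LEADING
    elif product < 0:
        return LAGGING
    return None  # pyrimidine strand 0 or no slope information
# Example: classify_replication_strand(-1, -1) -> LEADING
########################################################################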
########################################################################
# For df split
# July 28, 2020
# Using numpy arrays
# if mutationPyramidineStrand and slope have the same sign increase LEADING STRAND count
# else mutationPyramidineStrand and slope have the opposite sign increase LAGGING STRAND count
# sample_based for further usage
def searchAllMutationOnReplicationStrandArray_using_list_comprehension_using_numpy_array_for_df_split(
mutation_row,
chrBasedReplicationArray,
six_mutation_types_np_array,
ordered_sbs_signatures_cutoffs,
ordered_dbs_signatures_cutoffs,
ordered_id_signatures_cutoffs,
df_columns_subs_signatures_mask_array,
df_columns_dinucs_signatures_mask_array,
df_columns_indels_signatures_mask_array,
six_mutation_types_default_zeros_array,
subs_signatures_default_zeros_array,
dinucs_signatures_default_zeros_array,
indels_signatures_default_zeros_array,
subs_signatures_mutation_types_default_zeros_array,
all_types_leading_np_array,
all_types_lagging_np_array,
subs_signature_mutation_type_leading_np_array,
subs_signature_mutation_type_lagging_np_array,
sample_based,
is_discreet,
df_columns):
indexofStart = np.where(df_columns == START)[0][0]
indexofPyrimidineStrand = np.where(df_columns == PYRAMIDINESTRAND)[0][0]
indexofSample = np.where(df_columns == SAMPLE)[0][0]
indexofType = np.where(df_columns == TYPE)[0][0]
start = mutation_row[indexofStart]
pyramidineStrand = mutation_row[indexofPyrimidineStrand]
sample = mutation_row[indexofSample]
my_type=mutation_row[indexofType]
mutationType = None
subs_signature_mutation_type_mask_array=subs_signatures_mutation_types_default_zeros_array
#############################################################################################################
if(my_type==SUBS):
end = start+1
#e.g.: C>A
indexofMutation = np.where(df_columns == MUTATION)[0][0]
mutationType = mutation_row[indexofMutation]
#six_mutation_types_mask_array.shape (6,)
six_mutation_types_mask_array= np.where(six_mutation_types_np_array == mutationType, 1, 0)
probabilities = mutation_row[df_columns_subs_signatures_mask_array]
if is_discreet:
threshold_mask_array = np.greater_equal(probabilities, ordered_sbs_signatures_cutoffs)
# Convert True into 1, and False into 0
# subs_signatures_mask_array.shape (num_of_subs_signatures,)
subs_signatures_mask_array = threshold_mask_array.astype(int)
else:
subs_signatures_mask_array = np.array(probabilities).astype(float)
# Concatenate
all_types_mask_array= np.concatenate((six_mutation_types_mask_array,
subs_signatures_mask_array,
dinucs_signatures_default_zeros_array,
indels_signatures_default_zeros_array), axis=None)
# Add one more dimension to subs_signatures_mask_array and six_mutation_types_mask_array
# subs_signatures_mask_array_2d.shape (1,num_of_subs_signatures)
subs_signatures_mask_array_2d = np.expand_dims(subs_signatures_mask_array, axis=0)
# six_mutation_types_mask_array_2d.shape (1,6)
six_mutation_types_mask_array_2d = np.expand_dims(six_mutation_types_mask_array, axis=0)
# multiply subs_signatures_mask_array times six_mutation_types_mask_array
subs_signature_mutation_type_mask_array = subs_signatures_mask_array_2d.T * six_mutation_types_mask_array_2d
elif (my_type==DINUCS):
end = start+2
probabilities = mutation_row[df_columns_dinucs_signatures_mask_array]
if is_discreet:
threshold_mask_array = np.greater_equal(probabilities, ordered_dbs_signatures_cutoffs)
# Convert True into 1, and False into 0
dinucs_signatures_mask_array = threshold_mask_array.astype(int)
else:
dinucs_signatures_mask_array = np.array(probabilities).astype(float)
# Concatenate
all_types_mask_array= np.concatenate((six_mutation_types_default_zeros_array,
subs_signatures_default_zeros_array,
dinucs_signatures_mask_array,
indels_signatures_default_zeros_array), axis=None)
elif (my_type==INDELS):
indexofLength = np.where(df_columns == LENGTH)[0][0]
end = start+int(mutation_row[indexofLength])
probabilities = mutation_row[df_columns_indels_signatures_mask_array]
if is_discreet:
threshold_mask_array = np.greater_equal(probabilities, ordered_id_signatures_cutoffs)
# Convert True into 1, and False into 0
indels_signatures_mask_array = threshold_mask_array.astype(int)
else:
indels_signatures_mask_array = np.array(probabilities).astype(float)
# Concatenate
all_types_mask_array= np.concatenate((six_mutation_types_default_zeros_array,
subs_signatures_default_zeros_array,
dinucs_signatures_default_zeros_array,
indels_signatures_mask_array), axis=None)
#############################################################################################################
#############################################################################################################
#if there is overlap with chrBasedReplicationArray
slicedArray = chrBasedReplicationArray[int(start):int(end)]
if (np.any(slicedArray)):
#It must be full with at most -1 and +1
uniqueValueArray = np.unique(slicedArray[np.nonzero(slicedArray)])
# I expect the value of 1 (LEADING on the positive strand) or -1 (LAGGING on the positive strand) so size must be one.
if (uniqueValueArray.size == 1):
for uniqueValue in np.nditer(uniqueValueArray):
# uniqueValue is a 0-d numpy array yielded by np.nditer
slope = int(uniqueValue)
#They have the same sign, multiplication (1,1) (-1,-1) must be 1
if (slope*pyramidineStrand > 0):
all_types_leading_np_array += all_types_mask_array
subs_signature_mutation_type_leading_np_array += subs_signature_mutation_type_mask_array
# They have the opposite sign, multiplication (1,-1) (-1,1) must be -1
elif (slope*pyramidineStrand < 0):
all_types_lagging_np_array += all_types_mask_array
subs_signature_mutation_type_lagging_np_array += subs_signature_mutation_type_mask_array
elif ((uniqueValueArray.size==2) and (pyramidineStrand!=0)):
#Increment both LEADING and LAGGING
all_types_leading_np_array += all_types_mask_array
all_types_lagging_np_array += all_types_mask_array
subs_signature_mutation_type_leading_np_array += subs_signature_mutation_type_mask_array
subs_signature_mutation_type_lagging_np_array += subs_signature_mutation_type_mask_array
elif (uniqueValueArray.size>2):
print('There is a situation!!!')
else:
print('There is a situation!!!')
#############################################################################################################
########################################################################
########################################################################
# This function checks that valleys and peaks alternate, i.e. no two consecutive rows are both valleys or both peaks.
def checkforValidness(chrBased_valleys_peaks_df):
formerRowType = None
for index, row in chrBased_valleys_peaks_df.iterrows():
if formerRowType is None:
formerRowType = row['type']
elif (row['type']== formerRowType):
return False
else:
formerRowType = row['type']
return True
########################################################################
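########################################################################
# Usage sketch for checkforValidness with toy dataframes (the 'type' column
# matches the one added in get_chr_based_replication_strand_array below):
# alternating = pd.DataFrame({'type': ['Valley', 'Peak', 'Valley', 'Peak']})
# checkforValidness(alternating)  # -> True
# repeated = pd.DataFrame({'type': ['Valley', 'Valley', 'Peak']})
# checkforValidness(repeated)     # -> False (two consecutive valleys)
########################################################################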
########################################################################
def get_chr_based_replication_strand_array_for_callback(chrLong,chromSize,repliseq_signal_df,valleys_df,peaks_df):
chrBased_replication_array = get_chr_based_replication_strand_array(chrLong, chromSize, repliseq_signal_df, valleys_df, peaks_df)
return (chrLong,chrBased_replication_array)
########################################################################
########################################################################
def get_chr_based_replication_strand_array(chrLong,chromSize,repliseq_signal_df,valleys_df,peaks_df):
# Read chrBasedSmoothedWaveletReplicationTimeSignalDF
chrBased_SmoothedWaveletReplicationTimeSignal_df = repliseq_signal_df[repliseq_signal_df[CHROM] == chrLong]
chrBasedValleysDF = valleys_df[valleys_df[CHROM] == chrLong].copy()
chrBasedValleysDF['type'] = 'Valley'
chrBasedValleysDF = chrBasedValleysDF.astype(dtype={START: int, END: int})
chrBasedPeaksDF = peaks_df[peaks_df[CHROM] == chrLong].copy()
chrBasedPeaksDF['type'] = 'Peak'
chrBasedPeaksDF = chrBasedPeaksDF.astype(dtype={START: int, END: int})
# Concat Peaks and Valleys
chrBased_valleys_peaks_df = pd.concat([chrBasedValleysDF, chrBasedPeaksDF], axis=0)
# Sort Valleys and peaks
chrBased_valleys_peaks_df.sort_values(START, inplace=True)
if ((chrBased_SmoothedWaveletReplicationTimeSignal_df is not None) and (not chrBased_SmoothedWaveletReplicationTimeSignal_df.empty) and (checkforValidness(chrBased_valleys_peaks_df))):
chrBased_replication_array = fill_chr_based_replication_strand_array(chrLong,
chromSize,
chrBased_SmoothedWaveletReplicationTimeSignal_df,
chrBased_valleys_peaks_df)
return chrBased_replication_array
else:
return None
########################################################################
########################################################################
def fill_chr_based_replication_strand_array(chrLong,
chromSize,
chrBasedSmoothedWaveletReplicationTimeSignalDF,
chrBased_valleys_peaks_df):
# +1 means leading strand, -1 means lagging strand
# we will fill this array using smoothedSignal, peaks and valleys for each chromosome
chrBased_replication_array = np.zeros(chromSize, dtype=np.int8)
firstIndex = chrBasedSmoothedWaveletReplicationTimeSignalDF.index[0]
lastIndex = chrBasedSmoothedWaveletReplicationTimeSignalDF.index[-1]
startColumnIndex = chrBasedSmoothedWaveletReplicationTimeSignalDF.columns.get_loc(START)
endColumnIndex = chrBasedSmoothedWaveletReplicationTimeSignalDF.columns.get_loc(END)
start = chrBasedSmoothedWaveletReplicationTimeSignalDF.iloc[0, startColumnIndex] # get the first row start
end = chrBasedSmoothedWaveletReplicationTimeSignalDF.iloc[-1, endColumnIndex] # get the last row end
# Step1 Find the transition zones
chrBasedTransitionZonesList = findLongStretchesofConsistentTransitionZones(chrLong,
start,
end,
chrBasedSmoothedWaveletReplicationTimeSignalDF,
chrBased_valleys_peaks_df)
labels = ['chr', 'start', 'end', 'slopeDirection', 'length']
chrBasedTransitionZonesDF = pd.DataFrame.from_records(chrBasedTransitionZonesList, columns=labels)
# Step2 Fill the replication array using transition zones
chrBasedTransitionZonesDF.apply(fillReplicationStrandArray, chrBased_replication_array=chrBased_replication_array,axis=1)
return chrBased_replication_array
########################################################################
########################################################################
def read_repliseq_dataframes(smoothedWaveletRepliseqDataFilename,valleysBEDFilename,peaksBEDFilename):
################### Read the Smoothed Wavelet Replication Time Signal starts ###########################
#new way, JAN 7, 2020
file_extension = os.path.splitext(os.path.basename(smoothedWaveletRepliseqDataFilename))[1]
if (file_extension.lower() == '.wig'):
isFileTypeBEDGRAPH = decideFileType(smoothedWaveletRepliseqDataFilename)
if isFileTypeBEDGRAPH:
repliseq_wavelet_signal_df=
|
pd.read_csv(smoothedWaveletRepliseqDataFilename, sep='\t', comment='#', header=None, names=[CHROM,START,END,SIGNAL])
|
pandas.read_csv
|
import logging
import sys
import os
import time
import pandas as pd
from collections import OrderedDict
from datetime import datetime, timedelta
from dateutil import parser
import cartosql
### Constants
SOURCE_URL = "https://missingmigrants.iom.int/global-figures/{year}/xls"
CLEAR_TABLE_FIRST = False
PROCESS_HISTORY = False
DATE_FORMAT = '%Y-%m-%d'
LOG_LEVEL = logging.DEBUG
### Table name and structure
CARTO_TABLE = 'soc_018_missing_migrants'
CARTO_SCHEMA = OrderedDict([
('uid', 'text'),
('the_geom', 'geometry'),
('Reported_Date', 'timestamp'),
('Region_of_Incident', 'text'),
('Number_Dead', 'numeric'),
('Number_Missing', 'numeric'),
('Total_Dead_and_Missing', 'numeric'),
('Number_of_survivors', 'numeric'),
('Number_of_Female', 'numeric'),
('Number_of_Male', 'numeric'),
('Number_of_Children', 'numeric'),
('Cause_of_death', 'text'),
('Location_Description', 'text'),
('Information_Source', 'text'),
('Migrant_Route', 'text'),
('URL', 'text'),
('UNSD_Geographical_Grouping', 'text'),
('Verification_level', 'text')
])
UID_FIELD = 'uid'
TIME_FIELD = 'Reported_Date'
# Table limits
MAX_ROWS = 1000000
MAX_AGE = datetime.today() - timedelta(days=365*10)
###
## Accessing remote data
###
def fetchAndFormatData(year):
df = pd.read_excel(SOURCE_URL.format(year=year))
df["Reported Date"] = df["Reported Date"].apply(lambda item: parser.parse(item, fuzzy=True).strftime(DATE_FORMAT))
return list(df.columns), list(df.values)
def structure_row(headers, values):
row = {}
for key, val in zip(headers, values):
row[key] = val
return row
def clean_row(row):
clean_row = []
for entry in row:
if entry == 'nan':
clean_row.append(None)
elif
|
pd.isnull(entry)
|
pandas.isnull
|
import numpy as np
import pandas as pd
def data_generating_process(params):
"""
Implementation of the data generating process in the simulation study.
Obtain artificial data on individual-level variables given a sharp Regression
Discontinuity setup for one of the following three model specifications:
a linear relationship between outcome and running variable, a fourth-order
polynomial one or a non-polynomial relationship.
Args:
params (dict): Dictionary holding the simulation parameters.
Returns:
pd.DataFrame: Dataframe with data on "r", "d" and "y" -
the running variable, treatment status and observed outcome for
each individual.
"""
# Obtain model parameters.
model = params["model"]
cutoff = params["cutoff"]
tau = params["tau"]
noise_var = params["noise_var"]
n = params["n"]
data =
|
pd.DataFrame()
|
pandas.DataFrame
|
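# A minimal, self-contained sketch of the sharp RD data generating process that
# the docstring above describes, restricted to the linear specification; the
# function name and the coefficient 0.5 are illustrative assumptions.
import numpy as np
import pandas as pd

def simulate_sharp_rd_linear(n, cutoff, tau, noise_var, seed=0):
    rng = np.random.default_rng(seed)
    r = rng.uniform(-1, 1, size=n)                  # running variable
    d = (r >= cutoff).astype(int)                   # sharp treatment assignment
    y = 0.5 * r + tau * d + rng.normal(0, np.sqrt(noise_var), size=n)
    return pd.DataFrame({"r": r, "d": d, "y": y})

# Example: simulate_sharp_rd_linear(n=1000, cutoff=0.0, tau=0.3, noise_var=0.1)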
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 26 15:41:00 2019
@author: Eric
"""
import numpy as np
import os
from dateutil.parser import parse
import pandas as pd
import datetime
import warnings
os.chdir(r'C:\Users\Eric\Documents\GitHub\Flux_Processing_Code')
import LTAR_QAQC_Online as LLT
import AF_Rename as AF
info_file = r'C:\Users\Eric\Desktop\PyScripts\Flux_Processing_Code\\Inputs_Driver.csv'
Data =
|
pd.read_csv(r'C:\Users\Eric\Desktop\LTAR\LTAR_National_Projects\PhenologyInitiative\EC Data\Processed\LTAR_EC_UMRB_morrissouth_20170609_20181001.csv',header = 1, skiprows=[2], index_col = 'TimeStamp')
|
pandas.read_csv
|
"""
Collection of functions used for the stitching.
IMPORTANT:
The identification of the organization of the fovs in the composite image
can be simplified if the (0,0) coords of the stage/camera will
be set to the same position for all machine used in the analysis.
In our case we started running experiments with the coords not adjusted
so the position of (0,0) is different for all the machine that
are used to generate the data.
"""
from typing import *
import logging
import shutil
import copy
import itertools
import math
import pickle
import zarr
import sys
import operator
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from itertools import groupby
from pathlib import Path
from sklearn.neighbors import NearestNeighbors
import sklearn.linear_model as linmod
from skimage.feature import register_translation
from skimage import measure
from scipy.optimize import minimize
from pynndescent import NNDescent
from pysmFISH.logger_utils import selected_logger
from pysmFISH.fovs_registration import create_fake_image
from pysmFISH.data_models import Dataset
from pysmFISH import io
class organize_square_tiles():
"""Class designed to determine the tile organization and identify the coords of the
overlapping regions between the tiles.
IMPORTANT: The normalize_coords method should be adjusted according to the
setup of the microscope.
"""
def __init__(self, experiment_fpath:str,dataset: pd.DataFrame,
metadata:Dict,round_num:int):
"""Class initialization
Args:
experiment_fpath (str): Path to the experiment to process
dataset (pd.DataFrame): Properties of the images of the experiment
metadata (Dict): Metadata describing the experiment
round_num (int): Reference acquisition round number
"""
self.logger = selected_logger()
self.experiment_fpath = Path(experiment_fpath)
self.dataset = dataset
self.metadata = metadata
self.round_num = round_num
self.experiment_name = self.metadata['experiment_name']
self.stitching_channel = self.metadata['stitching_channel']
self.overlapping_percentage = int(self.metadata['overlapping_percentage']) / 100
self.pixel_size = self.metadata['pixel_microns']
self.img_width = self.metadata['img_width']
self.img_height = self.metadata['img_height']
logging.getLogger('matplotlib.font_manager').disabled = True
if self.img_width == self.img_height:
self.img_size = self.img_width
else:
self.logger.error(f'the images to stitch are not square')
sys.exit(f'the images to stitch are not square')
def extract_microscope_coords(self):
"""Method to extract images coords in the stage reference
system"""
selected = self.dataset.loc[self.dataset.round_num == self.round_num,
['round_num','fov_num','fov_acquisition_coords_x','fov_acquisition_coords_y']]
selected.drop_duplicates(subset=['fov_num'],inplace=True)
selected.sort_values(by='fov_num', ascending=True, inplace=True)
self.x_coords = selected.loc[:,'fov_acquisition_coords_x'].to_numpy()
self.y_coords = selected.loc[:,'fov_acquisition_coords_y'].to_numpy()
def normalize_coords(self):
"""
Normalize the coords according to how the stage/camera are set.
This function must be modified according to the stage/camera setup.
ROBOFISH1 has stage with x increasing left-> right and y top->bottom
------> (x)
|
|
V (y)
ROBOFISH2 has stage with x increasing right-> left and y top->bottom
(x) <------
|
|
V (y)
ROBOFISH3 has stage with x increasing left-> right and y bottom->top
^ (y)
|
|
------> (x)
Axis modifications steps:
(1) The reference system will be first converted to image style:
------> (x)
|
|
V (y)
This step will cause a change in the position of the reference corner
for each fov. After image acquisition the reference corner is top-left
however after converting the axis direction to image-style the reference corner
will change postion:
ROBOFISH1: top-left --> top-left
ROBOFISH2: top-left --> top-right
ROBOFISH3: top-left --> bottom-left
(2) The coords will be translated to (0,0)
(3) then to matrix (python) notation
------> (columns)
|
|
V (rows)
"""
# port the coords to image type coords
if self.metadata['machine'] == 'ROBOFISH2':
self.x_coords = - self.x_coords
self.reference_corner_fov_position = 'top-right'
elif self.metadata['machine'] == 'ROBOFISH3':
self.x_coords = - self.x_coords
self.y_coords = - self.y_coords
self.reference_corner_fov_position = 'bottom-left'
elif self.metadata['machine'] == 'ROBOFISH1':
self.reference_corner_fov_position = 'top-left'
elif self.metadata['machine'] == 'NOT_DEFINED':
self.logger.error(f'Need to define the specs for stitching NOT_DEFINED machine')
sys.exit(f'Need to define the specs for stitching NOT_DEFINED machine')
else:
self.logger.error(f'define the right machine used to collected the data')
sys.exit(f'define the right machine used to collected the data')
# shift the coords to reference point (0,0)
# consider that we get the top-right corner of the image as well
y_min = np.amin(self.y_coords)
x_min = np.amin(self.x_coords)
x_max = np.amax(self.x_coords)
y_max = np.amax(self.y_coords)
# Put the coords to zero
if x_min >=0 :
self.x_coords = self.x_coords - x_min
else:
self.x_coords = self.x_coords + np.abs(x_min)
if y_min>0:
self.y_coords = self.y_coords - y_min
else:
self.y_coords = self.y_coords + np.abs(y_min)
# if x_max >=0 :
# self.x_coords = self.x_coords - x_min
# else:
# self.x_coords = self.x_coords + np.abs(x_min)
# if y_max>0:
# self.y_coords = self.y_coords - y_min
# else:
# self.y_coords = self.y_coords + np.abs(y_min)
# change the coords from x,y to r,c
adjusted_coords = np.zeros([self.x_coords.shape[0],2])
adjusted_coords[:,0] = self.y_coords
adjusted_coords[:,1] = self.x_coords
# move coords to pxl space
self.tile_corners_coords_pxl = adjusted_coords / self.pixel_size
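# Illustrative worked example (hypothetical numbers) of the steps above for ROBOFISH2:
#   raw stage coords (microns):    x = [-200., -100.],  y = [50., 50.]
#   flip x to image orientation:   x = [ 200.,  100.]
#   shift both axes to zero:       x = [ 100.,    0.],  y = [ 0.,  0.]
#   swap to (row, col) and divide by the pixel size (e.g. 0.1 um/px):
#   tile_corners_coords_pxl = [[0., 1000.], [0., 0.]]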
# def save_graph_original_coords(self):
# to correct because I already converted the coords to image
# # Turn interactive plotting off
# saving_fpath = self.experiment_fpath / 'output_figures' / 'microscope_space_tiles_organization.png'
# plt.ioff()
# # Create image type axes
# labels = [str(nr) for nr in np.arange(self.x_coords.shape[0])]
# fig = plt.figure(figsize=(20,10))
# plt.plot(self.x_coords,self.y_coords,'or')
# for label, x, y in zip(labels, self.x_coords,self.y_coords):
# plt.annotate(
# label,
# xy=(x,y), xytext=(-2, 2),
# textcoords='offset points', ha='center', va='bottom',fontsize=12)
# plt.tight_layout()
# plt.savefig(saving_fpath)
def save_graph_image_space_coords(self):
"""Method used to save the organization of the tiles
"""
# Turn interactive plotting off
saving_fpath = self.experiment_fpath / 'output_figures' / 'image_space_tiles_organization.png'
plt.ioff()
# Create image type axes
labels = [str(nr) for nr in np.arange(self.tile_corners_coords_pxl.shape[0])]
fig = plt.figure(figsize=(20,10))
plt.gca().invert_yaxis()
plt.plot(self.tile_corners_coords_pxl[:,1],self.tile_corners_coords_pxl[:,0],'or')
for label, x, y in zip(labels, self.tile_corners_coords_pxl[:,1],self.tile_corners_coords_pxl[:,0]):
plt.annotate(
label,
xy=(x,y), xytext=(-2, 2),
textcoords='offset points', ha='center', va='bottom',fontsize=12)
plt.tight_layout()
plt.savefig(saving_fpath)
def identify_adjacent_tiles(self):
"""Method that use Nearest neighbors to identify the beighbouring tiles
"""
shift_percent_tolerance = 0.05
searching_radius = self.img_size - (self.img_size*self.overlapping_percentage) + (self.img_size*shift_percent_tolerance)
nn = NearestNeighbors(n_neighbors=5,radius=searching_radius, metric='euclidean')
nn.fit(self.tile_corners_coords_pxl)
self.dists, self.indices = nn.kneighbors(self.tile_corners_coords_pxl, return_distance=True)
def determine_overlapping_regions(self):
"""Method used to calculate the coords of the overlapping regions between the tiles.
"""
# remember that overlapping region can be an empty dictionary
self.overlapping_regions = {}
self.overlapping_order ={}
for idx in np.arange(self.indices.shape[0]):
self.overlapping_regions[idx] = {}
self.overlapping_order[idx] = {}
for idx in np.arange(self.indices.shape[0]):
# Determine the indices that identify the correct adjacent
processing_indices = self.indices[idx,:]
processing_dists = self.dists[idx,:]
ref_tile = processing_indices[0]
self.overlapping_regions[ref_tile] = {}
self.overlapping_order[ref_tile] = {}
trimmed_indices = processing_indices[1:]
trimmed_dists = processing_dists[1:]
idx_adj = np.where(trimmed_dists < self.img_size)
adj_tiles_id = trimmed_indices[idx_adj]
adj_cpls = [(ref_tile, adj_tile) for adj_tile in adj_tiles_id]
# remove pairs that are already selected
only_new_cpls = [cpl for cpl in adj_cpls if (cpl[1],cpl[0]) not in self.overlapping_regions[cpl[1]].keys()]
# only_new_cpls = [cpl for cpl in adj_cpls]
if self.reference_corner_fov_position == 'top-left':
for cpl in only_new_cpls:
tile1_r_coords = self.tile_corners_coords_pxl[cpl[0]][0]
tile2_r_coords = self.tile_corners_coords_pxl[cpl[1]][0]
tile1_c_coords = self.tile_corners_coords_pxl[cpl[0]][1]
tile2_c_coords = self.tile_corners_coords_pxl[cpl[1]][1]
if tile1_r_coords > tile2_r_coords:
r_tl = tile1_r_coords
r_br = tile2_r_coords + self.img_height
row_order = ('bottom','top')
else:
r_tl = tile2_r_coords
r_br = tile1_r_coords + self.img_height
row_order = ('top','bottom')
if tile1_c_coords > tile2_c_coords:
c_tl = tile1_c_coords
c_br = tile2_c_coords + self.img_width
col_order = ('right','left')
else:
c_tl = tile2_c_coords
c_br = tile1_c_coords + self.img_width
col_order = ('left','right')
self.overlapping_regions[ref_tile][cpl] = [r_tl, r_br, c_tl, c_br]
self.overlapping_order[ref_tile][cpl] = {'row_order':row_order,'column_order':col_order}
elif self.reference_corner_fov_position == 'top-right':
for cpl in only_new_cpls:
tile1_r_coords = self.tile_corners_coords_pxl[cpl[0]][0]
tile2_r_coords = self.tile_corners_coords_pxl[cpl[1]][0]
tile1_c_coords = self.tile_corners_coords_pxl[cpl[0]][1]
tile2_c_coords = self.tile_corners_coords_pxl[cpl[1]][1]
if tile1_r_coords > tile2_r_coords:
r_tl = tile1_r_coords
r_br = tile2_r_coords + self.img_height
row_order = ('bottom','top')
else:
r_tl = tile2_r_coords
r_br = tile1_r_coords + self.img_height
row_order = ('top','bottom')
if tile1_c_coords > tile2_c_coords:
c_tl = tile1_c_coords - self.img_width
c_br = tile2_c_coords
col_order = ('right','left')
else:
c_tl = tile2_c_coords - self.img_width
c_br = tile1_c_coords
col_order = ('left','right')
self.overlapping_regions[ref_tile][cpl] = [r_tl, r_br, c_tl, c_br]
self.overlapping_order[ref_tile][cpl] = {'row_order':row_order,'column_order':col_order}
elif self.reference_corner_fov_position == 'bottom-left':
for cpl in only_new_cpls:
tile1_r_coords = self.tile_corners_coords_pxl[cpl[0]][0]
tile2_r_coords = self.tile_corners_coords_pxl[cpl[1]][0]
tile1_c_coords = self.tile_corners_coords_pxl[cpl[0]][1]
tile2_c_coords = self.tile_corners_coords_pxl[cpl[1]][1]
if tile1_r_coords > tile2_r_coords:
r_tl = tile1_r_coords - self.img_height
r_br = tile2_r_coords
row_order = ('bottom','top')
else:
r_tl = tile2_r_coords - self.img_height
r_br = tile1_r_coords
row_order = ('top','bottom')
if tile1_c_coords > tile2_c_coords:
c_tl = tile1_c_coords
c_br = tile2_c_coords + self.img_width
col_order = ('right','left')
else:
c_tl = tile2_c_coords
c_br = tile1_c_coords + self.img_width
col_order = ('left','right')
self.overlapping_regions[ref_tile][cpl] = [r_tl, r_br, c_tl, c_br]
self.overlapping_order[ref_tile][cpl] = {'row_order':row_order,'column_order':col_order}
def run_tiles_organization(self):
"""Method used to run all the methods
"""
self.extract_microscope_coords()
# self.save_graph_original_coords()
self.normalize_coords()
self.save_graph_image_space_coords()
self.identify_adjacent_tiles()
self.determine_overlapping_regions()
fname = self.experiment_fpath / 'results' / 'microscope_tile_corners_coords_pxl.npy'
np.save(fname,self.tile_corners_coords_pxl)
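# --- Illustrative sketch (not part of the original pipeline) ---
# Shows, with made-up numbers, how normalize_coords turns ROBOFISH2 stage
# coordinates into image-style (row, column) pixel coordinates: flip x,
# translate the minimum of each axis to 0, swap to (r, c) and divide by the
# pixel size. The coordinates and the 0.1 um pixel size below are hypothetical.
import numpy as np

def _demo_normalize_robofish2(x_um, y_um, pixel_size_um):
    x_um = -x_um                      # ROBOFISH2: x grows right -> left, so flip it
    x_um = x_um - np.amin(x_um)       # translate so the smallest x sits at 0
    y_um = y_um - np.amin(y_um)       # translate so the smallest y sits at 0
    coords_rc = np.column_stack([y_um, x_um])  # matrix notation: rows first
    return coords_rc / pixel_size_um  # microns -> pixels

if __name__ == "__main__":
    demo_x = np.array([0.0, 200.0, 0.0, 200.0])   # um, hypothetical 2x2 grid
    demo_y = np.array([0.0, 0.0, 150.0, 150.0])
    print(_demo_normalize_robofish2(demo_x, demo_y, pixel_size_um=0.1))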
class organize_square_tiles_old_room():
"""
Class used to identify the organization of the tiles before the
reorganization of the Robofish room in April 2021, when Robofish3
was assembled.
"""
def __init__(self, experiment_fpath:str,dataset, metadata:Dict,round_num:int):
"""
round_num = int
reference channel
"""
self.logger = selected_logger()
self.experiment_fpath = Path(experiment_fpath)
self.dataset = dataset
self.metadata = metadata
self.round_num = round_num
self.experiment_name = self.metadata['experiment_name']
self.stitching_channel = self.metadata['stitching_channel']
self.overlapping_percentage = int(self.metadata['overlapping_percentage']) / 100
self.pixel_size = self.metadata['pixel_microns']
self.img_width = self.metadata['img_width']
self.img_height = self.metadata['img_height']
logging.getLogger('matplotlib.font_manager').disabled = True
if self.img_width == self.img_height:
self.img_size = self.img_width
else:
self.logger.error(f'the images to stitch are not square')
sys.exit(f'the images to stitch are not square')
def extract_microscope_coords(self):
selected = self.dataset.loc[self.dataset.round_num == self.round_num,
['round_num','fov_num','fov_acquisition_coords_x','fov_acquisition_coords_y']]
selected.drop_duplicates(subset=['fov_num'],inplace=True)
selected.sort_values(by='fov_num', ascending=True, inplace=True)
self.x_coords = selected.loc[:,'fov_acquisition_coords_x'].to_numpy()
self.y_coords = selected.loc[:,'fov_acquisition_coords_y'].to_numpy()
def normalize_coords(self):
if self.metadata['machine'] == 'ROBOFISH2':
# RobofishII has stage with reference point
# in the center (0,0)
# consider that we get the top-right corner of the image as well
self.reference_corner_fov_position = 'old-room-robofish2' # Not sure (i don't remember)
y_min = np.amin(self.y_coords)
x_min = np.amin(self.x_coords)
x_max = np.amax(self.x_coords)
y_max = np.amax(self.y_coords)
# Put the coords to zero
if x_max >=0 :
self.x_coords = self.x_coords - x_min
else:
self.x_coords = self.x_coords + np.abs(x_min)
if y_max>0:
self.y_coords = self.y_coords - y_min
else:
self.y_coords = self.y_coords + np.abs(y_min)
# flip y_axis
self.y_coords = self.y_coords - self.y_coords.max()
self.y_coords = - self.y_coords
# change the coords from x,y to r,c
adjusted_coords = np.zeros([self.x_coords.shape[0],2])
adjusted_coords[:,0] = self.y_coords
adjusted_coords[:,1] = self.x_coords
elif self.metadata['machine'] == 'ROBOFISH1':
# The current system has stage ref coords top-left
self.reference_corner_fov_position = 'top-left'
# Normalize to (0,0) still BOTTOM-RIGHT
y_min = np.amin(self.y_coords)
x_min = np.amin(self.x_coords)
self.x_coords = self.x_coords - x_min
self.y_coords = self.y_coords - y_min
# flip axis to move (0,0) to TOP-LEFT
self.x_coords = self.x_coords - self.x_coords.max()
self.x_coords = - self.x_coords
self.y_coords = self.y_coords - self.y_coords.max()
self.y_coords = - self.y_coords
# change the coords from x,y to r,c
adjusted_coords = np.zeros([self.x_coords.shape[0],2])
adjusted_coords[:,0] = self.y_coords
adjusted_coords[:,1] = self.x_coords
elif self.metadata['machine'] == 'NOT_DEFINED':
self.logger.error(f'Need to define the specs for stitching NOT_DEFINED machine')
sys.exit(f'Need to define the specs for stitching NOT_DEFINED machine')
else:
self.logger.error('specify the correct machine used to collect the data')
sys.exit('specify the correct machine used to collect the data')
self.tile_corners_coords_pxl = adjusted_coords / self.pixel_size
def save_graph_original_coords(self):
# Turn interactive plotting off
saving_fpath = self.experiment_fpath / 'output_figures' / 'microscope_space_tiles_organization.png'
plt.ioff()
# Create image type axes
labels = [str(nr) for nr in np.arange(self.x_coords.shape[0])]
fig = plt.figure(figsize=(20,10))
plt.plot(self.x_coords,self.y_coords,'or')
for label, x, y in zip(labels, self.x_coords,self.y_coords):
plt.annotate(
label,
xy=(x,y), xytext=(-2, 2),
textcoords='offset points', ha='center', va='bottom',fontsize=12)
plt.tight_layout()
plt.savefig(saving_fpath)
def save_graph_image_space_coords(self):
# Turn interactive plotting off
saving_fpath = self.experiment_fpath / 'output_figures' / 'image_space_tiles_organization.png'
plt.ioff()
# Create image type axes
labels = [str(nr) for nr in np.arange(self.tile_corners_coords_pxl.shape[0])]
fig = plt.figure(figsize=(20,10))
plt.gca().invert_yaxis()
plt.plot(self.tile_corners_coords_pxl[:,1],self.tile_corners_coords_pxl[:,0],'or')
for label, x, y in zip(labels, self.tile_corners_coords_pxl[:,1],self.tile_corners_coords_pxl[:,0]):
plt.annotate(
label,
xy=(x,y), xytext=(-2, 2),
textcoords='offset points', ha='center', va='bottom',fontsize=12)
plt.tight_layout()
plt.savefig(saving_fpath)
def identify_adjacent_tiles(self):
shift_percent_tolerance = 0.05
searching_radius = self.img_size - (self.img_size*self.overlapping_percentage) + (self.img_size*shift_percent_tolerance)
nn = NearestNeighbors(n_neighbors=5,radius=searching_radius, metric='euclidean')
nn.fit(self.tile_corners_coords_pxl)
self.dists, self.indices = nn.kneighbors(self.tile_corners_coords_pxl, return_distance=True)
def determine_overlapping_regions(self):
# remember that overlapping region can be an empty dictionary
self.overlapping_regions = {}
self.overlapping_order ={}
for idx in np.arange(self.indices.shape[0]):
self.overlapping_regions[idx] = {}
self.overlapping_order[idx] = {}
for idx in np.arange(self.indices.shape[0]):
# Determine the indices that identify the correct adjacent
processing_indices = self.indices[idx,:]
processing_dists = self.dists[idx,:]
ref_tile = processing_indices[0]
self.overlapping_regions[ref_tile] = {}
self.overlapping_order[ref_tile] = {}
trimmed_indices = processing_indices[1:]
trimmed_dists = processing_dists[1:]
idx_adj = np.where(trimmed_dists < self.img_size)
adj_tiles_id = trimmed_indices[idx_adj]
adj_cpls = [(ref_tile, adj_tile) for adj_tile in adj_tiles_id]
# remove pairs that are already selected
only_new_cpls = [cpl for cpl in adj_cpls if (cpl[1],cpl[0]) not in self.overlapping_regions[cpl[1]].keys()]
# only_new_cpls = [cpl for cpl in adj_cpls]
if self.metadata['machine'] == 'ROBOFISH2':
# If tile coords are top left
for cpl in only_new_cpls:
tile1_r_coords = self.tile_corners_coords_pxl[cpl[0]][0]
tile2_r_coords = self.tile_corners_coords_pxl[cpl[1]][0]
tile1_c_coords = self.tile_corners_coords_pxl[cpl[0]][1]
tile2_c_coords = self.tile_corners_coords_pxl[cpl[1]][1]
if tile1_r_coords > tile2_r_coords:
r_tl = tile1_r_coords
r_br = tile2_r_coords + self.img_size
r_bl = tile2_c_coords + self.img_size
r_tr = tile1_c_coords
row_order = ('bottom','top')
else:
r_tl = tile2_r_coords
r_br = tile1_r_coords + self.img_size
r_bl = tile1_r_coords + self.img_size
r_tr = tile2_r_coords
row_order = ('top','bottom')
if tile1_c_coords > tile2_c_coords:
c_tl = tile1_c_coords
c_br = tile2_c_coords + self.img_size
c_tr = tile2_c_coords + self.img_size
c_bl = tile1_c_coords
col_order = ('right','left')
else:
c_tl = tile2_c_coords
c_br = tile1_c_coords + self.img_size
c_bl = tile2_c_coords
c_tr = tile1_c_coords + self.img_size
col_order = ('left','right')
elif self.metadata['machine'] == 'ROBOFISH1':
# If tile coords are bottom right
for cpl in only_new_cpls:
tile1_r_coords = self.tile_corners_coords_pxl[cpl[0]][0]
tile2_r_coords = self.tile_corners_coords_pxl[cpl[1]][0]
tile1_c_coords = self.tile_corners_coords_pxl[cpl[0]][1]
tile2_c_coords = self.tile_corners_coords_pxl[cpl[1]][1]
if tile1_r_coords > tile2_r_coords:
r_tl = tile1_r_coords - self.img_size
r_br = tile2_r_coords
r_bl = tile2_c_coords
r_tr = tile1_c_coords - self.img_size
row_order = ('bottom','top')
else:
r_tl = tile2_r_coords - self.img_size
r_br = tile1_r_coords
r_bl = tile1_r_coords
r_tr = tile2_r_coords - self.img_size
row_order = ('top','bottom')
if tile1_c_coords > tile2_c_coords:
c_tl = tile1_c_coords - self.img_size
c_br = tile2_c_coords
c_tr = tile2_c_coords
c_bl = tile1_c_coords - self.img_size
col_order = ('right','left')
else:
c_tl = tile2_c_coords - self.img_size
c_br = tile1_c_coords
c_bl = tile2_c_coords - self.img_size
c_tr = tile1_c_coords
col_order = ('left','right')
else:
pass
self.overlapping_regions[ref_tile][cpl] = [r_tl, r_br, c_tl, c_br]
self.overlapping_order[ref_tile][cpl] = {'row_order':row_order,'column_order':col_order}
def run_tiles_organization(self):
self.extract_microscope_coords()
self.save_graph_original_coords()
self.normalize_coords()
self.save_graph_image_space_coords()
self.identify_adjacent_tiles()
self.determine_overlapping_regions()
fname = self.experiment_fpath / 'results' / 'microscope_tile_corners_coords_pxl.npy'
np.save(fname,self.tile_corners_coords_pxl)
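# --- Minimal sketch of the overlap bookkeeping above (hypothetical values) ---
# Reproduces by hand the [r_tl, r_br, c_tl, c_br] computation that
# determine_overlapping_regions performs for a 'top-left' reference corner;
# the row/column order tuples are omitted. Tile size (2048 px) and coordinates
# are invented for illustration only.
import numpy as np

def _demo_overlap_top_left(coords_pxl, cpl, img_height, img_width):
    r1, c1 = coords_pxl[cpl[0]]
    r2, c2 = coords_pxl[cpl[1]]
    # same arithmetic as the if/else branches: the larger coord is the top-left
    # of the overlap, the smaller coord plus the tile size is the bottom-right
    return [max(r1, r2), min(r1, r2) + img_height,
            max(c1, c2), min(c1, c2) + img_width]

if __name__ == "__main__":
    coords = np.array([[0.0, 0.0], [0.0, 1843.2]])  # second tile shifted ~90% of the width
    # expected column overlap: 1843.2 -> 2048, i.e. roughly 10% of the tile
    print(_demo_overlap_top_left(coords, (0, 1), img_height=2048, img_width=2048))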
def stitch_using_coords_general(decoded_df: pd.DataFrame, tile_corners_coords_pxl: np.ndarray,
reference_corner_fov_position: str, metadata: Dict, tag: str):
"""Function to create a stitched image using the fov coords
of the stage.
Args:
decoded_df (pd.DataFrame): Counts after decoding
tile_corners_coords_pxl (np.ndarray): Coords of the fovs according to the stage
reference_corner_fov_position (str): Position of the reference corner determined by
the stage/camera configuration. In our setup it can be:
- top-left
- top-right
- bottom-left
metadata (Dict): [description]
tag (str): [description]
Returns:
[type]: Decoded counts with coords of the dots adjusted to the stage
reference point
"""
logger = selected_logger()
was_file = 0
if not isinstance(decoded_df, pd.DataFrame):
was_file = 1
decoded_df_fpath = copy.deepcopy(decoded_df)
decoded_df = pd.read_parquet(decoded_df)
if decoded_df['r_px_registered'].empty:
decoded_df['r_px_'+tag] = np.nan
decoded_df['c_px_'+tag] = np.nan
else:
#fov = decoded_df.iloc[0]['fov_num']
fov = int(decoded_df.fov_num.unique()[0])
r_microscope_coords = tile_corners_coords_pxl[fov,0]
c_microscope_coords = tile_corners_coords_pxl[fov,1]
if reference_corner_fov_position == 'top-left':
decoded_df['r_px_'+tag] = r_microscope_coords + decoded_df['r_px_registered']
decoded_df['c_px_'+tag] = c_microscope_coords + decoded_df['c_px_registered']
elif reference_corner_fov_position == 'top-right':
decoded_df['r_px_'+tag] = r_microscope_coords + decoded_df['r_px_registered']
decoded_df['c_px_'+tag] = c_microscope_coords - (metadata['img_width'] - decoded_df['c_px_registered'])
elif reference_corner_fov_position == 'bottom-left':
decoded_df['r_px_'+tag] = r_microscope_coords + (metadata['img_height'] - decoded_df['r_px_registered'])
decoded_df['c_px_'+tag] = c_microscope_coords + decoded_df['c_px_registered']
elif reference_corner_fov_position == 'bottom-right':
decoded_df['r_px_'+tag] = r_microscope_coords + (metadata['img_height'] - decoded_df['r_px_registered'])
decoded_df['c_px_'+tag] = c_microscope_coords - (metadata['img_width'] - decoded_df['c_px_registered'])
elif reference_corner_fov_position == 'old-room-robofish2':
decoded_df['r_px_'+tag] = r_microscope_coords - decoded_df['r_px_registered']
decoded_df['c_px_'+tag] = c_microscope_coords - decoded_df['c_px_registered']
else:
logger.error(f"the referernce corner fov position name is wrong")
sys.exit(f"the referernce corner fov position name is wrong")
# decoded_df['r_px_'+tag] = r_microscope_coords - decoded_df['r_px_registered']
# decoded_df['c_px_'+tag] = c_microscope_coords - decoded_df['c_px_registered']
if was_file:
decoded_df.to_parquet(decoded_df_fpath,index=False)
else:
return decoded_df
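# --- Hedged illustration of the corner-dependent offsets used above ---
# Recaps the arithmetic of stitch_using_coords_general for each supported
# reference corner; the fov corner position, image size and dot coordinates are
# hypothetical numbers chosen only to show the mapping.
def _demo_stitch_offset(r_fov, c_fov, r_reg, c_reg, corner, img_h, img_w):
    if corner == 'top-left':
        return r_fov + r_reg, c_fov + c_reg
    if corner == 'top-right':
        return r_fov + r_reg, c_fov - (img_w - c_reg)
    if corner == 'bottom-left':
        return r_fov + (img_h - r_reg), c_fov + c_reg
    if corner == 'bottom-right':
        return r_fov + (img_h - r_reg), c_fov - (img_w - c_reg)
    raise ValueError(f"unknown reference corner: {corner}")

if __name__ == "__main__":
    # a dot registered at (100, 200) inside a fov whose top-right corner sits at (5000, 9000)
    print(_demo_stitch_offset(5000, 9000, 100, 200, 'top-right', img_h=2048, img_w=2048))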
def stitch_using_coords_general_segmented_objects(fov,obj_dict,tile_corners_coords_pxl,reference_corner_fov_position, metadata):
"""
Function used to stitch the segmented object used for defining the cells.
"""
r_microscope_coords = tile_corners_coords_pxl[fov,0]
c_microscope_coords = tile_corners_coords_pxl[fov,1]
if obj_dict:
if reference_corner_fov_position == 'top-left':
for el,coords_dict in obj_dict.items():
coords_dict['stitched_coords'] = np.vstack([r_microscope_coords + coords_dict['original_coords'][:,0],
c_microscope_coords + coords_dict['original_coords'][:,1]]).T
elif reference_corner_fov_position == 'top-right':
for el,coords_dict in obj_dict.items():
coords_dict['stitched_coords'] = np.vstack([r_microscope_coords + coords_dict['original_coords'][:,0],
c_microscope_coords - (metadata['img_width'] -coords_dict['original_coords'][:,1])]).T
elif reference_corner_fov_position == 'bottom-left':
for el,coords_dict in obj_dict.items():
coords_dict['stitched_coords'] = np.vstack([r_microscope_coords + (metadata['img_height'] -coords_dict['original_coords'][:,0]),
c_microscope_coords + coords_dict['original_coords'][:,1]]).T
return obj_dict
def register_coords_obj(fov,segmentation_output_path,
stitching_parameters,
reference_corner_fov_position,
metadata):
"""Function used to register the coords of the segmented object to th
Args:
fov ([type]): [description]
segmentation_output_path ([type]): [description]
"""
segmented_output = pickle.load(open(segmentation_output_path / ('preprocessed_data_fov_' + str(fov) + '_mask.pkl'), 'rb'))
segmented_regions = measure.regionprops(segmented_output)
segmented_regions_dict = {}
for prop in segmented_regions:
segmented_regions_dict[str(fov)+'-'+str(prop.label)] = {}
segmented_regions_dict[str(fov)+'-'+str(prop.label)]['original_coords']=prop.coords
segmented_regions_dict[str(fov)+'-'+str(prop.label)]['stitched_coords']= np.nan
segmented_regions_dict = stitch_using_coords_general_segmented_objects(fov,segmented_regions_dict,
stitching_parameters,reference_corner_fov_position, metadata)
pickle.dump(segmented_regions_dict,open(segmentation_output_path / ('registered_objs_dict_fov_' + str(fov) + '.pkl'), 'wb'))
def get_all_dots_in_overlapping_regions(counts_df, chunk_coords, stitching_selected='microscope_stitched'):
r_tag = 'r_px_' + stitching_selected
c_tag = 'c_px_' + stitching_selected
r_tl = chunk_coords[0]
r_br = chunk_coords[1]
c_tl = chunk_coords[2]
c_br = chunk_coords[3]
overlapping_ref_df = counts_df.loc[(counts_df[r_tag] > r_tl) & (counts_df[r_tag] < r_br)
& (counts_df[c_tag] > c_tl) & (counts_df[c_tag] < c_br),:]
return overlapping_ref_df
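# --- Usage sketch for get_all_dots_in_overlapping_regions (toy data) ---
# The column names follow the 'r_px_/c_px_ + stitching_selected' convention
# used above; the coordinates and the chunk boundaries are invented.
import pandas as pd

if __name__ == "__main__":
    toy_counts = pd.DataFrame({
        'r_px_microscope_stitched': [10.0, 150.0, 400.0],
        'c_px_microscope_stitched': [20.0, 180.0, 500.0],
    })
    toy_chunk_coords = [100, 300, 100, 300]  # [r_tl, r_br, c_tl, c_br]
    # only the dot at (150, 180) falls inside the overlap chunk
    print(get_all_dots_in_overlapping_regions(toy_counts, toy_chunk_coords))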
# TODO adjust the registration with dots (triangulation)
def register_cpl(cpl, chunk_coords, experiment_fpath,
stitching_channel,
reference_round):
logger = selected_logger()
registration = {}
experiment_fpath = Path(experiment_fpath)
try:
counts1_fpath = list((experiment_fpath / 'results').glob('*decoded_fov_' + str(cpl[0]) + '.parquet'))[0]
except:
logger.error(f'count file missing for fov {cpl[0]}')
else:
try:
counts2_fpath = list((experiment_fpath / 'results').glob('*decoded_fov_' + str(cpl[1]) + '.parquet'))[0]
except:
logger.error(f'count file missing for fov {cpl[1]}')
else:
counts1_df =
|
pd.read_parquet(counts1_fpath)
|
pandas.read_parquet
|
# Defines the modules
from onsset import *
import pandas as pd
from openpyxl import load_workbook
def calibration(specs_path, csv_path, specs_path_calib, calibrated_csv_path):
specs = pd.read_excel(specs_path, index_col=0)
SpecsData = pd.read_excel(specs_path, sheet_name='SpecsData')
settlements_in_csv = csv_path
settlements_out_csv = calibrated_csv_path
onsseter = SettlementProcessor(settlements_in_csv)
num_people_per_hh_rural = float(SpecsData.iloc[0][SPE_NUM_PEOPLE_PER_HH_RURAL])
num_people_per_hh_urban = float(SpecsData.iloc[0][SPE_NUM_PEOPLE_PER_HH_URBAN])
# RUN_PARAM: these are the annual household electricity targets
tier_1 = 38.7 # 38.7 refers to kWh/household/year. It is the mean value between Tier 1 and Tier 2
tier_2 = 219
tier_3 = 803
tier_4 = 2117
tier_5 = 2993
onsseter.prepare_wtf_tier_columns(num_people_per_hh_rural, num_people_per_hh_urban,
tier_1, tier_2, tier_3, tier_4, tier_5)
onsseter.condition_df()
onsseter.grid_penalties()
onsseter.calc_wind_cfs()
pop_actual = SpecsData.loc[0, SPE_POP]
pop_future_high = SpecsData.loc[0, SPE_POP_FUTURE + 'High']
pop_future_low = SpecsData.loc[0, SPE_POP_FUTURE + 'Low']
urban_current = SpecsData.loc[0, SPE_URBAN]
urban_future = SpecsData.loc[0, SPE_URBAN_FUTURE]
start_year = int(SpecsData.loc[0, SPE_START_YEAR])
end_year = int(SpecsData.loc[0, SPE_END_YEAR])
intermediate_year = 2025
elec_actual = SpecsData.loc[0, SPE_ELEC]
elec_actual_urban = SpecsData.loc[0, SPE_ELEC_URBAN]
elec_actual_rural = SpecsData.loc[0, SPE_ELEC_RURAL]
pop_tot = SpecsData.loc[0, SPE_POP]
urban_modelled = onsseter.calibrate_pop_and_urban(pop_actual, pop_future_high, pop_future_low, urban_current,
urban_future, start_year, end_year, intermediate_year)
elec_modelled, rural_elec_ratio, urban_elec_ratio = \
onsseter.elec_current_and_future(elec_actual, elec_actual_urban, elec_actual_rural, pop_tot, start_year)
SpecsData.loc[0, SPE_URBAN_MODELLED] = urban_modelled
SpecsData.loc[0, SPE_ELEC_MODELLED] = elec_modelled
SpecsData.loc[0, 'rural_elec_ratio_modelled'] = rural_elec_ratio
SpecsData.loc[0, 'urban_elec_ratio_modelled'] = urban_elec_ratio
book = load_workbook(specs_path)
writer = pd.ExcelWriter(specs_path_calib, engine='openpyxl')
writer.book = book
# RUN_PARAM: Here the calibrated "specs" data are copied to a new tab called "SpecsDataCalib". This is what will later on be used to feed the model
SpecsData.to_excel(writer, sheet_name='SpecsDataCalib', index=False)
writer.save()
writer.close()
logging.info('Calibration finished. Results are transferred to the csv file')
onsseter.df.to_csv(settlements_out_csv, index=False)
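# --- Hedged sketch: household tier targets vs per-capita demand ---
# The tier_1..tier_5 values above are kWh/household/year; onsset's
# prepare_wtf_tier_columns derives the per-capita columns internally. Dividing
# by household size, as below, is only an approximation of that idea and is not
# the library's exact formula.
def _demo_tier_per_capita(tier_kwh_per_hh, people_per_hh):
    return tier_kwh_per_hh / people_per_hh

if __name__ == "__main__":
    # e.g. Tier 3 (803 kWh/household/year) with a hypothetical 5 people per rural household
    print(_demo_tier_per_capita(803, 5))  # ~160.6 kWh/person/year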
def scenario(specs_path, calibrated_csv_path, results_folder, summary_folder):
ScenarioInfo = pd.read_excel(specs_path, sheet_name='ScenarioInfo')
Scenarios = ScenarioInfo['Scenario']
ScenarioParameters = pd.read_excel(specs_path, sheet_name='ScenarioParameters')
SpecsData = pd.read_excel(specs_path, sheet_name='SpecsDataCalib')
print(SpecsData.loc[0, SPE_COUNTRY])
for scenario in Scenarios:
print('Scenario: ' + str(scenario + 1))
countryID = SpecsData.iloc[0]['CountryCode']
popIndex = ScenarioInfo.iloc[scenario]['Population_Growth']
tierIndex = ScenarioInfo.iloc[scenario]['Target_electricity_consumption_level']
fiveyearIndex = ScenarioInfo.iloc[scenario]['Electrification_target_5_years']
gridIndex = ScenarioInfo.iloc[scenario]['Grid_electricity_generation_cost']
pvIndex = ScenarioInfo.iloc[scenario]['PV_cost_adjust']
dieselIndex = ScenarioInfo.iloc[scenario]['Diesel_price']
productiveIndex = ScenarioInfo.iloc[scenario]['Productive_uses_demand']
prioIndex = ScenarioInfo.iloc[scenario]['Prioritization_algorithm']
end_year_pop = ScenarioParameters.iloc[popIndex]['PopEndYear']
rural_tier = ScenarioParameters.iloc[tierIndex]['RuralTargetTier']
urban_tier = ScenarioParameters.iloc[tierIndex]['UrbanTargetTier']
grid_price = ScenarioParameters.iloc[gridIndex]['GridGenerationCost']
diesel_price = ScenarioParameters.iloc[dieselIndex]['DieselPrice']
productive_demand = ScenarioParameters.iloc[productiveIndex]['ProductiveDemand']
prioritization = ScenarioParameters.iloc[prioIndex]['PrioritizationAlgorithm']
auto_intensification_2025 = ScenarioParameters.iloc[prioIndex]['AutoIntensificationKM2025']
auto_intensification_2030 = ScenarioParameters.iloc[prioIndex]['AutoIntensificationKM2030']
auto_intensification_2040 = ScenarioParameters.iloc[prioIndex]['AutoIntensificationKM2040']
auto_intensification_2050 = ScenarioParameters.iloc[prioIndex]['AutoIntensificationKM2050']
auto_intensification_2060 = ScenarioParameters.iloc[prioIndex]['AutoIntensificationKM2060']
auto_intensification_2070 = ScenarioParameters.iloc[prioIndex]['AutoIntensificationKM2070']
annual_new_grid_connections_limit_2025 = ScenarioParameters.iloc[fiveyearIndex]['GridConnectionsLimitThousands2025'] * 1000
annual_new_grid_connections_limit_2030 = ScenarioParameters.iloc[fiveyearIndex]['GridConnectionsLimitThousands2030'] * 1000
annual_new_grid_connections_limit_2040 = ScenarioParameters.iloc[fiveyearIndex]['GridConnectionsLimitThousands2040'] * 1000
annual_new_grid_connections_limit_2050 = ScenarioParameters.iloc[fiveyearIndex]['GridConnectionsLimitThousands2050'] * 1000
annual_new_grid_connections_limit_2060 = ScenarioParameters.iloc[fiveyearIndex]['GridConnectionsLimitThousands2060'] * 1000
annual_new_grid_connections_limit_2070 = ScenarioParameters.iloc[fiveyearIndex]['GridConnectionsLimitThousands2070'] * 1000
annual_grid_cap_gen_limit_2025 = SpecsData.loc[0, 'NewGridGenerationCapacityAnnualLimitMW2025'] * 1000
annual_grid_cap_gen_limit_2030 = SpecsData.loc[0, 'NewGridGenerationCapacityAnnualLimitMW2030'] * 1000
annual_grid_cap_gen_limit_2040 = SpecsData.loc[0, 'NewGridGenerationCapacityAnnualLimitMW2040'] * 1000
annual_grid_cap_gen_limit_2050 = SpecsData.loc[0, 'NewGridGenerationCapacityAnnualLimitMW2050'] * 1000
annual_grid_cap_gen_limit_2060 = SpecsData.loc[0, 'NewGridGenerationCapacityAnnualLimitMW2060'] * 1000
annual_grid_cap_gen_limit_2070 = SpecsData.loc[0, 'NewGridGenerationCapacityAnnualLimitMW2070'] * 1000
settlements_in_csv = calibrated_csv_path
settlements_out_csv = os.path.join(results_folder,
'{}-1-{}_{}_{}_{}_{}_{}.csv'.format(countryID, popIndex, tierIndex,
fiveyearIndex, gridIndex, pvIndex,
prioIndex))
summary_csv = os.path.join(summary_folder,
'{}-1-{}_{}_{}_{}_{}_{}_summary.csv'.format(countryID, popIndex, tierIndex,
fiveyearIndex, gridIndex, pvIndex,
prioIndex))
onsseter = SettlementProcessor(settlements_in_csv)
start_year = SpecsData.iloc[0][SPE_START_YEAR]
end_year = SpecsData.iloc[0][SPE_END_YEAR]
existing_grid_cost_ratio = SpecsData.iloc[0][SPE_EXISTING_GRID_COST_RATIO]
num_people_per_hh_rural = float(SpecsData.iloc[0][SPE_NUM_PEOPLE_PER_HH_RURAL])
num_people_per_hh_urban = float(SpecsData.iloc[0][SPE_NUM_PEOPLE_PER_HH_URBAN])
max_grid_extension_dist = float(SpecsData.iloc[0][SPE_MAX_GRID_EXTENSION_DIST])
urban_elec_ratio = float(SpecsData.iloc[0]['urban_elec_ratio_modelled'])
rural_elec_ratio = float(SpecsData.iloc[0]['rural_elec_ratio_modelled'])
# annual_new_grid_connections_limit = SpecsData.loc[0, 'NewGridConnectionsAnnualLimitThousands']*1000
pv_no = 1
diesel_no = 1
# RUN_PARAM: Fill in general and technology specific parameters (e.g. discount rate, losses etc.)
Technology.set_default_values(base_year=start_year,
start_year=start_year,
end_year=end_year,
discount_rate=0.10)
grid_calc = Technology(om_of_td_lines=0.02,
distribution_losses=0.122,
connection_cost_per_hh=125,
base_to_peak_load_ratio=0.8,
capacity_factor=1,
tech_life=30,
grid_capacity_investment=float(SpecsData.iloc[0][SPE_GRID_CAPACITY_INVESTMENT]),
grid_penalty_ratio=1,
grid_price=grid_price)
mg_hydro_calc = Technology(om_of_td_lines=0.02,
distribution_losses=0.05,
connection_cost_per_hh=100,
base_to_peak_load_ratio=0.85,
capacity_factor=0.5,
tech_life=30,
capital_cost=3000,
om_costs=0.03)
mg_wind_calc = Technology(om_of_td_lines=0.02,
distribution_losses=0.05,
connection_cost_per_hh=100,
base_to_peak_load_ratio=0.85,
capital_cost=3750,
om_costs=0.02,
tech_life=20)
mg_pv_calc = Technology(om_of_td_lines=0.02,
distribution_losses=0.05,
connection_cost_per_hh=100,
base_to_peak_load_ratio=0.85,
tech_life=20,
om_costs=0.015,
capital_cost=2950)
sa_pv_calc = Technology(base_to_peak_load_ratio=0.9,
tech_life=15,
om_costs=0.02,
capital_cost={0.020: 9620,
0.050: 8780,
0.100: 6380,
1: 4470,
5: 6950},
standalone=True)
mg_diesel_calc = Technology(om_of_td_lines=0.02,
distribution_losses=0.05,
connection_cost_per_hh=100,
base_to_peak_load_ratio=0.85,
capacity_factor=0.7,
tech_life=15,
om_costs=0.1,
capital_cost=721)
sa_diesel_calc = Technology(base_to_peak_load_ratio=0.9,
capacity_factor=0.5,
tech_life=10,
om_costs=0.1,
capital_cost=938,
standalone=True)
sa_diesel_cost = {'diesel_price': diesel_price,
'efficiency': 0.28,
'diesel_truck_consumption': 14,
'diesel_truck_volume': 300}
mg_diesel_cost = {'diesel_price':diesel_price,
'efficiency': 0.33,
'diesel_truck_consumption': 33.7,
'diesel_truck_volume': 15000}
# RUN_PARAM: One shall define here the years of analysis (excluding start year) together with access targets per interval and timestep duration
yearsofanalysis = [2025, 2030, 2040, 2050, 2060, 2070]
#yearsofanalysis = [2025, 2030, 2070]
eleclimits = {2025: 1, 2030: 1, 2040: 1, 2050: 1, 2060: 1, 2070: 1}
#eleclimits = {2025: 1, 2030: 1, 2070: 1}
time_steps = {2025: 7, 2030: 5, 2040: 10, 2050: 10, 2060: 10, 2070: 10}
# time_steps = {2025: 7, 2030: 5, 2070: 40}
elements = ["1.Population", "2.New_Connections", "3.Capacity", "4.Investment"]
techs = ["Grid", "SA_Diesel", "SA_PV", "MG_Diesel", "MG_PV", "MG_Wind", "MG_Hydro", "MG_Hybrid"]
time_step_number = {2025: 1, 2030: 2, 2040: 3, 2050: 4, 2060: 5, 2070: 6}
sumtechs = []
for element in elements:
for tech in techs:
sumtechs.append(element + "_" + tech)
sumtechs.append('Min_cluster_pop_2030')
sumtechs.append('Max_cluster_pop_2030')
sumtechs.append('Min_cluster_area')
sumtechs.append('Max_cluster_area')
sumtechs.append('Min_existing_grid_dist')
sumtechs.append('Max_existing_grid_dist')
sumtechs.append('Min_road_dist')
sumtechs.append('Max_road_dist')
sumtechs.append('Min_investment_capita_cost')
sumtechs.append('Max_investment_capita_cost')
total_rows = len(sumtechs)
df_summary =
|
pd.DataFrame(columns=yearsofanalysis)
|
pandas.DataFrame
|
"""
A combined CLI and callable version of the CHIME simulation model.
We adapted the CLI application in the CHIME project (https://github.com/CodeForPhilly/chime).
- added scenario and output_path parameters (separate from main Parameters object)
- added ability to use an input file, command line args, or DEFAULTS to instantiate model
- added ability to import this and call sim_chime() function from external apps so that we can run tons of scenarios over ranges of input parameters
- output is a dictionary of the standard CHIME dataframes as well as dictionaries
containing parameter and variable values.
- added output dataframes and csvs in wide and long format with admit and census data to facilitate analysis
- also writes out csvs
- added ability to include actual census and admit data to facilitate comparative plotting and analysis
- added ability to specify market share by date to reflect dynamics of regional patient flow
- added ability to specify relative-contact-rate by date to model ramp-up of social distancing and future social distancing policies.
"""
import os
from collections import OrderedDict
from pathlib import Path
from argparse import (
Action,
ArgumentParser,
)
from datetime import datetime
from typing import Dict, List, Sequence, Tuple, Optional
from sys import stdout
import numpy as np
import pandas as pd
from scipy.optimize import curve_fit
import json
from penn_chime.model.parameters import Parameters, Disposition
from penn_chime.model.sir import Sir
from .model.sirplus import SirPlus
from .model.sirplus import get_doubling_time
from logging import INFO, basicConfig, getLogger
basicConfig(
level=INFO,
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
stream=stdout,
)
logger = getLogger(__name__)
def parse_args():
"""Parse args."""
parser = ArgumentParser(description="sim chime scenario runner")
parser.add_argument("parameters", type=str, help="CHIME config (cfg) file")
parser.add_argument(
"--scenario", type=str, default=datetime.now().strftime("%Y.%m.%d.%H.%M."),
help="Prepended to output filenames. (default is current datetime)"
)
parser.add_argument(
"--output-path", type=str, default="", help="location for output file writing",
)
parser.add_argument(
"--market-share", type=str, default=None, help="csv file containing date and market share (<=1.0)",
)
parser.add_argument(
"--dynamic-rcr", type=str, default=None, help="csv file containing dates and relative contact rates",
)
parser.add_argument(
"--admits", type=str, default=None, help="csv file containing admits by date (prior to first mitigation date)",
)
parser.add_argument(
"--actual", type=str, default=None,
help="csv file containing day, date and actual measures (e.g. admits and census",
)
parser.add_argument(
"--experiment", type=str,
help="Undocumented feature: if not None, sim_chimes() function is called and a whole bunch of scenarios are run."
)
parser.add_argument("--quiet", action='store_true',
help="If True, suppresses output messages (default=False")
return parser.parse_args()
def create_params_from_file(file: str):
"""
Create CHIME Parameters object from input config file.
:param file:
:return: Parameters object
"""
args = ['--parameters', file]
p = Parameters.create(os.environ, args)
return p
def sim_chime(scenario: str, p: Parameters,
intrinsic_growth_rate: float=None,
initial_doubling_time: float=None,
admits_df: Optional[pd.DataFrame] = None,
rcr_policies_df: Optional[pd.DataFrame] = None):
"""
Run one CHIME simulation
:param scenario: identifier for scenario
:param p: CHIME parameters
:param intrinsic_growth_rate: Can optionally specify to override CHIME fitting/estimation
:param initial_doubling_time: Can optionally specify to override CHIME fitting/estimation
:param admits_df: Experimental - not currently using
:param rcr_policies_df: Dataframe of dates and rcr values
:return:
"""
# Run the model
if rcr_policies_df is None:
is_dynamic_rcr = False
# Just call penn_chime version of Sir
m = Sir(p)
else:
# Using dynamic rcrs, need to call customized version of Sir
is_dynamic_rcr = True
m = SirPlus(p, intrinsic_growth_rate, initial_doubling_time,
admits_df, rcr_policies_df)
# Gather results
input_params_dict = vars(p)
results = gather_sim_results(m, scenario, input_params_dict, is_dynamic_rcr)
# Return model object and results
return m, results
def write_results(results, scenario, path):
"""
Write csv (for dataframes) and json (for dictionaries) output files
:param results: Results dictionary
:param scenario: Scenario id to prepend to output filenames
:param path: Location for output filenames
:return: Nothing
"""
# Results dataframes
for df, name in (
(results["sim_sir_w_date_df"], "sim_sir_w_date"),
(results["dispositions_df"], "dispositions"),
(results["admits_df"], "admits"),
(results["census_df"], "census"),
(results["adm_cen_wide_df"], "adm_cen_wide"),
(results["adm_cen_long_df"], "adm_cen_long"),
):
df.to_csv(path + scenario + '_' + name + ".csv", index=True)
if 'sim_sir_enhanced_df' in results.keys():
results['sim_sir_enhanced_df'].to_csv(path + scenario + '_sim_sir_enhanced_df' + ".csv", index=True)
# Variable dictionaries
with open(path + scenario + "_inputs.json", "w") as f:
json.dump(results['input_params_dict'], f, default=str)
with open(path + scenario + "_key_vars.json", "w") as f:
json.dump(results['important_variables_dict'], f)
def gather_sim_results(m, scenario, input_params_dict, is_dynamic_rcr=False):
"""
Gather dataframes and dictionaries into master results dictionary.
:param m:
:param scenario:
:param input_params_dict:
:param is_dynamic_rcr:
:return: Dictionary containing dataframes and other dictionaries with results
"""
# Get key input/output variables
intrinsic_growth_rate = m.intrinsic_growth_rate
beta = m.beta
r_naught = m.r_naught
gamma = m.gamma # Recovery rate
if not is_dynamic_rcr:
# r_t is r_0 after distancing
r_t = m.r_t
doubling_time_t = m.doubling_time_t
intermediate_variables = OrderedDict({
'result_type': 'simsir',
'scenario': scenario,
'intrinsic_growth_rate': intrinsic_growth_rate,
'doubling_time': input_params_dict['doubling_time'],
'gamma': gamma,
'beta': beta,
'r_naught': r_naught,
'r_t': r_t,
'doubling_time_t': doubling_time_t,
})
else:
initial_doubling_time = m.initial_doubling_time
intermediate_variables = OrderedDict({
'result_type': 'simsir',
'scenario': scenario,
'intrinsic_growth_rate': intrinsic_growth_rate,
'initial_doubling_time': initial_doubling_time,
'r_naught': r_naught,
'gamma': gamma,
})
# Create wide and long versions of combined admit and census projection files
wide_df, long_df = join_and_melt(m.admits_df, m.census_df, scenario)
results = {
'result_type': 'simsir',
'scenario': scenario,
'input_params_dict': input_params_dict,
'important_variables_dict': intermediate_variables,
'sim_sir_w_date_df': m.sim_sir_w_date_df,
'dispositions_df': m.dispositions_df,
'admits_df': m.admits_df,
'census_df': m.census_df,
'adm_cen_wide_df': wide_df,
'adm_cen_long_df': long_df
}
return results
def join_and_melt(adm_df, cen_df, scenario):
"""
Create wide and long DataFrames with combined admit and census data suitable for
plotting.
:param adm_df:
:param cen_df:
:param scenario:
:return:
"""
wide_df = pd.merge(adm_df, cen_df, left_index=True, right_index=True,
suffixes=('_adm', '_cen'), validate="1:1")
wide_df.fillna(0., inplace=True)
wide_df['scenario'] = scenario
pd.testing.assert_series_equal(wide_df['day_adm'],
wide_df['day_cen'], check_names=False)
pd.testing.assert_series_equal(wide_df['date_adm'],
wide_df['date_cen'], check_names=False)
wide_df.rename({'day_adm': 'day', 'date_adm': 'date'}, axis='columns', inplace=True)
wide_df.drop(['day_cen', 'date_cen'], axis='columns', inplace=True)
long_df = pd.melt(wide_df,
id_vars=['scenario', 'day', 'date'],
var_name='dispo_measure', value_name='cases')
return wide_df, long_df
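# --- Hedged mini-example of the wide -> long reshape in join_and_melt ---
# Uses a two-day toy frame whose columns mimic the '_adm'/'_cen' suffixes that
# the merge above produces; all numbers are invented.
import pandas as pd

if __name__ == "__main__":
    toy_wide = pd.DataFrame({
        'scenario': ['demo', 'demo'],
        'day': [0, 1],
        'date': pd.to_datetime(['2020-03-01', '2020-03-02']),
        'hospitalized_adm': [3, 5],
        'hospitalized_cen': [3, 8],
    })
    toy_long = pd.melt(toy_wide, id_vars=['scenario', 'day', 'date'],
                       var_name='dispo_measure', value_name='cases')
    print(toy_long)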
def enhance_sim_sir_w_date(m, p, results):
""" Add growth rate, beta, doubling time and basic reproductive number to sir outputs."""
# Create dataframe from (beta, n_days) tuples and add date to it
rcr_policies_df = pd.DataFrame(m.beta_policies, columns=['beta', 'n_days'])
rcr_policies_df['date'] = pd.to_datetime(p.date_first_hospitalized)
time_added = pd.to_timedelta(rcr_policies_df['n_days'].iloc[:].cumsum() - 1, 'd').shift(1).fillna(pd.Timedelta(days=0))
rcr_policies_df['date'].iloc[:] = rcr_policies_df['date'].iloc[:] + time_added.iloc[:]
# Do equivalent of an Excel approximate vlookup
sim_sir_enhanced_df = pd.merge_asof(results['sim_sir_w_date_df'], rcr_policies_df, on='date')
# Ever infected is i+r
sim_sir_enhanced_df['ever_infected'] = sim_sir_enhanced_df['infected'] + sim_sir_enhanced_df['recovered']
# Compute beta as [S(t) - S(t+1)] / S(t)I(t)
# This should be exactly the same beta as specified in rcr policies
sim_sir_enhanced_df['beta_direct'] = sim_sir_enhanced_df['susceptible'].diff(-1) \
/ (sim_sir_enhanced_df['susceptible'] * sim_sir_enhanced_df['infected'])
sim_sir_enhanced_df['ever_infected_pct_change'] = sim_sir_enhanced_df['ever_infected'].pct_change(1)
sim_sir_enhanced_df['doubling_time_t'] = sim_sir_enhanced_df['ever_infected_pct_change'].map(lambda x: get_doubling_time(x))
sim_sir_enhanced_df['basic_reproductive_number_t'] = sim_sir_enhanced_df['susceptible'] * sim_sir_enhanced_df['beta_direct'] / m.gamma
return sim_sir_enhanced_df
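# --- Worked numeric check of the quantities computed above (hypothetical values) ---
# beta_direct = [S(t) - S(t+1)] / (S(t) * I(t)), exactly as in the dataframe
# column; the doubling time shown uses one common form, t_d = ln(2)/ln(1+g),
# for a constant growth rate g. The exact implementation lives in
# sirplus.get_doubling_time, so treat this as an illustration, not its source.
import math

if __name__ == "__main__":
    s_t, s_t1, i_t = 990_000.0, 989_500.0, 1_000.0
    print((s_t - s_t1) / (s_t * i_t))          # beta_direct ~ 5.05e-07
    growth = 0.05                              # 5% daily growth of ever_infected (made up)
    print(math.log(2) / math.log(1 + growth))  # ~14.2 days to double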
def market_share_adjustment(market_share_csv, base_results, mkt_scenario):
"""
Post-processor to use market share by date to get to resource projections.
:param market_share_csv:
:param base_results:
:param mkt_scenario:
:return: results dictionary
"""
# Get the hosp, icu and vent rates from the inputs
rates = {
key: d.rate
for key, d in base_results['input_params_dict']['dispositions'].items()
}
days = {
key: d.days
for key, d in base_results['input_params_dict']['dispositions'].items()
}
# Read market share file
market_share_df = pd.read_csv(market_share_csv, parse_dates=['date'])
sim_sir_w_date_df = base_results['sim_sir_w_date_df'].copy()
all_w_mkt_df =
|
pd.merge(sim_sir_w_date_df, market_share_df, on=['date'], how='left')
|
pandas.merge
|
import scipy.interpolate as sci
import geopandas as gpd
import shapely as shp
import random as random
import math
import arrow
import pandas as pd
import functools
import emeval.metrics.dist_calculations as emd
import emeval.input.spec_details as eisd
random.seed(1)
####
# BEGIN: Building blocks of the final implementations
####
####
# BEGIN: NORMALIZATION
####
# In addition to filtering the sensed values in the polygons, we should also
# really filter the ground truth values in the polygons, since there is no
# ground truth within the polygon. However, ground truth points are not known to
# be dense, and in some cases (e.g. commuter_rail_aboveground), there is a
# small gap between the polygon border and the first point outside it. We
# currently ignore this distance
def fill_gt_linestring(e):
section_gt_shapes = gpd.GeoSeries(eisd.SpecDetails.get_shapes_for_leg(e["ground_truth"]["leg"]))
e["ground_truth"]["gt_shapes"] = section_gt_shapes
e["ground_truth"]["linestring"] = emd.filter_ground_truth_linestring(e["ground_truth"]["gt_shapes"])
e["ground_truth"]["utm_gt_shapes"] = section_gt_shapes.apply(lambda s: shp.ops.transform(emd.to_utm_coords, s))
e["ground_truth"]["utm_linestring"] = emd.filter_ground_truth_linestring(e["ground_truth"]["utm_gt_shapes"])
def to_gpdf(location_df):
return gpd.GeoDataFrame(
location_df, geometry=location_df.apply(
lambda lr: shp.geometry.Point(lr.longitude, lr.latitude), axis=1))
def get_int_aligned_trajectory(location_df, tz="UTC"):
lat_fn = sci.interp1d(x=location_df.ts, y=location_df.latitude)
lon_fn = sci.interp1d(x=location_df.ts, y=location_df.longitude)
# In order to avoid extrapolation, we use ceil for the first int and floor
# for the last int
first_int_ts = math.ceil(location_df.ts.iloc[0])
last_int_ts = math.floor(location_df.ts.iloc[-1])
new_ts_range = [float(ts) for ts in range(first_int_ts, last_int_ts, 1)]
new_fmt_time_range = [arrow.get(ts).to(tz) for ts in new_ts_range]
new_lat = lat_fn(new_ts_range)
new_lng = lon_fn(new_ts_range)
new_gpdf = gpd.GeoDataFrame({
"latitude": new_lat,
"longitude": new_lng,
"ts": new_ts_range,
"fmt_time": new_fmt_time_range,
"geometry": [shp.geometry.Point(x, y) for x, y in zip(new_lng, new_lat)]
})
return new_gpdf
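# --- Hedged toy run of get_int_aligned_trajectory ---
# Resamples an irregular three-point track onto whole-second timestamps using
# the interpolators defined above. Timestamps and coordinates are invented.
import pandas as pd

if __name__ == "__main__":
    toy_locs = pd.DataFrame({
        "ts": [100.4, 101.7, 103.2],
        "latitude": [37.00, 37.01, 37.03],
        "longitude": [-122.00, -122.02, -122.05],
    })
    resampled = get_int_aligned_trajectory(toy_locs)
    print(resampled[["ts", "latitude", "longitude"]])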
####
# END: NORMALIZATION
####
####
# BEGIN: DISTANCE CALCULATION
####
def add_gt_error_projection(location_gpdf, gt_linestring):
location_gpdf["gt_distance"] = location_gpdf.distance(gt_linestring)
location_gpdf["gt_projection"] = location_gpdf.geometry.apply(
lambda p: gt_linestring.project(p))
def add_t_error(location_gpdf_a, location_gpdf_b):
location_gpdf_a["t_distance"] = location_gpdf_a.distance(location_gpdf_b)
location_gpdf_b["t_distance"] = location_gpdf_a.t_distance
def add_self_project(location_gpdf):
loc_linestring = shp.geometry.LineString(coordinates=list(zip(
location_gpdf.longitude, location_gpdf.latitude)))
location_gpdf["s_projection"] = location_gpdf.geometry.apply(
lambda p: loc_linestring.project(p))
####
# END: DISTANCE CALCULATION
####
####
# BEGIN: MERGE
####
# Assumes both entries exist
def b_merge_midpoint(loc_row):
# print("merging %s" % loc_row)
assert not pd.isnull(loc_row.geometry_i) and not pd.isnull(loc_row.geometry_a)
midpoint = shp.geometry.LineString(coordinates=[loc_row.geometry_a, loc_row.geometry_i]).interpolate(0.5, normalized=True)
# print(midpoint)
final_geom = (midpoint, "midpoint")
return final_geom
def b_merge_random(loc_row):
# print("merging %s" % loc_row)
assert not pd.isnull(loc_row.geometry_i) and not pd.isnull(loc_row.geometry_a)
r_idx = random.choice(["geometry_a","geometry_i"])
rp = loc_row[r_idx]
# print(midpoint)
final_geom = (rp, r_idx)
return final_geom
def b_merge_closer_gt_dist(loc_row):
# print("merging %s" % loc_row)
assert not pd.isnull(loc_row.geometry_i) and not pd.isnull(loc_row.geometry_a)
if loc_row.gt_distance_a < loc_row.gt_distance_i:
final_geom = (loc_row.geometry_a, "android")
else:
final_geom = (loc_row.geometry_i, "ios")
return final_geom
def b_merge_closer_gt_proj(loc_row):
# print("merging %s" % loc_row)
assert not pd.isnull(loc_row.geometry_i) and not pd.isnull(loc_row.geometry_a)
if loc_row.gt_projection_a < loc_row.gt_projection_i:
final_geom = (loc_row.geometry_a, "android")
else:
final_geom = (loc_row.geometry_i, "ios")
return final_geom
def collapse_inner_join(loc_row, b_merge_fn):
"""
Collapse a merged row. The merge was through inner join so both sides are
known to exist
"""
final_geom, source = b_merge_fn(loc_row)
return {
"ts": loc_row.ts,
"longitude": final_geom.x,
"latitude": final_geom.y,
"geometry": final_geom,
"source": source
}
def collapse_outer_join_stateless(loc_row, b_merge_fn):
"""
Collapse a merged row through outer join. This means that we can have
either the left side or the right side, or both.
- If only one side exists, we use it.
- If both sides exist, we merge using `b_merge_fn`
"""
source = None
if pd.isnull(loc_row.geometry_i):
assert not pd.isnull(loc_row.geometry_a)
final_geom = loc_row.geometry_a
source = "android"
elif pd.isnull(loc_row.geometry_a):
assert not pd.isnull(loc_row.geometry_i)
final_geom = loc_row.geometry_i
source = "ios"
else:
final_geom, source = b_merge_fn(loc_row)
return {
"ts": loc_row.ts,
"longitude": final_geom.x,
"latitude": final_geom.y,
"geometry": final_geom,
"source": source
}
def collapse_outer_join_dist_so_far(loc_row, more_details_fn = None):
"""
Collapse a merged row through outer join. This means that we can have
either the left side or the right side, or both. In this case, we also
want to make sure that the trajectory state is "progressing". In this only
current implementation, we check that the distance along the ground truth
trajectory is progressively increasing. Since this can be complex to debug,
the `more_details` function returns `True` for rows for which we need more
details of the computation.
"""
global distance_so_far
source = None
more_details = False
EMPTY_POINT = shp.geometry.Point()
if more_details_fn is not None and more_details_fn(loc_row):
more_details = True
if more_details:
print(loc_row.gt_projection_a, loc_row.gt_projection_i)
if pd.isnull(loc_row.geometry_i):
assert not pd.isnull(loc_row.geometry_a)
if loc_row.gt_projection_a > distance_so_far:
final_geom = loc_row.geometry_a
source = "android"
else:
final_geom = EMPTY_POINT
elif
|
pd.isnull(loc_row.geometry_a)
|
pandas.isnull
|
import os
import time
import math
import json
import hashlib
import datetime
import pandas as pd
import numpy as np
from run_pyspark import PySparkMgr
graph_type = "loan_agent/"
def make_md5(x):
md5 = hashlib.md5()
md5.update(x.encode('utf-8'))
return md5.hexdigest()
def make_node_schema(entity_name, entity_df, comp_index_properties = None, mix_index_properties = None):
properties = {"propertyKeys": []}
for col in entity_df.columns:
if entity_df[col].dtype == np.float:
prop = {"name": col, "dataType": "Float", "cardinality": "SINGLE"}
elif entity_df[col].dtype == np.integer:
prop = {"name": col, "dataType": "Integer", "cardinality": "SINGLE"}
else:
prop = {"name": col, "dataType": "String", "cardinality": "SINGLE"}
properties["propertyKeys"].append(prop)
vertexLabels = {"vertexLabels": []}
vertexLabels["vertexLabels"].append({"name": entity_name})
vertexIndexes = {"vertexIndexes": []}
if comp_index_properties is not None:
for prop in comp_index_properties:
vertexIndexes["vertexIndexes"].append({
"name" : entity_name + "_" + prop + "_comp",
"propertyKeys" : [ prop ],
"composite" : True,
"unique" : False
})
if mix_index_properties is not None:
for prop in mix_index_properties:
vertexIndexes["vertexIndexes"].append({
"name" : entity_name + "_" + prop + "_mixed",
"propertyKeys" : [ prop ],
"composite" : False,
"unique" : False,
"mixedIndex" : "search"
})
vertexIndexes["vertexIndexes"].append({
"name" : entity_name + "_graph_label_mixed",
"propertyKeys" : [ "graph_label" ],
"composite" : False,
"unique" : False,
"mixedIndex" : "search"
})
return {**properties, **vertexLabels, **vertexIndexes}
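# --- Hedged usage sketch for make_node_schema (toy entity frame) ---
# Prints the schema dict (propertyKeys / vertexLabels / vertexIndexes) built by
# the helper above. Assumes a numpy version where np.float is still available,
# since make_node_schema relies on it; the entity name and columns are invented.
import json
import pandas as pd

if __name__ == "__main__":
    demo_entity_df = pd.DataFrame({"name": ["a", "b"], "score": [0.5, 0.9]})
    demo_schema = make_node_schema("demo_entity", demo_entity_df,
                                   comp_index_properties=["name"])
    print(json.dumps(demo_schema, ensure_ascii=False, indent=2))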
def make_node_mapper(entity_name, entity_df):
entity_file = "gra_" + entity_name + ".csv"
vertexMap = {"vertexMap": {entity_file: {}}}
vertexMap["vertexMap"][entity_file] = {
"[VertexLabel]" : entity_name
}
for col in entity_df.columns:
vertexMap["vertexMap"][entity_file][col] = col
return vertexMap
def make_vertex_centric_schema(edge_name, index_property, direction, order):
if direction not in ["BOTH", "IN", "OUT"]:
print("direction should be in {}".format(["BOTH", "IN", "OUT"]))
return None
if order not in ["incr", "decr"]:
print("order should be in {}".format(["incr", "decr"]))
return None
vertexCentricIndexes = {"vertexCentricIndexes": []}
vertexCentricIndexes["vertexIndexes"].append({
"name" : edge_name + "_" + index_property,
"edge" : edge_name,
"propertyKeys" : [ index_property ],
"order": order,
"direction": direction
})
return vertexCentricIndexes
def make_edge_schema(relation_df = None, relation_comp_index_properties = None, relation_mix_index_properties = None):
properties = {"propertyKeys": []}
relation_columns = relation_df.columns.tolist()
if "Left" not in relation_columns or "Right" not in relation_columns:
print("relation df lacks Left and Right columns ")
for col in relation_df.columns:
if col in ["Left", "Right", "Type"]:
continue
if relation_df[col].dtype == np.float:
prop = {"name": col, "dataType": "Float", "cardinality": "SINGLE"}
elif relation_df[col].dtype == np.integer:
prop = {"name": col, "dataType": "Integer", "cardinality": "SINGLE"}
else:
prop = {"name": col, "dataType": "String", "cardinality": "SINGLE"}
properties["propertyKeys"].append(prop)
relation_names = relation_df["Type"].value_counts().index.tolist()
edgeLabels = {"edgeLabels": []}
for relation in relation_names:
edgeLabels["edgeLabels"].append({
"name": relation,
"multiplicity": "MULTI",
"unidirected": False
})
edgeIndexes = {"edgeIndexes": []}
for relation_name in relation_names:
if relation_comp_index_properties is not None:
for prop in relation_comp_index_properties:
edgeIndexes["edgeIndexes"].append({
"name": relation_name + "_" + prop + "_comp",
"propertyKeys": [ prop ],
"composite": True,
"unique": False,
"indexOnly": relation_name
})
if relation_mix_index_properties is not None:
for prop in relation_mix_index_properties:
edgeIndexes["edgeIndexes"].append({
"name" : relation_name + "_" + prop + "_mixed",
"propertyKeys": [ prop ],
"composite": False,
"unique": False,
"mixedIndex": "search",
"indexOnly": relation_name
})
return {**properties, **edgeLabels, **edgeIndexes}
def make_edge_mapper(entity_relations, relation_df=None, specific_relation=None):
edgeMap = {"edgeMap": {}}
for relation_name, entity_pairs in entity_relations.items():
if specific_relation is not None and relation_name != specific_relation:
continue
for pair in entity_pairs:
relation_file = "gra_" + relation_name + ".csv"
edge = {"[edge_left]": {"Left": pair[0]},
"[EdgeLabel]": relation_name,
"[edge_right]": {"Right": pair[1]}}
if relation_df is not None:
relation_columns = relation_df.columns.tolist()
if "Left" not in relation_columns or "Right" not in relation_columns:
print("relation df lacks Left and Right columns ")
for col in relation_df.columns:
if col in ["Left", "Right", "Type"]:
continue
edge[col] = col
edgeMap["edgeMap"][relation_file] = edge
return edgeMap
def dump_schema(schema, datamapper, folder):
if not os.path.exists(graph_type + folder):
os.makedirs(graph_type + folder)
f = open(graph_type + folder + "/schema.json", 'w')
f.write(json.dumps(schema))
f.close()
f = open(graph_type + folder + "/datamapper.json", 'w')
f.write(json.dumps(datamapper))
f.close()
spark_args = {}
pysparkmgr = PySparkMgr(spark_args)
_, spark, sc = pysparkmgr.start('xubin.xu')
# Credit quota application table
apply_loan_df = spark.sql("select * from adm.adm_credit_apply_quota_doc").toPandas()
# Loan drawdown table
zhiyong_loan_df = spark.sql("select * from adm.adm_credit_loan_apply_doc").toPandas()
zhiyong_loan_df.quota_apply_id = zhiyong_loan_df.quota_apply_id.astype("int")
# Overdue table
overdue_sql = """select
*
from adm.adm_credit_apply_quota_doc t1
--overdue join: one customer can have several applications at different times, each with its own overdue status
--current overdue days and historical maximum overdue days
left join
(
select
quota_apply_id,
max(overdue_days_now) as overdue_days_now,
max(his_max_overdue_days) as his_max_overdue_days
from
(
select
c4.quota_apply_id,
c3.overdue_days_now,
c3.his_max_overdue_days
from
adm.adm_credit_loan_apply_doc c4
left join
(
select
c2.business_id,
max(overdue_days_now) as overdue_days_now,
max(overdue_day_calc) as his_max_overdue_days
from
(
select
c1.*,
(case when (overdue_day_calc>0 and latest_actual_repay_date is not null) then 0 else overdue_day_calc end) as overdue_days_now
FROM adm.adm_credit_rpt_risk_overdue_bill c1
) c2
group by c2.business_id
) c3
on c4.loan_no=c3.business_id
) c5
group by quota_apply_id
) t4
on t1.quota_apply_id=t4.quota_apply_id
--first-payment overdue days: current and historical maximum ----------------------------------------------------------
left join
(
select
quota_apply_id,
max(fpd) as fpd,
max(fpd_ever) as fpd_ever
from
(
select
a1.*,a2.*
from
adm.adm_credit_loan_apply_doc a1
left join
(
select
c1.business_id,
(case when (overdue_day_calc>0 and latest_actual_repay_date is null) then overdue_day_calc else 0 end) as fpd,--current first-payment overdue days
c1.overdue_day_calc as fpd_ever--historical first-payment overdue days
from
adm.adm_credit_rpt_risk_overdue_bill c1
where periods=1
) a2
on a1.loan_no=a2.business_id
) a3
group by quota_apply_id
) t5
on t1.quota_apply_id=t5.quota_apply_id"""
overday_df = spark.sql(overdue_sql).toPandas()
# Build the borrower entity
def make_borrower_entity():
shouxin_zhiyong_df = pd.merge(apply_loan_df, zhiyong_loan_df[
["quota_apply_id", "apply_id", "apply_status_risk", "loan_status", "loan_amount", "repayment_principal"]],
how='left', on='quota_apply_id')
borrower_basic_df = shouxin_zhiyong_df[
["name", "uus_id", "employee_no", "identity_no", "sex", "age", "zociac", "educate_level", "marital_status",
"city", "access_role", "entry_date",
"resign_date", "on_job_status", "current_working_days", "uc_job_level_name", "store_city", "apply_id",
"team_code", "shop_code", "area_code", "marketing_code", "region_code"]]
borrower = shouxin_zhiyong_df.groupby("identity_no")
borrower_ext_df = pd.DataFrame([], columns=["identity_no", "累计贷款笔数", "未结清贷款笔数", "累计贷款金额", "当前贷款余额"])
idx = 0
for group, df in borrower:
loans_cnt = df[(~pd.isnull(df.apply_id)) & (df.apply_status_risk_y == "放款成功")].apply_id.count()
unclosed_loans_cnt = df[(~pd.isnull(df.apply_id)) & (df.apply_status_risk_y == "放款成功") & (
df.loan_status == "REPAYING")].apply_id.count()
loans_amt = df[(~pd.isnull(df.apply_id)) & (df.apply_status_risk_y == "放款成功")].loan_amount_y.sum()
unpayed_amt = loans_amt - df[
(~pd.isnull(df.apply_id)) & (df.apply_status_risk_y == "放款成功")].repayment_principal.sum()
borrower_ext_df.loc[idx] = {"identity_no": group, "累计贷款笔数": loans_cnt, "未结清贷款笔数": unclosed_loans_cnt,
"累计贷款金额": loans_amt, "当前贷款余额": unpayed_amt}
idx += 1
borrower_basic_df.drop_duplicates(borrower_basic_df.columns, keep='first', inplace=True)
borrower_entity_df = pd.merge(borrower_basic_df, borrower_ext_df, on="identity_no")
borrower_entity_df = borrower_entity_df.fillna(0)
overday_gp = overday_df[(~pd.isnull(overday_df.overdue_days_now))].groupby("identity_no")["overdue_days_now"].max()
overday_now_df = pd.DataFrame({"identity_no": overday_gp.index, "overdue_days_now": overday_gp.values})
borrower_entity_df = pd.merge(borrower_entity_df, overday_now_df, how="left", on="identity_no")
his_overday_gp = overday_df[(~pd.isnull(overday_df.his_max_overdue_days))].groupby("identity_no")[
"his_max_overdue_days"].max()
his_overday_df = pd.DataFrame({"identity_no": his_overday_gp.index, "his_max_overdue_days": his_overday_gp.values})
borrower_entity_df = pd.merge(borrower_entity_df, his_overday_df, how="left", on="identity_no")
borrower_entity_df = borrower_entity_df.fillna(0)
borrower_entity_df["tag"] = ""
for idx in borrower_entity_df.index:
max_overday = borrower_entity_df.loc[idx, "overdue_days_now"]
his_max_overday = borrower_entity_df.loc[idx, "his_max_overdue_days"]
loan_amt = borrower_entity_df.loc[idx, "累计贷款金额"]
job_status = borrower_entity_df.loc[idx, "on_job_status"]
tag = borrower_entity_df.loc[idx, "tag"]
if his_max_overday > 90:
tag = tag + ",坏客户"
if max_overday > 30:
tag = tag + ",首逾30+"
if job_status == "离职":
tag = tag + ",离职"
if loan_amt > 0:
tag = tag + ",放款"
else:
tag = tag + ",未放款"
p = tag.find(",")
if p == 0:
tag = tag[1:]
borrower_entity_df.loc[idx, "tag"] = tag
borrower_entity_df.drop(["apply_id"], axis=1, inplace=True)
borrower_entity_df.drop_duplicates(borrower_entity_df.columns, inplace=True)
return borrower_entity_df
borrower_entity_df = make_borrower_entity()
borrower_entity_df.columns = ["姓名", "uus_id", "员工号", "身份证号", "性别", "年龄", "星座", "教育程度", "婚姻状态", "城市", "角色", "入职日期",
"离职日期",
"当前在职状态", "当前在职天数", "当前职级", "门店所在城市", "team_code", "shop_code", "area_code",
"marketing_code", "region_code",
"累计贷款笔数", "未结清贷款笔数", "累计贷款金额", "当前贷款余额", "当前逾期天数", "历史最大逾期天数", "tag"]
# Build the contact entity
def make_contact_entity():
contact_df = spark.sql("select * from credit_loan_api_service.personal_contact_info").toPandas()
contact_df = contact_df[contact_df.product_id == "ELOAN_AGENT"]
contact_df = contact_df[["contact_name", "contact_way", "contact_relationship", "uid"]]
contact_df.columns = ["姓名", "联系方式", "关系", "uid"]
contact_df.drop_duplicates(contact_df.columns, inplace=True)
return contact_df
contact_entity_df = make_contact_entity()
contact_entity_df["ext_id"] = contact_entity_df["姓名"] + contact_entity_df["联系方式"] + contact_entity_df["关系"] + \
contact_entity_df["uid"]
contact_entity_df.ext_id = contact_entity_df.ext_id.apply(lambda x: make_md5(x))
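# make_md5() is defined outside this excerpt; the sketch below is a hypothetical stand-in showing
# how such a helper is typically written with hashlib, so the ext_id construction above reads
# self-contained. It is not the original definition.
import hashlib

def _make_md5_sketch(text):
    # Hash the concatenated name/phone/relation/uid string into a stable 32-char hex id.
    return hashlib.md5(str(text).encode("utf-8")).hexdigest()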
# Build the address entity
def make_address_entity():
address_df = spark.sql("select * from credit_loan_api_service.credit_personal_info").toPandas()
address_df = address_df[address_df.product_id == "ELOAN_AGENT"]
address_df = address_df[["address", "province", "city", "district", "uid"]]
address_df.columns = ["地址", "省份", "城市", "区", "uid"]
address_df.drop_duplicates(address_df.columns, inplace=True)
return address_df
address_entity_df = make_address_entity()
# Build the phone entity
def make_phone_entity():
phones_df = apply_loan_df[["uus_id", "telephone"]]
phones_df = pd.concat([phones_df, zhiyong_loan_df[["uus_id", "telephone"]]])
phones_df = pd.merge(borrower_entity_df[["uus_id"]], phones_df, how="left", on="uus_id")
phones_df = phones_df[~pd.isnull(phones_df.telephone)]
phones_df["tag"] = "借款人"
contact_phones_df = contact_entity_df[["uid", "联系方式"]].copy()  # copy so the in-place rename does not touch contact_entity_df
contact_phones_df.rename(columns={"uid": "uus_id", "联系方式": "telephone"}, inplace=True)
contact_phones_df = contact_phones_df[~pd.isnull(contact_phones_df.telephone)]
contact_phones_df["tag"] = "联系人"
phones_df = pd.concat([phones_df, contact_phones_df])
phones_df.rename(columns={"telephone": "手机号"}, inplace=True)
phones_df.drop_duplicates(phones_df.columns, keep='first', inplace=True)
return phones_df
phones_entity_df = make_phone_entity()
# Build the team, shop, area, marketing and region entities
def build_teams(code):
team_gp = borrower_entity_df.groupby(code)
team_df = pd.DataFrame([], columns=["编号", "名称", "放款总人数", "放款总金额", "当前总贷款余额", "总坏客户人数"])
idx = 0
for group, df in team_gp:
loan_cnt = df[df["累计贷款笔数"] > 0]["累计贷款笔数"].count()
loan_amt = df["累计贷款金额"].sum()
unpaid_amt = df["当前贷款余额"].sum()
bad_cnt = df[df.tag.str.contains("坏客户")]["身份证号"].count()
team_df.loc[idx] = {"编号": group, "名称": "", "放款总人数": loan_cnt, "放款总金额": loan_amt,
"当前总贷款余额": unpaid_amt, "总坏客户人数": bad_cnt}
idx += 1
team_df.drop_duplicates(team_df.columns, inplace=True)
return team_df
def make_shop_entity():
shop_df = build_teams("shop_code")
shop_df = shop_df[(shop_df["编号"].str.strip().str.len() > 0) & (shop_df["编号"]!=0)]
shop_address_df = spark.sql("select shop_id, shop_code, shop_name, address, city_name from spark_dw.dw_ke_bkjf_shh_house_shop_base_da").toPandas()
shop_df = pd.merge(shop_df, shop_address_df[["shop_code", "shop_name", "address", "city_name"]],
how = "left", left_on="编号", right_on="shop_code")
shop_df["名称"] = shop_df.shop_name
shop_df.drop(["shop_name", "shop_code"], axis=1, inplace=True)
shop_df.rename(columns={"address": "地址", "city_name": "城市"}, inplace=True)
shop_df.drop_duplicates(shop_df.columns, inplace=True)
return shop_df
def make_group_entity(group):
team_df = build_teams(group + "_code")
team_df = team_df[(team_df["编号"].str.strip().str.len() > 0) & (team_df["编号"]!=0)]
tmp_df = apply_loan_df[[group + "_code", group + "_name"]]
team_df = pd.merge(team_df, tmp_df, how="left", left_on="编号", right_on=group + "_code")
team_df["名称"] = team_df[group + "_name"]
team_df.drop([group + "_code", group + "_name"], axis=1, inplace=True)
team_df.drop_duplicates(team_df.columns, inplace=True)
return team_df
team_df = make_group_entity("team")
team_df['tag'] = np.where(team_df['总坏客户人数'] > 1, '高风险组', '正常组')
shop_entity_df = make_shop_entity()
shop_entity_df['tag'] = np.where(shop_entity_df['总坏客户人数'] > 2, '高风险门店', '正常门店')
area_df = make_group_entity("area")
marketing_df = make_group_entity("marketing")
region_df = make_group_entity("region")
# Build the device IP entity
def make_device_ip():
ip_df = spark.sql("""select ip, udid, union_id, event_time from credit_biz_metrics.device_fingerprint
where date(event_time)>=date('2020-08-24') and udid!='2408c710977177815f01fbc344dedc8b'""").toPandas()
ip_df.sort_values(by="event_time", inplace=True)
ip_df.drop_duplicates(list(set(ip_df.columns).difference({"event_time"})), keep='first', inplace=True)
return ip_df
ip_df = make_device_ip()
# Build the device entity
def make_device_entity():
device_df = spark.sql("""select udid, union_id, imei, idfa, meid, event_time from credit_biz_metrics.device_fingerprint
where date(event_time)>=date('2020-08-24') and udid!='2408c710977177815f01fbc344dedc8b'""").toPandas()
device_df.sort_values(by="event_time", inplace=True)
device_df.drop_duplicates(list(set(device_df.columns).difference({"event_time"})), keep='first', inplace=True)
return device_df
device_df = make_device_entity()
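# Both device builders above use the same idiom: sort by event_time, then drop duplicates on
# every column except event_time, so only the earliest record per device/ip combination is kept.
# A tiny self-contained illustration with toy data (not part of the pipeline):
_dedup_toy = pd.DataFrame({"udid": ["a", "a", "b"],
                           "ip": ["1.1.1.1", "1.1.1.1", "2.2.2.2"],
                           "event_time": ["2020-08-25", "2020-08-24", "2020-08-26"]})
_dedup_toy = _dedup_toy.sort_values(by="event_time")
_dedup_toy = _dedup_toy.drop_duplicates(list(set(_dedup_toy.columns).difference({"event_time"})), keep="first")
# -> one row per (udid, ip); udid "a" keeps its earliest event_time, "2020-08-24".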
# Build the borrower-contact relationship
def make_borrower_contact():
borrower_contact_df = pd.merge(borrower_entity_df[["uus_id"]], contact_entity_df, left_on="uus_id", right_on="uid")[["uus_id", "关系", "uid", "ext_id"]]
borrower_contact_df.rename(columns={"uus_id": "Left", "关系": "Type", "ext_id": "Right"}, inplace=True)
borrower_contact_df = borrower_contact_df[["Left", "Type", "Right"]]
borrower_contact_df.drop_duplicates(borrower_contact_df.columns, inplace=True)
return borrower_contact_df
borrower_contact_df = make_borrower_contact()
# Build the borrower-phone relationship
def make_borrower_phones():
borrower_phones = phones_entity_df[phones_entity_df.tag == "借款人"].copy()  # copy to avoid modifying the slice in place
borrower_phones.rename(columns={"uus_id": "Left", "手机号": "Right"}, inplace=True)
borrower_phones["Type"] = "借款人号码"
borrower_phones = borrower_phones[["Left", "Type", "Right"]]
borrower_phones.drop_duplicates(borrower_phones.columns, inplace=True)
return borrower_phones
borrower_phones_df = make_borrower_phones()
# Build the contact-phone relationship
def make_contact_phones():
contact_phones = phones_entity_df[phones_entity_df.tag == "联系人"].copy()  # copy to avoid modifying the slice in place
contact_phones.rename(columns={"uus_id": "Left", "手机号": "Right"}, inplace=True)
contact_phones["Type"] = "联系人号码"
contact_phones = contact_phones[["Left", "Type", "Right"]]
contact_phones.drop_duplicates(contact_phones.columns, inplace=True)
return contact_phones
contact_phones_df = make_contact_phones()
# Build the borrower-address relationship
def make_borrower_address():
borrower_address = pd.merge(borrower_entity_df[["uus_id"]], address_entity_df["uid"], left_on="uus_id", right_on="uid")
borrower_address["Type"] = "居住"
borrower_address.rename(columns={"uus_id": "Left", "uid": "Right"}, inplace=True)
borrower_address = borrower_address[["Left", "Type", "Right"]]
borrower_address.drop_duplicates(borrower_address.columns, inplace=True)
return borrower_address
borrower_address_df = make_borrower_address()
# Build the borrower-team relationship
def make_borrower_team():
tmp_gp = zhiyong_loan_df.groupby(["identity_no", "team_code"])
borrower_team = pd.DataFrame([], columns=['Left', 'Type', 'Right', '放款时间', '放款状态'])
idx = 0
for group, df in tmp_gp:
loans = df[(~pd.isnull(df.apply_id)) & (df.apply_status_risk=="放款成功")]
if loans.shape[0] == 0:
borrower_team.loc[idx] = {"Left": group[0], "Type": "所属团队", "Right": group[1], "放款时间": "", "放款状态": df.apply_status_risk.values[0]}
idx += 1
continue
min_loan_time = loans.loan_success_time.min()
team_code = loans[loans.loan_success_time == min_loan_time].team_code.values[0]
borrower_team.loc[idx] = {"Left": group[0], "Type": "所属团队", "Right": team_code, "放款时间": min_loan_time, "放款状态": "放款成功"}
idx += 1
borrower_team.drop_duplicates(borrower_team.columns, keep='first', inplace=True)
apply_no_zhiyong = pd.merge(borrower_entity_df[["身份证号", "team_code"]], borrower_team["Left"], how="left", left_on="身份证号", right_on="Left")
apply_no_zhiyong = apply_no_zhiyong[pd.isnull(apply_no_zhiyong.Left)]
apply_no_zhiyong.drop_duplicates(apply_no_zhiyong.columns, inplace=True)
apply_no_zhiyong.drop(["Left"], axis=1, inplace=True)
apply_no_zhiyong.rename(columns={"身份证号": "Left", "team_code": "Right"}, inplace=True)
apply_no_zhiyong["Type"] = "所属团队"
apply_no_zhiyong["放款时间"] = ""
apply_no_zhiyong["放款状态"] = "未支用"
apply_no_zhiyong = apply_no_zhiyong[["Left", "Type", "Right", "放款时间", "放款状态"]]
return pd.concat([borrower_team, apply_no_zhiyong])
borrower_team = make_borrower_team()
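# make_borrower_team() records, per borrower, the team attached to their earliest successful
# loan. A compact sketch of that "earliest row per group" idiom with groupby + idxmin follows;
# it converts loan_success_time to datetime first and is an illustration only, not a drop-in
# replacement for the function above.
def _earliest_success_team_sketch(loans_df):
    ok = loans_df[(~pd.isnull(loans_df.apply_id)) & (loans_df.apply_status_risk == "放款成功")].copy()
    ok["loan_success_time"] = pd.to_datetime(ok["loan_success_time"])
    idx = ok.groupby("identity_no")["loan_success_time"].idxmin()
    return ok.loc[idx, ["identity_no", "team_code", "loan_success_time"]]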
# Build the team-shop relationship
def make_team_shop():
tmp_gp = zhiyong_loan_df.groupby(["team_code", "shop_code"])
team_shop = pd.DataFrame([], columns=['Left', 'Type', 'Right', '放款时间', '放款状态'])
idx = 0
for group, df in tmp_gp:
if any(pd.isnull(g) for g in group):  # group is a (team_code, shop_code) tuple
continue
loans = df[(~pd.isnull(df.apply_id)) & (df.apply_status_risk=="放款成功")]
if loans.shape[0] == 0:
team_shop.loc[idx] = {"Left": group[0], "Type": "所属门店", "Right": group[1], "放款时间": "", "放款状态": ",".join(df.apply_status_risk.unique())}
idx += 1
continue
min_loan_time = loans.loan_success_time.min()
shop_code = loans[loans.loan_success_time == min_loan_time].shop_code.values[0]
team_shop.loc[idx] = {"Left": group[0], "Type": "所属门店", "Right": shop_code, "放款时间": min_loan_time, "放款状态": "放款成功"}
idx += 1
tmp_df = pd.merge(team_df, borrower_entity_df[['team_code', 'shop_code']], how="left", left_on="编号", right_on="team_code")
tmp_df.drop_duplicates(tmp_df.columns, inplace=True)
apply_no_zhiyong = pd.merge(tmp_df[["编号", 'shop_code']], team_shop["Left"], how="left", left_on="编号", right_on="Left")
apply_no_zhiyong = apply_no_zhiyong[pd.isnull(apply_no_zhiyong.Left)]
apply_no_zhiyong.drop_duplicates(apply_no_zhiyong.columns, inplace=True)
apply_no_zhiyong.drop(["Left"], axis=1, inplace=True)
apply_no_zhiyong.rename(columns={"编号": "Left", "shop_code": "Right"}, inplace=True)
apply_no_zhiyong["Type"] = "所属门店"
apply_no_zhiyong["放款时间"] = ""
apply_no_zhiyong["放款状态"] = "未支用"
apply_no_zhiyong = apply_no_zhiyong[["Left", "Type", "Right", "放款时间", "放款状态"]]
return pd.concat([team_shop, apply_no_zhiyong])
team_shop = make_team_shop()
# Build the shop-area relationship
def make_shop_area():
tmp_gp = zhiyong_loan_df.groupby(["shop_code", "area_code"])
shop_area = pd.DataFrame([], columns=['Left', 'Type', 'Right', '放款时间', '放款状态'])
idx = 0
for group, df in tmp_gp:
if any(pd.isnull(g) for g in group):  # group is a (shop_code, area_code) tuple
continue
loans = df[(~
|
pd.isnull(df.apply_id)
|
pandas.isnull
|
import math
import os
from os.path import join as pjoin
import json
import copy
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import GPUtil
import pandas as pd
from multiprocessing import Pool
from tqdm import tqdm
import sklearn.metrics
from .config import print_config, class_labels
from .utils import (
anno_to_binary, cut_score, debug, display_imgs, info, gen_cwd_slash, labels_to_str, load_config, load_img,
np_macro_f1, str_to_labels, class_id_to_label, class_ids_to_label, combine_windows, chunk, compute_i_coords,
format_macro_f1_details, vec_to_str
)
# from .utils_heavy import predict, model_from_config
from .ignite_trainer import predict as predict
# def predict_and_save_scores(
# config,
# path_to_anno=None,
# path_to_imgs=None,
# save_scores_to=None,
# to_csv=None,
# ):
# model = model_from_config(config, which='latest')
# valid_anno = pd.read_csv(path_to_anno, index_col=0)
# predict(config)
# return valid_anno_predicted
def remove_scores_predicted(config):
cwd_slash = gen_cwd_slash(config)
pd.read_csv(cwd_slash('validation_predictions.csv'), index_col=0) \
.drop('Scores Predicted', axis=1) \
.to_csv(cwd_slash('validation_predictions.csv'))
def evaluate_validation_prediction(config):
info('evaluate_validation_prediction()')
cwd_slash = gen_cwd_slash(config)
anno = pd.read_csv(config['path_to_valid_anno_cache'], index_col=0, dtype=object)
prediction_df = pd.read_csv(cwd_slash('valid_predicted.csv'), index_col=0, dtype=object)
anno = anno.join(prediction_df, how='left')
# DEBUG BEGIN
anno.loc[:, ['Target', 'Predicted', 'folder', 'extension']].to_csv(cwd_slash('valid_anno_predicted.csv'))
# DEBUG END
y_true, y_pred = anno_to_binary(anno, config)
macro_f1_score, f1_details = np_macro_f1(y_true, y_pred, config, return_details=True)
print(format_macro_f1_details(f1_details, config))
print(f'macro_f1_score = {macro_f1_score}')
def final_corrections(config):
info('final_corrections()')
cwd_slash = gen_cwd_slash(config)
anno = pd.read_csv(cwd_slash('test_predicted.csv'), index_col=0)
# correct best submission [TODO: REMOVE: not for private leaderboard] --------------
# best_anno = pd.read_csv(cwd_slash('submission_587.csv'), index_col=0)
# rare_classes = [15, 27, 10, 8, 9, 17, 20, 24, 26]
# comparison_anno = anno.copy()
# comparison_anno['best'] = best_anno['Predicted']
# plot_imgs(
# config,
# comparison_anno.query('best != Predicted').sample(28),
# save_as='./tmp/best_submission_corrections.png',
# folder='data/test_minimaps',
# extension='jpg',
# )
# new_rows = []
# for id_, row in comparison_anno.iterrows():
# current_labels = str_to_labels(row['Predicted'])
# best_labels = str_to_labels(row['best'])
# for c in rare_classes:
# if c in current_labels and c not in best_labels:
# debug(f"removing {c} from {id_}")
# current_labels.remove(c)
# if c not in current_labels and c in best_labels:
# debug(f"adding {c} to {id_}")
# current_labels.append(c)
# new_row = {
# 'Id': id_,
# 'Predicted': labels_to_str(current_labels),
# }
# new_rows.append(new_row)
# anno = pd.DataFrame.from_records(new_rows).set_index('Id')
# debug(f"anno ({len(anno)}) =\n{anno.head(10)}")
# correct leaked --------------
# pairs_anno = pd.read_csv('data/identical_pairs.csv')
# hpa_anno = pd.read_csv('data/hpa_public_imgs.csv', index_col=0)
# correction_anno = pairs_anno.join(hpa_anno, how='left', on=['hpa_id'])\
# .join(anno, how='left', on=['test_id'])
# correction_anno['Target'] = [labels_to_str(str_to_labels(x)) for x in correction_anno['Target']]
# debug(f"correction_anno['test_id'] = {correction_anno['test_id']}")
# debug(f"len = {len(anno.loc[correction_anno['test_id'], 'Predicted'].values)}")
# correction_anno['Predicted'] = anno.loc[correction_anno['test_id'], 'Predicted'].values
# actual_corrections = correction_anno.query('Predicted != Target').set_index('test_id')
# # DEBUG BEGIN
# # plot_imgs(config, actual_corrections, folder='data/test_minimaps', extension='jpg')
# # DEBUG END
# debug(f"making {len(correction_anno)} corrections, {len(actual_corrections)} are actually different")
# debug(f"actual_corrections =\n{actual_corrections}")
# anno.loc[correction_anno['test_id'], 'Predicted'] = correction_anno['Target'].values
# correct leaked 2 --------------
pairs_anno = pd.read_csv('data/identical_pairs_new_fixed.csv')
for i_begin, i_end in chunk(len(pairs_anno), 24):
plot_imgs(
config,
pairs_anno.iloc[i_begin:i_end].drop('test_id', axis=1).set_index('hpa_id'),
save_as=f'./tmp/diff_{i_begin}_hpa.jpg',
folder='data/hpa_public_imgs',
extension='jpg',
background_color=None,
channel=None,
dpi=100,
)
plot_imgs(
config,
pairs_anno.iloc[i_begin:i_end].drop('hpa_id', axis=1).set_index('test_id'),
save_as=f'./tmp/diff_{i_begin}_test.jpg',
folder='data/test_full_size',
extension='tif',
background_color=None,
channel=['red', 'green', 'blue'],
dpi=100,
)
hpa_anno = pd.read_csv('data/hpa_public_imgs.csv', index_col=0)
correction_anno = pairs_anno.join(hpa_anno, how='left', on=['hpa_id'])\
.join(anno, how='left', on=['test_id'])
correction_anno['Target'] = [labels_to_str(str_to_labels(x)) for x in correction_anno['Target']]
debug(f"correction_anno['test_id'] = {correction_anno['test_id']}")
debug(f"len = {len(anno.loc[correction_anno['test_id'], 'Predicted'].values)}")
correction_anno['Predicted'] = anno.loc[correction_anno['test_id'], 'Predicted'].values
actual_corrections = correction_anno.query('Predicted != Target').set_index('test_id')
# DEBUG BEGIN
# plot_imgs(config, actual_corrections, folder='data/test_minimaps', extension='jpg')
# DEBUG END
debug(f"making {len(correction_anno)} corrections, {len(actual_corrections)} are actually different")
debug(f"actual_corrections =\n{actual_corrections}")
anno.loc[correction_anno['test_id'], 'Predicted'] = correction_anno['Target'].values
# DEBUG BEGIN
# plot_imgs(
# config,
# anno.loc[[27 in str_to_labels(p) for p in anno['Predicted']]],
# folder='data/test_minimaps',
# extension='jpg'
# )
# DEBUG END
anno.to_csv(cwd_slash('test_predicted_corrected.csv'))
# def list_confusion(config):
# fn_counts_list = {}
# class_labels = [f'{k}-{classes[k]}' for k in range(n_classes)]
# for which_class in tqdm(range(n_classes)):
# cwd_slash = gen_cwd_slash(config)
# anno = pd.read_csv(cwd_slash('validation_predictions.csv'), index_col=0)
# y_true, y_pred = anno_to_binary(anno)
# fn = y_true * (1 - y_pred)
# fp = (1 - y_true) * y_pred
# i_fn_predictions = np.nonzero(fn[:, which_class])[0]
# fn_counts = fp[i_fn_predictions, :].sum(axis=0) / len(i_fn_predictions)
# fn_counts_list[class_labels[which_class]] = fn_counts
# # out = pd.Series(fn_counts, index=pd.Index(range(n_classes), name='class'))\
# # .sort_values(ascending=False)\
# # .head(3)
# pd.DataFrame(fn_counts_list, index=class_labels).to_csv('./tmp/confusion.csv')
def plot_imgs(
config,
anno,
save_as='./tmp/imgs.jpg',
folder=None,
extension=None,
background_color=None,
channel=None,
dpi=100,
):
img_list = []
for id_, row in anno.iterrows():
img = load_img(
id_,
config,
resize=False,
folder=row.get('folder') or folder,
channel=channel,
extension=row.get('extension') or extension,
)
# if type(channel) is str:
# channel = {
# 'red': 0,
# 'green': 1,
# 'blue': 2,
# 'yellow': 3,
# }.get(channel)
# if channel is not None:
# img = img[:, :, channel]
debug(f' - Loaded image {id_} with size {img.shape}')
img_label = '\n'.join([f'{id_}'] + [f'{k} = {v}' for k, v in row.items()])
img_list.append((img, img_label))
display_imgs(
img_list,
save_as=save_as,
background_color=background_color,
dpi=dpi,
)
def plot_tfpn_examples(config, which_class, max_n_imgs=28, output_folder='./tmp'):
cwd_slash = gen_cwd_slash(config)
anno = pd.read_csv(cwd_slash('validation_predictions.csv'), index_col=0)
y_true, y_pred = anno_to_binary(anno)
y_true = y_true[:, which_class]
y_pred = y_pred[:, which_class]
def plot_imgs(selector, filename, background_color):
debug(f'selector = {selector}')
if type(config['score_threshold']) is list:
score_threshold = config['score_threshold'][which_class]
else:
score_threshold = config['score_threshold']
tp_idxs = np.nonzero(selector > score_threshold)[0]
if len(tp_idxs) > max_n_imgs:
sample_idxs = np.sort(np.random.choice(range(len(tp_idxs)), max_n_imgs, replace=False))
tp_idxs = tp_idxs[sample_idxs]
img_list = []
for idx in tp_idxs:
row = anno.iloc[idx]
img_id = row.name
labels_true = class_ids_to_label(str_to_labels(row['Target']), config)
labels_pred = class_ids_to_label(str_to_labels(row['Predicted']), config)
img_label = '\n'.join([
f'{img_id}',
f'T: {labels_true}',
f'P: {labels_pred}',
])
# img = load_img(img_id, self.config, resize=False, folder='./data/train_full_size', extension='tif')
img = load_img(
img_id,
config,
resize=False,
folder=config['path_to_valid'],
channel=None,
extension=config['img_extension'],
)
debug(f' - Loaded image {img_id} with size {img.shape}')
img_list.append((img, img_label))
display_imgs(
img_list,
save_as=filename,
background_color=background_color,
)
def out_slash(fn):
return pjoin(output_folder, fn)
plot_imgs(y_true * y_pred, out_slash(f'class_{which_class}_true_positives.png'), 'white')
plot_imgs((1 - y_true) * y_pred, out_slash(f'class_{which_class}_false_positives.png'), 'yellow')
plot_imgs(y_true * (1 - y_pred), out_slash(f'class_{which_class}_false_negatives.png'), 'blue')
# plot_imgs((1 - y_true) * (1 - y_pred), out_slash(f'class_{which_class}_true_negatives.png'), 'black')
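# The three selectors passed to plot_imgs above are plain elementwise products on 0/1 vectors:
# y_true*y_pred marks true positives, (1-y_true)*y_pred false positives, y_true*(1-y_pred)
# false negatives. A toy check of that arithmetic (illustration only):
def _tfpn_selector_example():
    y_true = np.array([1, 1, 0, 0])
    y_pred = np.array([1, 0, 1, 0])
    # -> TP mask [1, 0, 0, 0], FP mask [0, 0, 1, 0], FN mask [0, 1, 0, 0]
    return y_true * y_pred, (1 - y_true) * y_pred, y_true * (1 - y_pred)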
def add_extra_data_into_train_anno(config):
cwd_slash = gen_cwd_slash(config)
train_anno = pd.read_csv(cwd_slash('train_windowed_anno.csv'), index_col=0)
valid_anno = pd.read_csv(cwd_slash('valid_windowed_anno.csv'), index_col=0)
train_with_hpa_anno = pd.read_csv('data/train_with_hpa.csv', index_col=0)
train_windowed_anno = pd.read_csv('data/train_windowed.csv', index_col=0)
hpa_ids = set(train_with_hpa_anno.index)
existing_ids = set(valid_anno['source_img_id']).union(train_anno['source_img_id'])
new_ids = hpa_ids.difference(existing_ids)
extra_train_anno = train_with_hpa_anno.loc[new_ids]
debug(f'extra_train_anno ({len(extra_train_anno)}) =\n{extra_train_anno.head(10)}')
extra_train_windowed_anno = train_windowed_anno.join(extra_train_anno, how='right', on=['source_img_id'])
debug(f'extra_train_windowed_anno ({len(extra_train_windowed_anno)}) =\n{extra_train_windowed_anno.head(10)}')
pd.concat([train_anno, extra_train_windowed_anno]).to_csv(cwd_slash('train_windowed_anno.csv'))
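# add_extra_data_into_train_anno() hinges on join(..., how='right', on=['source_img_id']): the
# right frame's index (source image ids) is matched against the left frame's source_img_id
# column, so every window of a newly added image is kept and annotated. A toy illustration with
# hypothetical frames (not project data):
def _right_join_on_example():
    windows = pd.DataFrame({"source_img_id": ["a", "a", "b"], "window": [0, 1, 0]})
    extra = pd.DataFrame({"Target": ["1 3"]}, index=pd.Index(["a"], name="source_img_id"))
    # -> two rows, both for image "a"; image "b" is dropped because it is not in `extra`.
    return windows.join(extra, how="right", on=["source_img_id"])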
# def calibrate_one_task(task):
# i_class = task['i_class']
# mat_pred_windowed = task['mat_pred_windowed']
# mat_true = task['mat_true']
# alpha = task['alpha']
# i_windowss = task['i_windowss']
# beta_values = task['beta_values']
# config = task['config']
# details_list = []
# for beta in beta_values:
# vec_true = mat_true[:, i_class]
# vec_pred_windowed = mat_pred_windowed[:, i_class]
# list_pred = []
# for i_source, i_windows in enumerate(i_windowss):
# combined_prediction = vec_pred_windowed[i_windows].mean() + vec_pred_windowed[i_windows].mean()
# list_pred.append(combined_prediction)
# vec_pred = np.array(list_pred)
# f1 = np_macro_f1(vec_true, vec_pred, config)
# details_list.append({
# 'i_class': i_class,
# 'alpha': alpha,
# 'beta': beta,
# 'f1': f1,
# })
# # debug(f'i_class = {i_class}, alpha = {alpha}, beta = {beta}, f1 = {f1}, best_f1 = {best_f1}')
# details_df = pd.DataFrame.from_records(details_list)
# return {
# 'task': task,
# 'details_df': details_df,
# }
# def calibrate_windowed_score(
# config,
# n_threads=70,
# n_cols=7,
# save_graph_to='./tmp/calibrate_score_threshold.png',
# epsilon=1e-7,
# ):
# info('calibrate_windowed_score()')
# cwd_slash = gen_cwd_slash(config)
# alpha_values = range(10)
# beta_values = np.linspace(0, 1, 21)
# mat_pred_windowed = np.load(cwd_slash('valid_windowed_scores.npy'))
# valid_anno = pd.read_csv(config['path_to_valid_anno_cache'])
# mat_true = np.zeros((valid_anno.shape[0], 28))
# for i, target_str in enumerate(valid_anno['Target']):
# targets = str_to_labels(target_str)
# mat_true[np.ix_([i], targets)] = 1
# valid_windowed_anno = pd.read_csv(cwd_slash('valid_windowed_anno.csv'))
# valid_windowed_anno['row_number'] = valid_windowed_anno.index
# grouped = valid_windowed_anno.groupby('source_img_id')
# source_id_to_window_row_nums = {id_: group['row_number'].values.tolist() for id_, group in grouped}
# i_windowss = [source_id_to_window_row_nums[id_] for id_ in valid_anno['Id']]
# task_list = [
# {
# 'i_class': i_class,
# 'alpha': alpha,
# 'mat_pred_windowed': mat_pred_windowed,
# 'mat_true': mat_true,
# 'i_windowss': i_windowss,
# 'beta_values': beta_values,
# 'config': config,
# } for i_class in range(config['_n_classes']) for alpha in alpha_values
# ]
# details_dfs = []
# with Pool(n_threads) as p:
# result_iter = p.imap_unordered(calibrate_one_task, task_list)
# for i_result, result in enumerate(result_iter):
# info(
# f"({i_result}/{len(task_list)}) "
# f"i_class = {result['task']['i_class']}, "
# f"alpha = {result['task']['alpha']} is done"
# )
# details_dfs.append(result['details_df'])
# details_df = pd.concat(details_dfs)
# if save_graph_to is not None:
# n_rows = math.ceil(config['_n_classes'] / n_cols)
# plt.figure(figsize=(n_cols * 10, n_rows * 10))
# for i_class, group_df in details_df.groupby('i_class'):
# mat = group_df.pivot(index='beta', columns='alpha', values='f1')
# plt.subplot(n_rows, n_cols, i_class + 1, sharex=plt.gca(), sharey=plt.gca())
# plt.imshow(mat, aspect='auto')
# plt.xticks(range(len(alpha_values)), alpha_values)
# plt.yticks(range(len(beta_values)), beta_values)
# plt.text(0, 1, f'{i_class}', transform=plt.gca().transAxes)
# plt.savefig(save_graph_to, dpi=100)
# debug(f'Saved graph to {save_graph_to}')
# print(details_df)
# details_df.to_csv(cwd_slash('calibrate_windowed_score_details.csv'), index=False)
# debug(f"saved to {cwd_slash('calibrate_windowed_score_details.csv')}")
# best_df = pd.concat([group.sort_values('f1').tail(1) for i_class, group in details_df.groupby('i_class')])
# best_df['manually_modified'] = False
# best_df.to_csv(cwd_slash('calibrate_windowed_score.csv'), index=False)
# debug(f"saved to {cwd_slash('calibrate_windowed_score.csv')}")
# def calibrate_score_threshold(config, n_cols=7, save_graph_to='./tmp/calibrate_score_threshold.png', epsilon=1e-7):
# info('calibrate_score_threshold()')
# cwd_slash = gen_cwd_slash(config)
# n_rows = math.ceil(config['_n_classes'] / n_cols)
# mat_pred = np.load(cwd_slash('valid_scores.npy'))
# anno = pd.read_csv(cwd_slash('valid_windowed_anno.csv'))
# mat_true = np.zeros_like(mat_pred)
# for i, target_str in enumerate(anno['Target']):
# targets = str_to_labels(target_str)
# mat_true[np.ix_([i], targets)] = 1
# if save_graph_to is not None:
# plt.figure(figsize=(n_cols * 10, n_rows * 10))
# best_ths = []
# for class_id in tqdm(config['classes']):
# thresholds = np.round(np.linspace(0, 1, 1001), 3)
# f1_scores = np.zeros_like(thresholds)
# ps = []
# rs = []
# for i_th, th in enumerate(thresholds):
# y_pred = mat_pred[:, i_class]
# y_pred = np.where(y_pred < th, np.zeros_like(y_pred), np.ones_like(y_pred))
# y_true = mat_true[:, i_class]
# tp = np.sum(y_true * y_pred, axis=0)
# # tn = np.sum((1 - y_true) * (1 - y_pred), axis=0)
# fp = np.sum((1 - y_true) * y_pred, axis=0)
# fn = np.sum(y_true * (1 - y_pred), axis=0)
# p = tp / (tp + fp + epsilon)
# r = tp / (tp + fn + epsilon)
# ps.append(p)
# rs.append(r)
# out = 2 * p * r / (p + r + epsilon)
# # replace all NaN's with 0's
# out = np.where(np.isnan(out), np.zeros_like(out), out)
# f1_scores[i_th] = out
# if save_graph_to is not None:
# plt.subplot(n_rows, n_cols, i_class + 1, sharex=plt.gca(), sharey=plt.gca())
# plt.plot(thresholds, f1_scores)
# plt.plot(thresholds, ps)
# plt.plot(thresholds, rs)
# plt.text(0, 1, f'{i_class}', transform=plt.gca().transAxes)
# # debug(f'thresholds = {thresholds}')
# # debug(f'f1_scores = {f1_scores}')
# best_th = thresholds[np.argmax(f1_scores)]
# best_ths.append(best_th)
# if save_graph_to is not None:
# plt.savefig(save_graph_to, dpi=100)
# debug(f'Saved graph to {save_graph_to}')
# debug(f'best_ths = {best_ths}')
# with open(cwd_slash('calibrated_score_threshold.json'), 'w') as f:
# json.dump(best_ths, f)
def predict_for_valid(config):
cwd_slash = gen_cwd_slash(config)
valid_windowed_anno =
|
pd.read_csv(config['path_to_valid_windowed_anno_cache'], index_col=0)
|
pandas.read_csv
|
import csv
import json
import logging
import itertools
import os
import re
import sys
import zipfile
import tempfile
import threading
from collections import OrderedDict, defaultdict
from os.path import basename
from xml.etree.ElementTree import ElementTree
import pandas as pd
from pandas import DataFrame
from pylab import *
from analyzer.models import (Action, Project, Server, ServerMonitoringData,
Test, TestActionAggregateData, TestActionData,
TestAggregate, TestData, TestDataResolution)
from controller.models import TestRunning
logger = logging.getLogger(__name__)
def percentile(n):
def percentile_(x):
return np.percentile(x, n)
percentile_.__name__ = 'percentile_%s' % n
return percentile_
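# Usage sketch for percentile(): because the closure gets an explicit __name__, pandas labels
# the aggregated column "percentile_<n>". Toy data, illustration only.
def _percentile_usage_example():
    demo = pd.DataFrame({'url': ['a', 'a', 'b'], 'elapsed': [10, 30, 20]})
    # Result has one row per url with columns 'mean' and 'percentile_90'.
    return demo.groupby('url')['elapsed'].agg(['mean', percentile(90)])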
def mask(df, f):
return df[f(df)]
def ord_to_char(v, p=None):
return chr(int(v))
def get_dir_size(path):
total_size = 0
for dirpath, dirnames, filenames in os.walk(path):
for f in filenames:
if not f == 'checksum':
fp = os.path.join(dirpath, f)
total_size += os.path.getsize(fp)
return total_size
def zip_results_file(file):
if os.path.exists(file + '.zip'):
os.remove(file + '.zip')
logger.info("Move results file " + file + " to zip archive")
with zipfile.ZipFile(
file + ".zip", "w", zipfile.ZIP_DEFLATED,
allowZip64=True) as zip_file:
zip_file.write(file, basename(file))
os.remove(file)
logger.info("File was packed, original file was deleted")
def zip_dir(dirPath, zipPath):
zipf = zipfile.ZipFile(zipPath, mode='w', allowZip64=True)
lenDirPath = len(dirPath)
for root, _, files in os.walk(dirPath):
for file in files:
filePath = os.path.join(root, file)
zipf.write(filePath, filePath[lenDirPath:])
zipf.close()
dateconv = np.vectorize(datetime.datetime.fromtimestamp)
def parse_results_in_dir(results_dir):
id = add_running_test(results_dir)
generate_data(id)
running_test = TestRunning.objects.get(id=id)
running_test.delete()
logger.info("Data was parsed, directory: {0}".format(results_dir))
def add_running_test(root):
# Parse data from Jenkins Job folder
build_xml = ElementTree()
build_parameters = []
display_name = "unknown"
start_time = 0
duration = 0
project_id = 0
jmeter_results_path = os.path.join(root, "jmeter.jtl")
monitoring_data = os.path.join(root, "monitoring.data")
build_xml_path = os.path.join(root, "build.xml")
if os.path.isfile(build_xml_path):
build_xml.parse(build_xml_path)
build_tag = build_xml.getroot()
for params in build_tag:
if params.tag == 'actions':
parameters = params.find('.//parameters')
for parameter in parameters:
name = parameter.find('name')
value = parameter.find('value')
build_parameters.append([name.text, value.text])
elif params.tag == 'startTime':
start_time = int(params.text)
elif params.tag == 'duration':
duration = int(params.text)
elif params.tag == 'displayName':
display_name = params.text
project_name = re.search('/([^/]+)/builds', root).group(1)
if not Project.objects.filter(project_name=project_name).exists():
project = Project(project_name=project_name, show=True)
project.save()
project_id = project.id
build_number = int(re.search(r'/builds/(\d+)', root).group(1))
running_test = TestRunning(
project_id=project_id,
build_number=build_number,
result_file_dest=jmeter_results_path,
monitoring_file_dest=monitoring_data,
log_file_dest='',
display_name=display_name,
start_time=start_time,
pid=0,
jmeter_remote_instances=None,
workspace=root,
is_running=True,
end_time=start_time + duration, )
running_test.save()
return running_test.id
def unpack_test_results_data(test_id):
'''Un-pack Jmeter result file if exists'''
test_path = Test.objects.get(id=test_id).path
jmeter_results_file_path = os.path.join(test_path, 'jmeter.jtl')
if not os.path.exists(jmeter_results_file_path):
logger.info("Results file does not exists, try to check archive")
jmeter_results_zip = jmeter_results_file_path + ".zip"
if os.path.exists(jmeter_results_zip):
logger.info("Archive file was found: " + jmeter_results_zip)
with zipfile.ZipFile(jmeter_results_zip, "r") as z:
z.extractall(test_path)
return jmeter_results_file_path
def generate_test_results_data(test_id,
project_id,
jmeter_results_file_path='',
monitoring_results_file_path='',
jmeter_results_file_fields=[],
monitoring_results_file_fields=[],
data_resolution='1Min',
mode=''):
data_resolution_id = TestDataResolution.objects.get(
frequency=data_resolution).id
if not jmeter_results_file_fields:
jmeter_results_file_fields = [
'elapsed', 'url', 'responseCode', 'success', 'threadName',
'failureMessage', 'grpThreads', 'allThreads'
]
if not monitoring_results_file_fields:
monitoring_results_file_fields = [
'server_name', 'Memory_used', 'Memory_free', 'Memory_buff',
'Memory_cached', 'Net_recv', 'Net_send', 'Disk_read', 'Disk_write',
'System_la1', 'CPU_user', 'CPU_system', 'CPU_iowait'
]
jmeter_results_file = jmeter_results_file_path
if os.path.exists(jmeter_results_file):
df = pd.DataFrame()
if os.stat(jmeter_results_file).st_size > 1000007777:
logger.debug("Executing a parse for a huge file")
chunks = pd.read_table(
jmeter_results_file, sep=',', index_col=0, chunksize=3000000)
for chunk in chunks:
chunk.columns = jmeter_results_file_fields
chunk = chunk[~chunk['url'].str.contains('exclude_', na=False)]
df = pd.concat([df, chunk])
else:
df = pd.read_csv(
jmeter_results_file, index_col=0, low_memory=False)
df.columns = jmeter_results_file_fields
df = df[~df['url'].str.contains('exclude_', na=False)]
# If gather data "online" just clean result file
zip_results_file(jmeter_results_file)
df.columns = jmeter_results_file_fields
df.index = pd.to_datetime(dateconv((df.index.values / 1000)))
num_lines = df['elapsed'].count()
logger.debug('Number of lines in file: {}'.format(num_lines))
unique_urls = df['url'].unique()
for url in unique_urls:
url = str(url)
if not Action.objects.filter(
url=url, project_id=project_id).exists():
logger.debug("Adding new action: " + str(url) + " project_id: "
+ str(project_id))
a = Action(url=url, project_id=project_id)
a.save()
a = Action.objects.get(url=url, project_id=project_id)
action_id = a.id
if not TestActionData.objects.filter(
action_id=action_id,
test_id=test_id,
data_resolution_id=data_resolution_id).exists():
logger.debug("Adding action data: {}".format(url))
df_url = df[(df.url == url)]
url_data = pd.DataFrame()
df_url_gr_by_ts = df_url.groupby(
pd.Grouper(freq=data_resolution))
url_data['avg'] = df_url_gr_by_ts.elapsed.mean()
url_data['median'] = df_url_gr_by_ts.elapsed.median()
url_data['count'] = df_url_gr_by_ts.success.count()
df_url_gr_by_ts_only_errors = df_url[(
df_url.success == False
)].groupby(pd.Grouper(freq=data_resolution))
url_data[
'errors'] = df_url_gr_by_ts_only_errors.success.count()
url_data['test_id'] = test_id
url_data['url'] = url
output_json = json.loads(
url_data.to_json(orient='index', date_format='iso'),
object_pairs_hook=OrderedDict)
for row in output_json:
data = {
'timestamp': row,
'avg': output_json[row]['avg'],
'median': output_json[row]['median'],
'count': output_json[row]['count'],
'url': output_json[row]['url'],
'errors': output_json[row]['errors'],
'test_id': output_json[row]['test_id'],
}
test_action_data = TestActionData(
test_id=output_json[row]['test_id'],
action_id=action_id,
data_resolution_id=data_resolution_id,
data=data)
test_action_data.save()
if not TestActionAggregateData.objects.filter(
action_id=action_id, test_id=test_id).exists():
url_agg_data = dict(
json.loads(df_url['elapsed'].describe()
.to_json()))
url_agg_data['99%'] = float(df_url['elapsed'].quantile(.99))
url_agg_data['90%'] = float(df_url['elapsed'].quantile(.90))
url_agg_data['weight'] = float(
df_url['elapsed'].sum())
url_agg_data['errors'] = float(df_url[(
df_url['success'] == False)]['success'].count())
print(url_agg_data)
test_action_aggregate_data = TestActionAggregateData(
test_id=test_id,
action_id=action_id,
data=url_agg_data)
test_action_aggregate_data.save()
if not TestData.objects.filter(
test_id=test_id,
data_resolution_id=data_resolution_id).exists():
test_overall_data =
|
pd.DataFrame()
|
pandas.DataFrame
|
"""Functions to visualize matrices of data."""
import warnings
import matplotlib as mpl
from matplotlib.collections import LineCollection
import matplotlib.pyplot as plt
from matplotlib import gridspec
import numpy as np
import pandas as pd
try:
from scipy.cluster import hierarchy
_no_scipy = False
except ImportError:
_no_scipy = True
from . import cm
from .axisgrid import Grid
from .utils import (
despine,
axis_ticklabels_overlap,
relative_luminance,
to_utf8,
_draw_figure,
)
__all__ = ["heatmap", "clustermap"]
def _index_to_label(index):
"""Convert a pandas index or multiindex to an axis label."""
if isinstance(index, pd.MultiIndex):
return "-".join(map(to_utf8, index.names))
else:
return index.name
def _index_to_ticklabels(index):
"""Convert a pandas index or multiindex into ticklabels."""
if isinstance(index, pd.MultiIndex):
return ["-".join(map(to_utf8, i)) for i in index.values]
else:
return index.values
def _convert_colors(colors):
"""Convert either a list of colors or nested lists of colors to RGB."""
to_rgb = mpl.colors.to_rgb
try:
to_rgb(colors[0])
# If this works, there is only one level of colors
return list(map(to_rgb, colors))
except ValueError:
# If we get here, we have nested lists
return [list(map(to_rgb, l)) for l in colors]
def _matrix_mask(data, mask):
"""Ensure that data and mask are compatible and add missing values.
Values will be plotted for cells where ``mask`` is ``False``.
``data`` is expected to be a DataFrame; ``mask`` can be an array or
a DataFrame.
"""
if mask is None:
mask = np.zeros(data.shape, bool)
if isinstance(mask, np.ndarray):
# For array masks, ensure that shape matches data then convert
if mask.shape != data.shape:
raise ValueError("Mask must have the same shape as data.")
mask = pd.DataFrame(mask,
index=data.index,
columns=data.columns,
dtype=bool)
elif isinstance(mask, pd.DataFrame):
# For DataFrame masks, ensure that semantic labels match data
if not mask.index.equals(data.index) \
and mask.columns.equals(data.columns):
err = "Mask must have the same index and columns as data."
raise ValueError(err)
# Add any cells with missing data to the mask
# This works around an issue where `plt.pcolormesh` doesn't represent
# missing data properly
mask = mask | pd.isnull(data)
return mask
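# Usage sketch for _matrix_mask (toy data; added here for illustration, not part of seaborn):
def _matrix_mask_example():
    data = pd.DataFrame([[1.0, np.nan], [3.0, 4.0]])
    mask = _matrix_mask(data, np.zeros(data.shape, bool))
    # Only the NaN cell (row 0, column 1) ends up masked.
    return mask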
class _HeatMapper:
"""Draw a heatmap plot of a matrix with nice labels and colormaps."""
def __init__(self, data, vmin, vmax, cmap, center, robust, annot, fmt,
annot_kws, cbar, cbar_kws,
xticklabels=True, yticklabels=True, mask=None):
"""Initialize the plotting object."""
# We always want to have a DataFrame with semantic information
# and an ndarray to pass to matplotlib
if isinstance(data, pd.DataFrame):
plot_data = data.values
else:
plot_data = np.asarray(data)
data = pd.DataFrame(plot_data)
# Validate the mask and convert to DataFrame
mask = _matrix_mask(data, mask)
plot_data = np.ma.masked_where(np.asarray(mask), plot_data)
# Get good names for the rows and columns
xtickevery = 1
if isinstance(xticklabels, int):
xtickevery = xticklabels
xticklabels = _index_to_ticklabels(data.columns)
elif xticklabels is True:
xticklabels = _index_to_ticklabels(data.columns)
elif xticklabels is False:
xticklabels = []
ytickevery = 1
if isinstance(yticklabels, int):
ytickevery = yticklabels
yticklabels = _index_to_ticklabels(data.index)
elif yticklabels is True:
yticklabels = _index_to_ticklabels(data.index)
elif yticklabels is False:
yticklabels = []
if not len(xticklabels):
self.xticks = []
self.xticklabels = []
elif isinstance(xticklabels, str) and xticklabels == "auto":
self.xticks = "auto"
self.xticklabels = _index_to_ticklabels(data.columns)
else:
self.xticks, self.xticklabels = self._skip_ticks(xticklabels,
xtickevery)
if not len(yticklabels):
self.yticks = []
self.yticklabels = []
elif isinstance(yticklabels, str) and yticklabels == "auto":
self.yticks = "auto"
self.yticklabels = _index_to_ticklabels(data.index)
else:
self.yticks, self.yticklabels = self._skip_ticks(yticklabels,
ytickevery)
# Get good names for the axis labels
xlabel = _index_to_label(data.columns)
ylabel = _index_to_label(data.index)
self.xlabel = xlabel if xlabel is not None else ""
self.ylabel = ylabel if ylabel is not None else ""
# Determine good default values for the colormapping
self._determine_cmap_params(plot_data, vmin, vmax,
cmap, center, robust)
# Sort out the annotations
if annot is None or annot is False:
annot = False
annot_data = None
else:
if isinstance(annot, bool):
annot_data = plot_data
else:
annot_data = np.asarray(annot)
if annot_data.shape != plot_data.shape:
err = "`data` and `annot` must have same shape."
raise ValueError(err)
annot = True
# Save other attributes to the object
self.data = data
self.plot_data = plot_data
self.annot = annot
self.annot_data = annot_data
self.fmt = fmt
self.annot_kws = {} if annot_kws is None else annot_kws.copy()
self.cbar = cbar
self.cbar_kws = {} if cbar_kws is None else cbar_kws.copy()
def _determine_cmap_params(self, plot_data, vmin, vmax,
cmap, center, robust):
"""Use some heuristics to set good defaults for colorbar and range."""
# plot_data is a np.ma.array instance
calc_data = plot_data.astype(float).filled(np.nan)
if vmin is None:
if robust:
vmin = np.nanpercentile(calc_data, 2)
else:
vmin = np.nanmin(calc_data)
if vmax is None:
if robust:
vmax = np.nanpercentile(calc_data, 98)
else:
vmax = np.nanmax(calc_data)
self.vmin, self.vmax = vmin, vmax
# Choose default colormaps if not provided
if cmap is None:
if center is None:
self.cmap = cm.rocket
else:
self.cmap = cm.icefire
elif isinstance(cmap, str):
self.cmap = mpl.cm.get_cmap(cmap)
elif isinstance(cmap, list):
self.cmap = mpl.colors.ListedColormap(cmap)
else:
self.cmap = cmap
# Recenter a divergent colormap
if center is not None:
# Copy bad values
# in mpl<3.2 only masked values are honored with "bad" color spec
# (see https://github.com/matplotlib/matplotlib/pull/14257)
bad = self.cmap(np.ma.masked_invalid([np.nan]))[0]
# under/over values are set for sure when cmap extremes
# do not map to the same color as +-inf
under = self.cmap(-np.inf)
over = self.cmap(np.inf)
under_set = under != self.cmap(0)
over_set = over != self.cmap(self.cmap.N - 1)
vrange = max(vmax - center, center - vmin)
normlize = mpl.colors.Normalize(center - vrange, center + vrange)
cmin, cmax = normlize([vmin, vmax])
cc = np.linspace(cmin, cmax, 256)
self.cmap = mpl.colors.ListedColormap(self.cmap(cc))
self.cmap.set_bad(bad)
if under_set:
self.cmap.set_under(under)
if over_set:
self.cmap.set_over(over)
def _annotate_heatmap(self, ax, mesh):
"""Add textual labels with the value in each cell."""
mesh.update_scalarmappable()
height, width = self.annot_data.shape
xpos, ypos = np.meshgrid(np.arange(width) + .5, np.arange(height) + .5)
for x, y, m, color, val in zip(xpos.flat, ypos.flat,
mesh.get_array(), mesh.get_facecolors(),
self.annot_data.flat):
if m is not np.ma.masked:
lum = relative_luminance(color)
text_color = ".15" if lum > .408 else "w"
annotation = ("{:" + self.fmt + "}").format(val)
text_kwargs = dict(color=text_color, ha="center", va="center")
text_kwargs.update(self.annot_kws)
ax.text(x, y, annotation, **text_kwargs)
def _skip_ticks(self, labels, tickevery):
"""Return ticks and labels at evenly spaced intervals."""
n = len(labels)
if tickevery == 0:
ticks, labels = [], []
elif tickevery == 1:
ticks, labels = np.arange(n) + .5, labels
else:
start, end, step = 0, n, tickevery
ticks = np.arange(start, end, step) + .5
labels = labels[start:end:step]
return ticks, labels
def _auto_ticks(self, ax, labels, axis):
"""Determine ticks and ticklabels that minimize overlap."""
transform = ax.figure.dpi_scale_trans.inverted()
bbox = ax.get_window_extent().transformed(transform)
size = [bbox.width, bbox.height][axis]
axis = [ax.xaxis, ax.yaxis][axis]
tick, = axis.set_ticks([0])
fontsize = tick.label1.get_size()
max_ticks = int(size // (fontsize / 72))
if max_ticks < 1:
return [], []
tick_every = len(labels) // max_ticks + 1
tick_every = 1 if tick_every == 0 else tick_every
ticks, labels = self._skip_ticks(labels, tick_every)
return ticks, labels
def plot(self, ax, cax, kws):
"""Draw the heatmap on the provided Axes."""
# Remove all the Axes spines
despine(ax=ax, left=True, bottom=True)
# setting vmin/vmax in addition to norm is deprecated
# so avoid setting if norm is set
if "norm" not in kws:
kws.setdefault("vmin", self.vmin)
kws.setdefault("vmax", self.vmax)
# Draw the heatmap
mesh = ax.pcolormesh(self.plot_data, cmap=self.cmap, **kws)
# Set the axis limits
ax.set(xlim=(0, self.data.shape[1]), ylim=(0, self.data.shape[0]))
# Invert the y axis to show the plot in matrix form
ax.invert_yaxis()
# Possibly add a colorbar
if self.cbar:
cb = ax.figure.colorbar(mesh, cax, ax, **self.cbar_kws)
cb.outline.set_linewidth(0)
# If rasterized is passed to pcolormesh, also rasterize the
# colorbar to avoid white lines on the PDF rendering
if kws.get('rasterized', False):
cb.solids.set_rasterized(True)
# Add row and column labels
if isinstance(self.xticks, str) and self.xticks == "auto":
xticks, xticklabels = self._auto_ticks(ax, self.xticklabels, 0)
else:
xticks, xticklabels = self.xticks, self.xticklabels
if isinstance(self.yticks, str) and self.yticks == "auto":
yticks, yticklabels = self._auto_ticks(ax, self.yticklabels, 1)
else:
yticks, yticklabels = self.yticks, self.yticklabels
ax.set(xticks=xticks, yticks=yticks)
xtl = ax.set_xticklabels(xticklabels)
ytl = ax.set_yticklabels(yticklabels, rotation="vertical")
plt.setp(ytl, va="center") # GH2484
# Possibly rotate them if they overlap
_draw_figure(ax.figure)
if axis_ticklabels_overlap(xtl):
plt.setp(xtl, rotation="vertical")
if axis_ticklabels_overlap(ytl):
plt.setp(ytl, rotation="horizontal")
# Add the axis labels
ax.set(xlabel=self.xlabel, ylabel=self.ylabel)
# Annotate the cells with the formatted values
if self.annot:
self._annotate_heatmap(ax, mesh)
def heatmap(
data, *,
vmin=None, vmax=None, cmap=None, center=None, robust=False,
annot=None, fmt=".2g", annot_kws=None,
linewidths=0, linecolor="white",
cbar=True, cbar_kws=None, cbar_ax=None,
square=False, xticklabels="auto", yticklabels="auto",
mask=None, ax=None,
**kwargs
):
"""Plot rectangular data as a color-encoded matrix.
This is an Axes-level function and will draw the heatmap into the
currently-active Axes if none is provided to the ``ax`` argument. Part of
this Axes space will be taken and used to plot a colormap, unless ``cbar``
is False or a separate Axes is provided to ``cbar_ax``.
Parameters
----------
data : rectangular dataset
2D dataset that can be coerced into an ndarray. If a Pandas DataFrame
is provided, the index/column information will be used to label the
columns and rows.
vmin, vmax : floats, optional
Values to anchor the colormap, otherwise they are inferred from the
data and other keyword arguments.
cmap : matplotlib colormap name or object, or list of colors, optional
The mapping from data values to color space. If not provided, the
default will depend on whether ``center`` is set.
center : float, optional
The value at which to center the colormap when plotting divergent data.
Using this parameter will change the default ``cmap`` if none is
specified.
robust : bool, optional
If True and ``vmin`` or ``vmax`` are absent, the colormap range is
computed with robust quantiles instead of the extreme values.
annot : bool or rectangular dataset, optional
If True, write the data value in each cell. If an array-like with the
same shape as ``data``, then use this to annotate the heatmap instead
of the data. Note that DataFrames will match on position, not index.
fmt : str, optional
String formatting code to use when adding annotations.
annot_kws : dict of key, value mappings, optional
Keyword arguments for :meth:`matplotlib.axes.Axes.text` when ``annot``
is True.
linewidths : float, optional
Width of the lines that will divide each cell.
linecolor : color, optional
Color of the lines that will divide each cell.
cbar : bool, optional
Whether to draw a colorbar.
cbar_kws : dict of key, value mappings, optional
Keyword arguments for :meth:`matplotlib.figure.Figure.colorbar`.
cbar_ax : matplotlib Axes, optional
Axes in which to draw the colorbar, otherwise take space from the
main Axes.
square : bool, optional
If True, set the Axes aspect to "equal" so each cell will be
square-shaped.
xticklabels, yticklabels : "auto", bool, list-like, or int, optional
If True, plot the column names of the dataframe. If False, don't plot
the column names. If list-like, plot these alternate labels as the
xticklabels. If an integer, use the column names but plot only every
n label. If "auto", try to densely plot non-overlapping labels.
mask : bool array or DataFrame, optional
If passed, data will not be shown in cells where ``mask`` is True.
Cells with missing values are automatically masked.
ax : matplotlib Axes, optional
Axes in which to draw the plot, otherwise use the currently-active
Axes.
kwargs : other keyword arguments
All other keyword arguments are passed to
:meth:`matplotlib.axes.Axes.pcolormesh`.
Returns
-------
ax : matplotlib Axes
Axes object with the heatmap.
See Also
--------
clustermap : Plot a matrix using hierarchical clustering to arrange the
rows and columns.
Examples
--------
Plot a heatmap for a numpy array:
.. plot::
:context: close-figs
>>> import numpy as np; np.random.seed(0)
>>> import seaborn as sns; sns.set_theme()
>>> uniform_data = np.random.rand(10, 12)
>>> ax = sns.heatmap(uniform_data)
Change the limits of the colormap:
.. plot::
:context: close-figs
>>> ax = sns.heatmap(uniform_data, vmin=0, vmax=1)
Plot a heatmap for data centered on 0 with a diverging colormap:
.. plot::
:context: close-figs
>>> normal_data = np.random.randn(10, 12)
>>> ax = sns.heatmap(normal_data, center=0)
Plot a dataframe with meaningful row and column labels:
.. plot::
:context: close-figs
>>> flights = sns.load_dataset("flights")
>>> flights = flights.pivot("month", "year", "passengers")
>>> ax = sns.heatmap(flights)
Annotate each cell with the numeric value using integer formatting:
.. plot::
:context: close-figs
>>> ax = sns.heatmap(flights, annot=True, fmt="d")
Add lines between each cell:
.. plot::
:context: close-figs
>>> ax = sns.heatmap(flights, linewidths=.5)
Use a different colormap:
.. plot::
:context: close-figs
>>> ax = sns.heatmap(flights, cmap="YlGnBu")
Center the colormap at a specific value:
.. plot::
:context: close-figs
>>> ax = sns.heatmap(flights, center=flights.loc["Jan", 1955])
Plot every other column label and don't plot row labels:
.. plot::
:context: close-figs
>>> data = np.random.randn(50, 20)
>>> ax = sns.heatmap(data, xticklabels=2, yticklabels=False)
Don't draw a colorbar:
.. plot::
:context: close-figs
>>> ax = sns.heatmap(flights, cbar=False)
Use different axes for the colorbar:
.. plot::
:context: close-figs
>>> grid_kws = {"height_ratios": (.9, .05), "hspace": .3}
>>> f, (ax, cbar_ax) = plt.subplots(2, gridspec_kw=grid_kws)
>>> ax = sns.heatmap(flights, ax=ax,
... cbar_ax=cbar_ax,
... cbar_kws={"orientation": "horizontal"})
Use a mask to plot only part of a matrix
.. plot::
:context: close-figs
>>> corr = np.corrcoef(np.random.randn(10, 200))
>>> mask = np.zeros_like(corr)
>>> mask[np.triu_indices_from(mask)] = True
>>> with sns.axes_style("white"):
... f, ax = plt.subplots(figsize=(7, 5))
... ax = sns.heatmap(corr, mask=mask, vmax=.3, square=True)
"""
# Initialize the plotter object
plotter = _HeatMapper(data, vmin, vmax, cmap, center, robust, annot, fmt,
annot_kws, cbar, cbar_kws, xticklabels,
yticklabels, mask)
# Add the pcolormesh kwargs here
kwargs["linewidths"] = linewidths
kwargs["edgecolor"] = linecolor
# Draw the plot and return the Axes
if ax is None:
ax = plt.gca()
if square:
ax.set_aspect("equal")
plotter.plot(ax, cbar_ax, kwargs)
return ax
class _DendrogramPlotter:
"""Object for drawing tree of similarities between data rows/columns"""
def __init__(self, data, linkage, metric, method, axis, label, rotate):
"""Plot a dendrogram of the relationships between the columns of data
Parameters
----------
data : pandas.DataFrame
Rectangular data
"""
self.axis = axis
if self.axis == 1:
data = data.T
if isinstance(data, pd.DataFrame):
array = data.values
else:
array = np.asarray(data)
data = pd.DataFrame(array)
self.array = array
self.data = data
self.shape = self.data.shape
self.metric = metric
self.method = method
self.axis = axis
self.label = label
self.rotate = rotate
if linkage is None:
self.linkage = self.calculated_linkage
else:
self.linkage = linkage
self.dendrogram = self.calculate_dendrogram()
# Dendrogram ends are always at multiples of 5, who knows why
ticks = 10 * np.arange(self.data.shape[0]) + 5
if self.label:
ticklabels = _index_to_ticklabels(self.data.index)
ticklabels = [ticklabels[i] for i in self.reordered_ind]
if self.rotate:
self.xticks = []
self.yticks = ticks
self.xticklabels = []
self.yticklabels = ticklabels
self.ylabel = _index_to_label(self.data.index)
self.xlabel = ''
else:
self.xticks = ticks
self.yticks = []
self.xticklabels = ticklabels
self.yticklabels = []
self.ylabel = ''
self.xlabel = _index_to_label(self.data.index)
else:
self.xticks, self.yticks = [], []
self.yticklabels, self.xticklabels = [], []
self.xlabel, self.ylabel = '', ''
self.dependent_coord = self.dendrogram['dcoord']
self.independent_coord = self.dendrogram['icoord']
def _calculate_linkage_scipy(self):
linkage = hierarchy.linkage(self.array, method=self.method,
metric=self.metric)
return linkage
def _calculate_linkage_fastcluster(self):
import fastcluster
# Fastcluster has a memory-saving vectorized version, but only
# with certain linkage methods, and mostly with euclidean metric
# vector_methods = ('single', 'centroid', 'median', 'ward')
euclidean_methods = ('centroid', 'median', 'ward')
euclidean = self.metric == 'euclidean' and self.method in \
euclidean_methods
if euclidean or self.method == 'single':
return fastcluster.linkage_vector(self.array,
method=self.method,
metric=self.metric)
else:
linkage = fastcluster.linkage(self.array, method=self.method,
metric=self.metric)
return linkage
@property
def calculated_linkage(self):
try:
return self._calculate_linkage_fastcluster()
except ImportError:
if np.product(self.shape) >= 10000:
msg = ("Clustering large matrix with scipy. Installing "
"`fastcluster` may give better performance.")
warnings.warn(msg)
return self._calculate_linkage_scipy()
def calculate_dendrogram(self):
"""Calculates a dendrogram based on the linkage matrix
Made a separate function, not a property because don't want to
recalculate the dendrogram every time it is accessed.
Returns
-------
dendrogram : dict
Dendrogram dictionary as returned by scipy.cluster.hierarchy
.dendrogram. The important key-value pairing is
"reordered_ind" which indicates the re-ordering of the matrix
"""
return hierarchy.dendrogram(self.linkage, no_plot=True,
color_threshold=-np.inf)
@property
def reordered_ind(self):
"""Indices of the matrix, reordered by the dendrogram"""
return self.dendrogram['leaves']
def plot(self, ax, tree_kws):
"""Plots a dendrogram of the similarities between data on the axes
Parameters
----------
ax : matplotlib.axes.Axes
Axes object upon which the dendrogram is plotted
"""
tree_kws = {} if tree_kws is None else tree_kws.copy()
tree_kws.setdefault("linewidths", .5)
tree_kws.setdefault("colors", tree_kws.pop("color", (.2, .2, .2)))
if self.rotate and self.axis == 0:
coords = zip(self.dependent_coord, self.independent_coord)
else:
coords = zip(self.independent_coord, self.dependent_coord)
lines = LineCollection([list(zip(x, y)) for x, y in coords],
**tree_kws)
ax.add_collection(lines)
number_of_leaves = len(self.reordered_ind)
max_dependent_coord = max(map(max, self.dependent_coord))
if self.rotate:
ax.yaxis.set_ticks_position('right')
# Constants 10 and 1.05 come from
# `scipy.cluster.hierarchy._plot_dendrogram`
ax.set_ylim(0, number_of_leaves * 10)
ax.set_xlim(0, max_dependent_coord * 1.05)
ax.invert_xaxis()
ax.invert_yaxis()
else:
# Constants 10 and 1.05 come from
# `scipy.cluster.hierarchy._plot_dendrogram`
ax.set_xlim(0, number_of_leaves * 10)
ax.set_ylim(0, max_dependent_coord * 1.05)
despine(ax=ax, bottom=True, left=True)
ax.set(xticks=self.xticks, yticks=self.yticks,
xlabel=self.xlabel, ylabel=self.ylabel)
xtl = ax.set_xticklabels(self.xticklabels)
ytl = ax.set_yticklabels(self.yticklabels, rotation='vertical')
# Force a draw of the plot to avoid matplotlib window error
_draw_figure(ax.figure)
if len(ytl) > 0 and axis_ticklabels_overlap(ytl):
plt.setp(ytl, rotation="horizontal")
if len(xtl) > 0 and axis_ticklabels_overlap(xtl):
plt.setp(xtl, rotation="vertical")
return self
def dendrogram(
data, *,
linkage=None, axis=1, label=True, metric='euclidean',
method='average', rotate=False, tree_kws=None, ax=None
):
"""Draw a tree diagram of relationships within a matrix
Parameters
----------
data : pandas.DataFrame
Rectangular data
linkage : numpy.array, optional
Linkage matrix
axis : int, optional
Which axis to use to calculate linkage. 0 is rows, 1 is columns.
label : bool, optional
If True, label the dendrogram at leaves with column or row names
metric : str, optional
Distance metric. Anything valid for scipy.spatial.distance.pdist
method : str, optional
Linkage method to use. Anything valid for
scipy.cluster.hierarchy.linkage
rotate : bool, optional
When plotting the matrix, whether to rotate it 90 degrees
counter-clockwise, so the leaves face right
tree_kws : dict, optional
Keyword arguments for the ``matplotlib.collections.LineCollection``
that is used for plotting the lines of the dendrogram tree.
ax : matplotlib axis, optional
Axis to plot on, otherwise uses current axis
Returns
-------
dendrogramplotter : _DendrogramPlotter
A Dendrogram plotter object.
Notes
-----
Access the reordered dendrogram indices with
dendrogramplotter.reordered_ind
"""
if _no_scipy:
raise RuntimeError("dendrogram requires scipy to be installed")
plotter = _DendrogramPlotter(data, linkage=linkage, axis=axis,
metric=metric, method=method,
label=label, rotate=rotate)
if ax is None:
ax = plt.gca()
return plotter.plot(ax=ax, tree_kws=tree_kws)
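# Usage sketch (illustration only, requires scipy): the returned _DendrogramPlotter exposes the
# leaf order that clustermap uses to reorder the matrix rows/columns.
def _dendrogram_usage_example():
    data = pd.DataFrame(np.random.RandomState(0).rand(6, 4))
    dg = dendrogram(data, axis=0)  # draws on the current Axes
    return dg.reordered_ind  # row order after hierarchical clustering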
class ClusterGrid(Grid):
def __init__(self, data, pivot_kws=None, z_score=None, standard_scale=None,
figsize=None, row_colors=None, col_colors=None, mask=None,
dendrogram_ratio=None, colors_ratio=None, cbar_pos=None):
"""Grid object for organizing clustered heatmap input on to axes"""
if _no_scipy:
raise RuntimeError("ClusterGrid requires scipy to be available")
if isinstance(data, pd.DataFrame):
self.data = data
else:
self.data =
|
pd.DataFrame(data)
|
pandas.DataFrame
|
import numpy as np
import pandas as pd
from gimmebio.ram_seq import rs_matrix, seq_power_series
from gimmebio.seqs import (
hamming_distance,
needle_distance,
)
from scipy.spatial import KDTree
SEED_SIZE = 10 * 1000
BALANCE_GAP = 10 * 1000
BATCH_SIZE = 1000
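# Illustrative usage of the KDRFTCover class defined below (the synthetic k-mers
# and radius value are assumptions, not values from the original code):
# cover = KDRFTCover(radius=0.5)
# for kmer in ['ACGTACGTAC', 'ACGTACGTAG', 'TTTTCCCCGG']:
#     cover.add(kmer)
# cover.greedy_clusters()
# cover.clusters  # maps centroid index -> set of member point indices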
class KDRFTCover:
def __init__(self, radius, seed_size=-1):
self.rf_coeffs = None
self.seed_size = seed_size
self.points = []
self.centroids = []
self.batch = []
self.radius = radius
self.clusters = {}
self.tree = None
self.raw = []
def ramify(self, kmer):
if self.rf_coeffs is None:
self.rf_coeffs = rs_matrix(len(kmer))
rft = seq_power_series(kmer, RS=self.rf_coeffs)[:min(12, len(kmer))]
return np.array(rft)
def add(self, kmer):
self.raw.append(kmer)
rft = self.ramify(kmer)
self.points.append(rft)
def search(self, kmer, max_dist):
rft = self.ramify(kmer)
centroids = self.tree.query_ball_point(rft, max_dist, eps=0.01)
return centroids
def greedy_clusters(self, logger=None):
all_tree, index_map = KDTree(np.array(self.points)), {i: i for i in range(len(self.points))}
clusters, clustered_points = {}, set()
batch_map, batch_points = {}, []
for i, rft in enumerate(self.points):
if i in clustered_points:
continue
batch_map[len(batch_points)] = i
batch_points.append(rft)
if len(batch_points) == BATCH_SIZE:
if logger is not None:
logger(f'Running batch, starting with {len(clusters)} clusters')
clusters, clustered_points = self._greedy_cluster_batch(
all_tree, index_map,
batch_map, batch_points,
clusters, clustered_points
)
batch_map, batch_points = {}, []
# Rebuild all_tree to only include points which are not yet clustered
# this works because we cannot cluster points twice and it makes
# the search space smaller (at the expense of rebuilding the tree and
# added code complexity for offset)
unclustered_points, index_map = [], {}
for i, point in enumerate(self.points):
if i in clustered_points:
continue
index_map[len(unclustered_points)] = i
unclustered_points.append(point)
if unclustered_points:
all_tree = KDTree(np.array(unclustered_points))
if batch_points:
clusters, clustered_points = self._greedy_cluster_batch(
all_tree, index_map, batch_map, batch_points, clusters, clustered_points
)
self.clusters = clusters
self.centroids = [self.points[i] for i in clusters.keys()]
self.tree = KDTree(np.array(self.centroids))
def _greedy_cluster_batch(self, all_tree, index_map, batch_map, batch_points,
clusters, clustered_points):
query_tree = KDTree(np.array(batch_points))
result = query_tree.query_ball_tree(all_tree, self.radius, eps=0.1)
for i, pts in enumerate(result):
index_in_all_points = batch_map[i]
if index_in_all_points in clustered_points:
continue
clusters[index_in_all_points] = set([index_in_all_points])
clustered_points.add(index_in_all_points)
pts = {index_map[pt] for pt in pts}
pts -= clustered_points
clusters[index_in_all_points] |= pts
clustered_points |= pts
return clusters, clustered_points
def _cluster_radius(self):
all_dists = []
for centroid, cluster in self.clusters.items():
centroid, dists = self.raw[centroid], []
for point in [self.raw[i] for i in cluster]:
dists.append(needle_distance(centroid, point))
all_dists.append(
|
pd.Series(dists)
|
pandas.Series
|
#!/usr/bin/env python
# coding: utf-8
# # 01__motif_model
#
# In this notebook, I find motifs that are significantly associated with mean MPRA activity using linear models
# In[1]:
import warnings
warnings.filterwarnings('ignore')
import itertools
import pandas as pd
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
import statsmodels.api as sm
import statsmodels.formula.api as smf
import sys
from itertools import combinations
from scipy.stats import boxcox
from scipy.stats import linregress
from scipy.stats import spearmanr
from scipy.stats import pearsonr
from statsmodels.stats.anova import anova_lm
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import NearestNeighbors
from scipy import stats
stats.chisqprob = lambda chisq, df: stats.chi2.sf(chisq, df)
# import utils
sys.path.append("../../../utils")
from plotting_utils import *
from classify_utils import *
get_ipython().run_line_magic('matplotlib', 'inline')
get_ipython().run_line_magic('config', "InlineBackend.figure_format = 'svg'")
mpl.rcParams['figure.autolayout'] = False
# In[2]:
sns.set(**PAPER_PRESET)
fontsize = PAPER_FONTSIZE
# In[3]:
np.random.seed(2019)
# In[4]:
QUANT_ALPHA = 0.05
# ## functions
# In[5]:
def calculate_gc(row):
cs = row["index"].count("C")
gs = row["index"].count("G")
gc = (cs+gs)/len(row["index"])
return gc
# In[6]:
def calculate_cpg(row):
cpgs = row["index"].count("CG")
cpg = cpgs/len(row["index"])
return cpg
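# Illustrative use of the two row-wise helpers above (a frame with an "index"
# column of element sequences is assumed; values below were checked by hand):
# demo = pd.DataFrame({"index": ["ACGTACGT", "CGCGCGCG"]})
# demo["gc"] = demo.apply(calculate_gc, axis=1)    # -> 0.5, 1.0
# demo["cpg"] = demo.apply(calculate_cpg, axis=1)  # -> 0.25, 0.5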
# In[7]:
def lrtest(llmin, llmax):
lr = 2 * (llmax - llmin)
p = stats.chisqprob(lr, 1) # llmax has 1 dof more than llmin
return lr, p
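# Illustrative check of lrtest() with two nested OLS fits (synthetic data, not
# the MPRA tables loaded below; statsmodels fit results expose .llf):
# demo = pd.DataFrame({"y": np.random.randn(100), "x": np.random.randn(100)})
# m_null = smf.ols("y ~ 1", data=demo).fit()
# m_full = smf.ols("y ~ x", data=demo).fit()
# lr, p = lrtest(m_null.llf, m_full.llf)  # m_full has one extra dof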
# In[8]:
def activ_or_repress(row):
if row.beta > 0:
return "activating"
elif row.beta < 0:
return "repressing"
# ## variables
# In[9]:
data_f = "../../../data/02__mpra/02__activs/alpha_per_elem.quantification.txt"
# In[10]:
index_f = "../../../data/01__design/02__index/TWIST_pool4_v8_final.with_element_id.txt.gz"
# In[11]:
tss_map_f = "../../../data/01__design/01__mpra_list/mpra_tss.with_ids.RECLASSIFIED_WITH_MAX.txt"
# In[12]:
motif_dir = "../../../data/04__mapped_motifs/elem_fimo_out"
motifs_f = "%s/fimo.txt.gz" % motif_dir
# In[13]:
elem_map_f = "../../../data/04__mapped_motifs/fastas/elem_map.txt"
# In[14]:
motif_info_dir = "../../../misc/01__motif_info"
motif_map_f = "%s/00__lambert_et_al_files/00__metadata/curated_motif_map.txt" % motif_info_dir
motif_info_f = "%s/00__lambert_et_al_files/00__metadata/motif_info.txt" % motif_info_dir
# In[15]:
hESC_tf_f = "../../../data/03__rna_seq/04__TF_expr/hESC_TF_expression.txt"
mESC_tf_f = "../../../data/03__rna_seq/04__TF_expr/mESC_TF_expression.txt"
# ## 1. import data
# In[16]:
data =
|
pd.read_table(data_f)
|
pandas.read_table
|
from __future__ import print_function, absolute_import, division
import pandas as pd
import numpy as np
import argparse
import json
import math
import re
import os
import sys
import csv
import socket # -- ip checks
import seaborn as sns
import matplotlib.pyplot as plt
from jinja2 import Environment, PackageLoader
# --- functions ---
def get_config(config):
""" convert json config file into a python dict """
with open(config, 'r') as f:
config_dict = json.load(f)[0]
return config_dict
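# Illustrative shape of the json config consumed by the helpers below (keys are
# inferred from how the config is used in this script; real files may hold more):
# [{
#   "input_file": "transactions.csv",
#   "required_features": {
#     "EVENT_TIMESTAMP": "event_time",
#     "EVENT_LABEL": "fraud_label",
#     "EMAIL_ADDRESS": "email",
#     "IP_ADDRESS": "ip_address"
#   }
# }]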
# -- load data --
def get_dataframe(config):
""" load csv into python dataframe """
df = pd.read_csv(config['input_file'], low_memory=False)
return df
# --
def get_overview(config, df):
""" return details of the dataframe and any issues found """
overview_msg = {}
df = df.copy()
column_cnt = len(df.columns)
try:
df['EVENT_TIMESTAMP'] = pd.to_datetime(df[config['required_features']['EVENT_TIMESTAMP']], infer_datetime_format=True)
date_range = df['EVENT_TIMESTAMP'].min().strftime('%Y-%m-%d') + ' to ' + df['EVENT_TIMESTAMP'].max().strftime('%Y-%m-%d')
day_cnt = (df['EVENT_TIMESTAMP'].max() - df['EVENT_TIMESTAMP'].min()).days
except:
overview_msg[config['required_features']['EVENT_TIMESTAMP']] = "Unable to convert " + config['required_features']['EVENT_TIMESTAMP'] + " to timestamp"
date_range = ""
day_cnt = 0
record_cnt = df.shape[0]
memory_size = df.memory_usage(index=True).sum()
record_size = round(float(memory_size) / record_cnt,2)
n_dupe = record_cnt - len(df.drop_duplicates())
if record_cnt <= 10000:
overview_msg["Record count"] = "A minimum of 10,000 rows are required to train the model, your dataset contains " + str(record_cnt)
overview_stats = {
"Record count" : "{:,}".format(record_cnt) ,
"Column count" : "{:,}".format(column_cnt),
"Duplicate count" : "{:,}".format(n_dupe),
"Memory size" : "{:.2f}".format(memory_size/1024**2) + " MB",
"Record size" : "{:,}".format(record_size) + " bytes",
"Date range" : date_range,
"Day count" : "{:,}".format(day_cnt) + " days",
"overview_msg" : overview_msg,
"overview_cnt" : len(overview_msg)
}
return df, overview_stats
def set_feature(row, config):
""" sets the feature type of each variable in the file, identifies features with issues
as well as the required features. this is the first pass of rules
"""
rulehit = 0
feature = ""
message = ""
required_features = config['required_features']
# -- assign numeric --
if ((row._dtype in ['float64', 'int64']) and (row['nunique'] > 1)):
feature = "numeric"
message = "(" + "{:,}".format(row['nunique']) + ") unique"
# -- assign categorical --
if ((row._dtype == 'object') and ( row.nunique_pct <= 0.75)):
feature = "categorical"
message = "(" + "{:.2f}".format(row.nunique_pct*100) + "%) unique"
# -- assign categorical to numerics --
if ((row._dtype in ['float64', 'int64']) and ( row['nunique'] <= 1024 )):
feature = "categorical"
message = "(" + "{:,}".format(row['nunique']) + ") unique"
# -- assign binary --
if (row['nunique'] == 2 ):
feature = "categorical"
message = "(" + "{:}".format(row['nunique']) + ") binary"
# -- single value --
if (row['nunique'] == 1):
rulehit = 1
feature = "exclude"
message = "(" + "{:}".format(row['nunique']) + ") single value"
# -- null pct --
if (row.null_pct >= 0.50 and (rulehit == 0)):
rulehit = 1
feature = "exclude"
message = "(" + "{:.2f}".format(row.null_pct*100) + "%) missing "
# -- categorical w. high % unique
if ((row._dtype == 'object') and ( row.nunique_pct >= 0.75)) and (rulehit == 0):
rulehit = 1
feature = "exclude"
message = "(" + "{:.2f}".format(row.nunique_pct*100) + "%) unique"
# -- numeric w. extreme % unique
if ((row._dtype in ['float64', 'int64']) and ( row.nunique_pct >= 0.95)) and (rulehit == 0):
rulehit = 1
feature = "exclude"
message = "(" + "{:.2f}".format(row.nunique_pct*100) + "%) unique"
if ('EMAIL_ADDRESS' in required_features) and (row._column == required_features['EMAIL_ADDRESS']):
feature = "EMAIL_ADDRESS"
if ('IP_ADDRESS' in required_features) and (row._column == required_features['IP_ADDRESS']):
feature = "IP_ADDRESS"
if row._column == required_features['EVENT_TIMESTAMP']:
feature = "EVENT_TIMESTAMP"
if row._column == required_features['EVENT_LABEL']:
feature = "EVENT_LABEL"
return feature, message
def get_label(config, df):
""" returns stats on the label and performs intial label checks """
message = {}
label = config['required_features']['EVENT_LABEL']
label_summary = df[label].value_counts()
rowcnt = df.shape[0]
label_dict = {
"label_field" : label,
"label_values" : df[label].unique(),
"label_dtype" : label_summary.dtype,
"fraud_rate" : "{:.2f}".format((label_summary.min()/label_summary.sum())*100),
"fraud_label": str(label_summary.idxmin()),
"fraud_count": label_summary.min(),
"legit_rate" : "{:.2f}".format((label_summary.max()/label_summary.sum())*100),
"legit_count": label_summary.max(),
"legit_label": str(label_summary.idxmax()),
"null_count" : "{:,}".format(df[label].isnull().sum(axis = 0)),
"null_rate" : "{:.2f}".format(df[label].isnull().sum(axis = 0)/rowcnt),
}
"""
label checks
"""
if label_dict['fraud_count'] <= 500:
message['fraud_count'] = "Fraud count " + str(label_dict['fraud_count']) + " is less than 500\n"
if df[label].isnull().sum(axis = 0)/rowcnt >= 0.01:
message['label_nulls'] = "Your LABEL column contains " + label_dict["null_count"] + " null values, which is a significant number"
label_dict['warnings'] = len(message)
return label_dict, message
def get_partition(config, df):
""" evaluates your dataset partitions and checks the distribution of fraud lables """
df = df.copy()
row_count = df.shape[0]
required_features = config['required_features']
message = {}
stats ={}
try:
df['_event_timestamp'] = pd.to_datetime(df[required_features['EVENT_TIMESTAMP']])
df['_dt'] = pd.to_datetime(df['_event_timestamp'].dt.date)
except:
message['_event_timestamp'] = "could not parse " + required_features['EVENT_TIMESTAMP'] + " into a date or timestamp object"
df['_event_timestamp'] = df[required_features['EVENT_TIMESTAMP']]
df['_dt'] = df['_event_timestamp']
label_summary = df[required_features['EVENT_LABEL']].value_counts()
legit_label = label_summary.idxmax()
fraud_label = label_summary.idxmin()
df = df.sort_values(by=['_event_timestamp']).reset_index(drop=True)
ctab = pd.crosstab(df['_dt'].astype(str), df[required_features['EVENT_LABEL']]).reset_index()
stats['labels'] = ctab['_dt'].tolist()
stats['legit_rates'] = ctab[legit_label].tolist()
stats['fraud_rates'] = ctab[fraud_label].tolist()
# -- set partitions --
df['partition'] = 'training'
df.loc[math.ceil(row_count*.7):math.ceil(row_count*.85),'partition'] = 'evaluation'
df.loc[math.ceil(row_count*.85):,'partition'] = 'testing'
message = ""
return stats, message
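# Illustrative split produced above: with 1,000 rows sorted by timestamp, rows
# 0-699 stay 'training', rows 700-849 become 'evaluation', and rows 850-999
# 'testing' (row 850 is first tagged 'evaluation', then overwritten by the
# inclusive 'testing' slice).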
def get_stats(config, df):
""" generates the key column analysis statistics calls set_features function """
df = df.copy()
rowcnt = len(df)
df_s1 = df.agg(['count', 'nunique',]).transpose().reset_index().rename(columns={"index":"_column"})
df_s1['count'] = df_s1['count'].astype('int64')
df_s1['nunique'] = df_s1['nunique'].astype('int64')
df_s1["null"] = (rowcnt - df_s1["count"]).astype('int64')
df_s1["not_null"] = rowcnt - df_s1["null"]
df_s1["null_pct"] = df_s1["null"] / rowcnt
df_s1["nunique_pct"] = df_s1['nunique'] / rowcnt
dt = pd.DataFrame(df.dtypes).reset_index().rename(columns={"index":"_column", 0:"_dtype"})
df_stats = pd.merge(dt, df_s1, on='_column', how='inner')
df_stats = df_stats.round(4)
df_stats[['_feature', '_message']] = df_stats.apply(lambda x: set_feature(x,config), axis = 1, result_type="expand")
return df_stats, df_stats.loc[df_stats["_feature"]=="exclude"]
def get_email(config, df):
""" gets the email statisitcs and performs email checks """
message = {}
required_features = config['required_features']
email = required_features['EMAIL_ADDRESS']
email_recs = df.shape[0]
email_null = df[email].isna().sum()
emails = pd.Series(
|
pd.unique(df[email].values)
|
pandas.unique
|
import pandas as pd
import os
import numpy as np
import math
import matplotlib.pyplot as plt
from utils import MAPE, RMSE, MAE
import pvlib
from pvlib import location
from pvlib import irradiance
from pvlib import pvsystem
import datetime
from pysolar.solar import radiation, get_altitude
import pytz
from pvlib.temperature import TEMPERATURE_MODEL_PARAMETERS
from pvlib.pvsystem import PVSystem
from pvlib.modelchain import ModelChain
from pvodataset import PVODataset
class demo_K_PV(PVODataset):
"""
Demonstrates the use of inheritance.
Follows the Open-Closed Principle and the Single Responsibility Principle.
Note: end - start >= 96 (24 h at 15-minute resolution = 24*4 = 96 timesteps).
Please modify pdc0 in init_PV_sys(), and longitude/latitude in get_dir_rad().
"""
def __init__(self, path='../datasets/', timezone="UTC",params=0):
super(demo_K_PV, self).__init__(path)
self.tz = timezone
print(self.tz)
pass
def read_weather(self, path='../datasets/McClear/s7_clr_data_17-19.csv'):
"""
periods = 500 for S5; 96 for S7 and S8.
"""
s_clr = pd.read_csv(path)
times = pd.date_range('03-16-2018 16:00', freq='15min', periods=96*2, tz="UTC") #
weather =
|
pd.DataFrame(columns=['ghi', 'dni', 'dhi'], index=times)
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
from datetime import datetime
from io import StringIO
import re
import numpy as np
import pytest
from pandas.compat import lrange
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, option_context
from pandas.util import testing as tm
import pandas.io.formats.format as fmt
lorem_ipsum = (
"Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod"
" tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim"
" veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex"
" ea commodo consequat. Duis aute irure dolor in reprehenderit in"
" voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur"
" sint occaecat cupidatat non proident, sunt in culpa qui officia"
" deserunt mollit anim id est laborum.")
def expected_html(datapath, name):
"""
Read HTML file from formats data directory.
Parameters
----------
datapath : pytest fixture
The datapath fixture injected into a test by pytest.
name : str
The name of the HTML file without the suffix.
Returns
-------
str : contents of HTML file.
"""
filename = '.'.join([name, 'html'])
filepath = datapath('io', 'formats', 'data', 'html', filename)
with open(filepath, encoding='utf-8') as f:
html = f.read()
return html.rstrip()
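# For example, expected_html(datapath, 'gh12031_expected_output') reads
# io/formats/data/html/gh12031_expected_output.html relative to the test-data
# root supplied by the datapath fixture.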
@pytest.fixture(params=['mixed', 'empty'])
def biggie_df_fixture(request):
"""Fixture for a big mixed Dataframe and an empty Dataframe"""
if request.param == 'mixed':
df = DataFrame({'A': np.random.randn(200),
'B': tm.makeStringIndex(200)},
index=lrange(200))
df.loc[:20, 'A'] = np.nan
df.loc[:20, 'B'] = np.nan
return df
elif request.param == 'empty':
df = DataFrame(index=np.arange(200))
return df
@pytest.fixture(params=fmt._VALID_JUSTIFY_PARAMETERS)
def justify(request):
return request.param
@pytest.mark.parametrize('col_space', [30, 50])
def test_to_html_with_col_space(col_space):
df = DataFrame(np.random.random(size=(1, 3)))
# check that col_space affects HTML generation
# and be very brittle about it.
result = df.to_html(col_space=col_space)
hdrs = [x for x in result.split(r"\n") if re.search(r"<th[>\s]", x)]
assert len(hdrs) > 0
for h in hdrs:
assert "min-width" in h
assert str(col_space) in h
def test_to_html_with_empty_string_label():
# GH 3547, to_html regards empty string labels as repeated labels
data = {'c1': ['a', 'b'], 'c2': ['a', ''], 'data': [1, 2]}
df = DataFrame(data).set_index(['c1', 'c2'])
result = df.to_html()
assert "rowspan" not in result
@pytest.mark.parametrize('df,expected', [
(DataFrame({'\u03c3': np.arange(10.)}), 'unicode_1'),
(DataFrame({'A': ['\u03c3']}), 'unicode_2')
])
def test_to_html_unicode(df, expected, datapath):
expected = expected_html(datapath, expected)
result = df.to_html()
assert result == expected
def test_to_html_decimal(datapath):
# GH 12031
df = DataFrame({'A': [6.0, 3.1, 2.2]})
result = df.to_html(decimal=',')
expected = expected_html(datapath, 'gh12031_expected_output')
assert result == expected
@pytest.mark.parametrize('kwargs,string,expected', [
(dict(), "<type 'str'>", 'escaped'),
(dict(escape=False), "<b>bold</b>", 'escape_disabled')
])
def test_to_html_escaped(kwargs, string, expected, datapath):
a = 'str<ing1 &'
b = 'stri>ng2 &'
test_dict = {'co<l1': {a: string,
b: string},
'co>l2': {a: string,
b: string}}
result = DataFrame(test_dict).to_html(**kwargs)
expected = expected_html(datapath, expected)
assert result == expected
@pytest.mark.parametrize('index_is_named', [True, False])
def test_to_html_multiindex_index_false(index_is_named, datapath):
# GH 8452
df = DataFrame({
'a': range(2),
'b': range(3, 5),
'c': range(5, 7),
'd': range(3, 5)
})
df.columns = MultiIndex.from_product([['a', 'b'], ['c', 'd']])
if index_is_named:
df.index = Index(df.index.values, name='idx')
result = df.to_html(index=False)
expected = expected_html(datapath, 'gh8452_expected_output')
assert result == expected
@pytest.mark.parametrize('multi_sparse,expected', [
(False, 'multiindex_sparsify_false_multi_sparse_1'),
(False, 'multiindex_sparsify_false_multi_sparse_2'),
(True, 'multiindex_sparsify_1'),
(True, 'multiindex_sparsify_2')
])
def test_to_html_multiindex_sparsify(multi_sparse, expected, datapath):
index = MultiIndex.from_arrays([[0, 0, 1, 1], [0, 1, 0, 1]],
names=['foo', None])
df = DataFrame([[0, 1], [2, 3], [4, 5], [6, 7]], index=index)
if expected.endswith('2'):
df.columns = index[::2]
with option_context('display.multi_sparse', multi_sparse):
result = df.to_html()
expected = expected_html(datapath, expected)
assert result == expected
@pytest.mark.parametrize('max_rows,expected', [
(60, 'gh14882_expected_output_1'),
# Test that ... appears in a middle level
(56, 'gh14882_expected_output_2')
])
def test_to_html_multiindex_odd_even_truncate(max_rows, expected, datapath):
# GH 14882 - Issue on truncation with odd length DataFrame
index = MultiIndex.from_product([[100, 200, 300],
[10, 20, 30],
[1, 2, 3, 4, 5, 6, 7]],
names=['a', 'b', 'c'])
df = DataFrame({'n': range(len(index))}, index=index)
result = df.to_html(max_rows=max_rows)
expected = expected_html(datapath, expected)
assert result == expected
@pytest.mark.parametrize('df,formatters,expected', [
(DataFrame(
[[0, 1], [2, 3], [4, 5], [6, 7]],
columns=['foo', None], index=lrange(4)),
{'__index__': lambda x: 'abcd' [x]},
'index_formatter'),
(DataFrame(
{'months': [datetime(2016, 1, 1), datetime(2016, 2, 2)]}),
{'months': lambda x: x.strftime('%Y-%m')},
'datetime64_monthformatter'),
(DataFrame({'hod': pd.to_datetime(['10:10:10.100', '12:12:12.120'],
format='%H:%M:%S.%f')}),
{'hod': lambda x: x.strftime('%H:%M')},
'datetime64_hourformatter')
])
def test_to_html_formatters(df, formatters, expected, datapath):
expected = expected_html(datapath, expected)
result = df.to_html(formatters=formatters)
assert result == expected
def test_to_html_regression_GH6098():
df = DataFrame({
'clé1': ['a', 'a', 'b', 'b', 'a'],
'clé2': ['1er', '2ème', '1er', '2ème', '1er'],
'données1': np.random.randn(5),
'données2': np.random.randn(5)})
# it works
df.pivot_table(index=['clé1'], columns=['clé2'])._repr_html_()
def test_to_html_truncate(datapath):
index = pd.date_range(start='20010101', freq='D', periods=20)
df = DataFrame(index=index, columns=range(20))
result = df.to_html(max_rows=8, max_cols=4)
expected = expected_html(datapath, 'truncate')
assert result == expected
@pytest.mark.parametrize('sparsify,expected', [
(True, 'truncate_multi_index'),
(False, 'truncate_multi_index_sparse_off')
])
def test_to_html_truncate_multi_index(sparsify, expected, datapath):
arrays = [['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
df = DataFrame(index=arrays, columns=arrays)
result = df.to_html(max_rows=7, max_cols=7, sparsify=sparsify)
expected = expected_html(datapath, expected)
assert result == expected
@pytest.mark.parametrize('option,result,expected', [
(None, lambda df: df.to_html(), '1'),
(None, lambda df: df.to_html(border=0), '0'),
(0, lambda df: df.to_html(), '0'),
(0, lambda df: df._repr_html_(), '0'),
])
def test_to_html_border(option, result, expected):
df = DataFrame({'A': [1, 2]})
if option is None:
result = result(df)
else:
with option_context('display.html.border', option):
result = result(df)
expected = 'border="{}"'.format(expected)
assert expected in result
def test_display_option_warning():
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
pd.options.html.border
@pytest.mark.parametrize('biggie_df_fixture', ['mixed'], indirect=True)
def test_to_html(biggie_df_fixture):
# TODO: split this test
df = biggie_df_fixture
s = df.to_html()
buf = StringIO()
retval = df.to_html(buf=buf)
assert retval is None
assert buf.getvalue() == s
assert isinstance(s, str)
df.to_html(columns=['B', 'A'], col_space=17)
df.to_html(columns=['B', 'A'],
formatters={'A': lambda x: '{x:.1f}'.format(x=x)})
df.to_html(columns=['B', 'A'], float_format=str)
df.to_html(columns=['B', 'A'], col_space=12, float_format=str)
@pytest.mark.parametrize('biggie_df_fixture', ['empty'], indirect=True)
def test_to_html_empty_dataframe(biggie_df_fixture):
df = biggie_df_fixture
df.to_html()
def test_to_html_filename(biggie_df_fixture, tmpdir):
df = biggie_df_fixture
expected = df.to_html()
path = tmpdir.join('test.html')
df.to_html(path)
result = path.read()
assert result == expected
def test_to_html_with_no_bold():
df = DataFrame({'x': np.random.randn(5)})
html = df.to_html(bold_rows=False)
result = html[html.find("</thead>")]
assert '<strong' not in result
def test_to_html_columns_arg():
df = DataFrame(tm.getSeriesData())
result = df.to_html(columns=['A'])
assert '<th>B</th>' not in result
@pytest.mark.parametrize('columns,justify,expected', [
(MultiIndex.from_tuples(
list(zip(np.arange(2).repeat(2), np.mod(lrange(4), 2))),
names=['CL0', 'CL1']),
'left',
'multiindex_1'),
(MultiIndex.from_tuples(
list(zip(range(4), np.mod(lrange(4), 2)))),
'right',
'multiindex_2')
])
def test_to_html_multiindex(columns, justify, expected, datapath):
df = DataFrame([list('abcd'), list('efgh')], columns=columns)
result = df.to_html(justify=justify)
expected = expected_html(datapath, expected)
assert result == expected
def test_to_html_justify(justify, datapath):
df = DataFrame({'A': [6, 30000, 2],
'B': [1, 2, 70000],
'C': [223442, 0, 1]},
columns=['A', 'B', 'C'])
result = df.to_html(justify=justify)
expected = expected_html(datapath, 'justify').format(justify=justify)
assert result == expected
@pytest.mark.parametrize("justify", ["super-right", "small-left",
"noinherit", "tiny", "pandas"])
def test_to_html_invalid_justify(justify):
# GH 17527
df = DataFrame()
msg = "Invalid value for justify parameter"
with pytest.raises(ValueError, match=msg):
df.to_html(justify=justify)
def test_to_html_index(datapath):
# TODO: split this test
index = ['foo', 'bar', 'baz']
df = DataFrame({'A': [1, 2, 3],
'B': [1.2, 3.4, 5.6],
'C': ['one', 'two', np.nan]},
columns=['A', 'B', 'C'],
index=index)
expected_with_index = expected_html(datapath, 'index_1')
assert df.to_html() == expected_with_index
expected_without_index = expected_html(datapath, 'index_2')
result = df.to_html(index=False)
for i in index:
assert i not in result
assert result == expected_without_index
df.index = Index(['foo', 'bar', 'baz'], name='idx')
expected_with_index = expected_html(datapath, 'index_3')
assert df.to_html() == expected_with_index
assert df.to_html(index=False) == expected_without_index
tuples = [('foo', 'car'), ('foo', 'bike'), ('bar', 'car')]
df.index = MultiIndex.from_tuples(tuples)
expected_with_index = expected_html(datapath, 'index_4')
assert df.to_html() == expected_with_index
result = df.to_html(index=False)
for i in ['foo', 'bar', 'car', 'bike']:
assert i not in result
# must be the same result as normal index
assert result == expected_without_index
df.index = MultiIndex.from_tuples(tuples, names=['idx1', 'idx2'])
expected_with_index = expected_html(datapath, 'index_5')
assert df.to_html() == expected_with_index
assert df.to_html(index=False) == expected_without_index
@pytest.mark.parametrize('classes', [
"sortable draggable",
["sortable", "draggable"]
])
def test_to_html_with_classes(classes, datapath):
df = DataFrame()
expected = expected_html(datapath, 'with_classes')
result = df.to_html(classes=classes)
assert result == expected
def test_to_html_no_index_max_rows(datapath):
# GH 14998
df = DataFrame({"A": [1, 2, 3, 4]})
result = df.to_html(index=False, max_rows=1)
expected = expected_html(datapath, 'gh14998_expected_output')
assert result == expected
def test_to_html_multiindex_max_cols(datapath):
# GH 6131
index = MultiIndex(levels=[['ba', 'bb', 'bc'], ['ca', 'cb', 'cc']],
codes=[[0, 1, 2], [0, 1, 2]],
names=['b', 'c'])
columns = MultiIndex(levels=[['d'], ['aa', 'ab', 'ac']],
codes=[[0, 0, 0], [0, 1, 2]],
names=[None, 'a'])
data = np.array(
[[1., np.nan, np.nan], [np.nan, 2., np.nan], [np.nan, np.nan, 3.]])
df = DataFrame(data, index, columns)
result = df.to_html(max_cols=2)
expected = expected_html(datapath, 'gh6131_expected_output')
assert result == expected
def test_to_html_multi_indexes_index_false(datapath):
# GH 22579
df = DataFrame({'a': range(10), 'b': range(10, 20), 'c': range(10, 20),
'd': range(10, 20)})
df.columns = MultiIndex.from_product([['a', 'b'], ['c', 'd']])
df.index = MultiIndex.from_product([['a', 'b'],
['c', 'd', 'e', 'f', 'g']])
result = df.to_html(index=False)
expected = expected_html(datapath, 'gh22579_expected_output')
assert result == expected
@pytest.mark.parametrize('index_names', [True, False])
@pytest.mark.parametrize('header', [True, False])
@pytest.mark.parametrize('index', [True, False])
@pytest.mark.parametrize('column_index, column_type', [
(Index([0, 1]), 'unnamed_standard'),
(Index([0, 1], name='columns.name'), 'named_standard'),
(
|
MultiIndex.from_product([['a'], ['b', 'c']])
|
pandas.MultiIndex.from_product
|
from flask import Flask, request, jsonify, g, render_template
from flask_json import FlaskJSON, JsonError, json_response, as_json
from app.data_process import bp
from datetime import datetime
import pandas as pd
from pathlib import Path
from bs4 import BeautifulSoup
import glob
import os
positivity_replace = {
'ALG':3526,
'BRN':3527,
'CKH':3540,
'DUR':3530,
'EOH':3558,
'GBH':3533,
'HNH':3534,
'HKP':3535,
'HAL':3536,
'HAM':3537,
'HPE':3538,
'HPH':3539,
'KFL':3541,
'LAM':3542,
'LGL':3543,
'MSL':3544,
'NIA':3546,
'NPS':3547,
'NWR':3549,
'OTT':3551,
'PEL':3553,
'PET':3555,
'PQP':3556,
'WAT':3565,
'REN':3557,
'SMD':3560,
'SWH':3575,
'SUD':3561,
'THB':3562,
'TSK':3563,
'TOR':3595,
'WDG':3566,
'WEK':3568,
'YRK':3570,
'overall':6
}
def get_file_path(data, step='raw', today=None):
# resolve the default date at call time rather than at import time
today = today or datetime.today().strftime('%Y-%m-%d')
source_dir = 'data/' + data['classification'] + '/' + step + '/'
if data['type'] != '':
file_name = data['table_name'] + '_' + today + '.' + data['type']
else:
file_name = data['table_name'] + '_' + today
save_dir = source_dir + data['source_name'] + '/' + data['table_name']
file_path = save_dir + '/' + file_name
return file_path, save_dir
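# Illustrative result (values assumed): for
# data = {'classification': 'public', 'source_name': 'ontario_gov',
#         'table_name': 'covidtesting', 'type': 'csv'}
# get_file_path(data, 'processed', '2021-01-01') returns
# ('data/public/processed/ontario_gov/covidtesting/covidtesting_2021-01-01.csv',
#  'data/public/processed/ontario_gov/covidtesting')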
@bp.cli.command('public_ontario_gov_daily_change_in_cases_by_phu')
def process_public_ontario_gov_daily_change_in_cases_by_phu():
data = {'classification':'public', 'source_name':'ontario_gov', 'table_name':'daily_change_in_cases_by_phu', 'type': 'csv'}
date_field = ['Date']
load_file, load_dir = get_file_path(data)
files = glob.glob(load_dir+"/*."+data['type'])
for file in files:
try:
filename = file.split('_')[-1]
date = filename.split('.')[0]
save_file, save_dir = get_file_path(data, 'processed', date)
if not os.path.isfile(save_file) or date == datetime.today().strftime('%Y-%m-%d'):
df = pd.read_csv(file)
df = df.melt(id_vars='Date')
replace = {
'Algoma_Public_Health_Unit':3526,
'Algoma_District':3526,
'Brant_County_Health_Unit':3527,
'Brant_County':3527,
'Chatham-Kent_Health_Unit':3540,
'Chatham_Kent':3540,
'Durham_Region_Health_Department':3530,
'Durham_Region':3530,
'Eastern_Ontario_Health_Unit':3558,
'Eastern_Ontario':3558,
'Grey_Bruce_Health_Unit':3533,
'Grey_Bruce':3533,
'Haldimand-Norfolk_Health_Unit':3534,
'Haldimand_Norfolk':3534,
'Haliburton,_Kawartha,_Pine_Ridge_District_Health_Unit':3535,
'Haliburton_Kawartha_Pine_Ridge':3535,
'Halton_Region_Health_Department':3536,
'Halton_Region':3536,
'Hamilton_Public_Health_Services':3537,
'City_of_Hamilton':3537,
'Hastings_and_Prince_Edward_Counties_Health_Unit':3538,
'Hastings_Prince_Edward':3538,
'Huron_Perth_District_Health_Unit':3539,
'Huron_Perth':3539,
'Kingston,_Frontenac_and_Lennox_&_Addington_Public_Health':3541,
'KFLA':3541,
'Lambton_Public_Health':3542,
'Lambton_County':3542,
'Leeds,_Grenville_and_Lanark_District_Health_Unit':3543,
'Leeds_Grenville_Lanark':3543,
'Middlesex-London_Health_Unit':3544,
'Middlesex_London':3544,
'Niagara_Region_Public_Health_Department':3546,
'Niagara_Region':3546,
'North_Bay_Parry_Sound_District_Health_Unit':3547,
'North_Bay_Parry_Sound_District':3547,
'Northwestern_Health_Unit':3549,
'Northwestern':3549,
'Ottawa_Public_Health':3551,
'City_of_Ottawa':3551,
'Peel_Public_Health':3553,
'Peel_Region':3553,
'Peterborough_Public_Health':3555,
'Peterborough_County_City':3555,
'Porcupine_Health_Unit':3556,
'Porcupine':3556,
'Region_of_Waterloo,_Public_Health':3565,
'Waterloo_Region':3565,
'Renfrew_County_and_District_Health_Unit':3557,
'Renfrew_County_and_District':3557,
'Simcoe_Muskoka_District_Health_Unit':3560,
'Simcoe_Muskoka_District':3560,
'Southwestern_Public_Health':3575,
'Southwestern':3575,
'Sudbury_&_District_Health_Unit':3561,
'Sudbury_and_District':3561,
'Thunder_Bay_District_Health_Unit':3562,
'Thunder_Bay_District':3562,
'Timiskaming_Health_Unit':3563,
'Timiskaming':3563,
'Toronto_Public_Health':3595,
'Toronto':3595,
'Wellington-Dufferin-Guelph_Public_Health':3566,
'Wellington_Dufferin_Guelph':3566,
'Windsor-Essex_County_Health_Unit':3568,
'Windsor_Essex_County':3568,
'York_Region_Public_Health_Services':3570,
'York_Region':3570,
'Total':6
}
df['HR_UID'] = df['variable'].replace(replace)
for column in date_field:
df[column] = pd.to_datetime(df[column], errors='coerce')
Path(save_dir).mkdir(parents=True, exist_ok=True)
df.to_csv(save_file, index=False)
except Exception as e:
print(f"Failed to get {file}")
print(e)
return e
@bp.cli.command('public_ontario_gov_conposcovidloc')
def process_public_ontario_gov_conposcovidloc():
data = {'classification':'public', 'source_name':'ontario_gov', 'table_name':'conposcovidloc', 'type': 'csv'}
field_map = {
"Row_ID":"row_id",
"Accurate_Episode_Date": "accurate_episode_date",
"Case_Reported_Date": "case_reported_date",
"Specimen_Date": "specimen_reported_date",
"Test_Reported_Date": "test_reported_date",
"Age_Group":"age_group",
"Client_Gender":"client_gender",
"Case_AcquisitionInfo": "case_acquisition_info",
"Outcome1": "outcome_1",
"Outbreak_Related": "outbreak_related",
"Reporting_PHU": "reporting_phu",
"Reporting_PHU_Address": "reporting_phu_address",
"Reporting_PHU_City": "reporting_phu_city",
"Reporting_PHU_Postal_Code": "reporting_phu_postal_code",
"Reporting_PHU_Website": "reporting_phu_website",
"Reporting_PHU_Latitude":"reporting_phu_latitude",
"Reporting_PHU_Longitude": "reporting_phu_longitude",
}
date_field = ['accurate_episode_date', 'case_reported_date', 'specimen_reported_date', 'test_reported_date']
load_file, load_dir = get_file_path(data)
files = glob.glob(load_dir+"/*."+data['type'])
for file in files:
try:
filename = file.split('_')[-1]
date = filename.split('.')[0]
save_file, save_dir = get_file_path(data, 'processed', date)
if not os.path.isfile(save_file) or date == datetime.today().strftime('%Y-%m-%d'):
df = pd.read_csv(file)
df = df.replace("12:00:00 AM", None)
df = df.rename(columns=field_map)
for column in date_field:
df[column] = pd.to_datetime(df[column], errors='coerce')
Path(save_dir).mkdir(parents=True, exist_ok=True)
df.to_csv(save_file, index=False)
except Exception as e:
print(f"Failed to get {file}")
print(e)
return e
@bp.cli.command('public_ontario_gov_vaccination')
def process_public_ontario_gov_vaccination():
data = {'classification':'public', 'source_name':'ontario_gov', 'table_name':'vaccination', 'type': 'csv'}
date_field = ['date']
field_map = {
'report_date': 'date'
}
load_file, load_dir = get_file_path(data)
files = glob.glob(load_dir+"/*."+data['type'])
for file in files:
try:
filename = file.split('_')[-1]
date = filename.split('.')[0]
save_file, save_dir = get_file_path(data, 'processed', date)
if not os.path.isfile(save_file) or date == datetime.today().strftime('%Y-%m-%d'):
df = pd.read_csv(file)
df = df.rename(columns=field_map)
df.dropna(how='all', axis=1, inplace=True)
df.dropna(how='any', inplace=True)
for index, row in df.iterrows():
if type(row['previous_day_doses_administered'])==str:
df.at[index,'previous_day_doses_administered'] = row['previous_day_doses_administered'].replace(",","")
if type(row['total_doses_administered'])==str:
df.at[index,'total_doses_administered'] = row['total_doses_administered'].replace(",","")
if type(row['total_doses_in_fully_vaccinated_individuals'])==str:
df.at[index,'total_doses_in_fully_vaccinated_individuals'] = row['total_doses_in_fully_vaccinated_individuals'].replace(",","")
if type(row['total_individuals_fully_vaccinated'])==str:
df.at[index,'total_individuals_fully_vaccinated'] = row['total_individuals_fully_vaccinated'].replace(",","")
for column in date_field:
df[column] = pd.to_datetime(df[column])
Path(save_dir).mkdir(parents=True, exist_ok=True)
df.to_csv(save_file, index=False)
except Exception as e:
print(f"Failed to get {file}")
print(e)
return e
@bp.cli.command('public_ontario_gov_covidtesting')
def process_public_ontario_gov_covidtesting():
data = {'classification':'public', 'source_name':'ontario_gov', 'table_name':'covidtesting', 'type': 'csv'}
date_field = ['reported_date']
load_file, load_dir = get_file_path(data)
files = glob.glob(load_dir+"/*."+data['type'])
for file in files:
try:
filename = file.split('_')[-1]
date = filename.split('.')[0]
save_file, save_dir = get_file_path(data, 'processed', date)
if not os.path.isfile(save_file) or date == datetime.today().strftime('%Y-%m-%d'):
df = pd.read_csv(file)
to_include = []
for column in df.columns:
name = column.replace(' ','_').lower()
df[name] = df[column]
to_include.append(name)
df = df[to_include]
for column in date_field:
df[column] = pd.to_datetime(df[column])
Path(save_dir).mkdir(parents=True, exist_ok=True)
df.to_csv(save_file, index=False)
except Exception as e:
print(f"Failed to get {file}")
print(e)
return e
@bp.cli.command('confidential_211_call_reports')
def process_confidential_211_call_reports():
data = {'classification':'confidential', 'source_name':'211', 'table_name':'call_reports', 'type': 'csv'}
field_map = {
"CallReportNum":"call_report_num",
"CallDateAndTimeStart": "call_date_and_time_start",
"Demographics of Inquirer - Age Category": "age_of_inquirer"
}
date_field = ['call_date_and_time_start']
load_file, load_dir = get_file_path(data)
files = glob.glob(load_dir+"/*."+data['type'])
for file in files:
try:
filename = file.split('_')[-1]
date = filename.split('.')[0]
save_file, save_dir = get_file_path(data, 'processed', date)
if not os.path.isfile(save_file) or date == datetime.today().strftime('%Y-%m-%d'):
df = pd.read_csv(file)
df = df.rename(columns=field_map)
df = df[field_map.values()]
for column in date_field:
df[column] = pd.to_datetime(df[column],errors='coerce')
Path(save_dir).mkdir(parents=True, exist_ok=True)
df.to_csv(save_file, index=False)
except Exception as e:
print(f"Failed to get {file}")
print(e)
return e
@bp.cli.command('confidential_211_met_and_unmet_needs')
def process_confidential_211_met_and_unmet_needs():
data = {'classification':'confidential', 'source_name':'211', 'table_name':'met_and_unmet_needs', 'type': 'csv'}
field_map = {
'DateOfCall':'date_of_call',
'ReportNeedNum':'report_need_num',
'AIRSNeedCategory':'airs_need_category'
}
date_field = ['date_of_call']
load_file, load_dir = get_file_path(data)
files = glob.glob(load_dir+"/*."+data['type'])
for file in files:
try:
filename = file.split('_')[-1]
date = filename.split('.')[0]
save_file, save_dir = get_file_path(data, 'processed', date)
if not os.path.isfile(save_file) or date == datetime.today().strftime('%Y-%m-%d'):
df = pd.read_csv(file)
df = df.rename(columns=field_map)
df = df[field_map.values()]
for column in date_field:
df[column] = pd.to_datetime(df[column],errors='coerce')
Path(save_dir).mkdir(parents=True, exist_ok=True)
df.to_csv(save_file, index=False)
except Exception as e:
print(f"Failed to get {file}")
print(e)
return e
@bp.cli.command('confidential_211_referrals')
def process_confidential_211_referrals():
data = {'classification':'confidential', 'source_name':'211', 'table_name':'referrals', 'type': 'csv'}
field_map = {
"CallReportNum":"call_report_num",
"DateOfCall": "date_of_call",
"ResourceNum":"resource_num",
"MetOrUnmet":"met_or_unmet"
}
date_field = ['date_of_call']
load_file, load_dir = get_file_path(data)
files = glob.glob(load_dir+"/*."+data['type'])
for file in files:
try:
filename = file.split('_')[-1]
date = filename.split('.')[0]
save_file, save_dir = get_file_path(data, 'processed', date)
if not os.path.isfile(save_file) or date == datetime.today().strftime('%Y-%m-%d'):
df = pd.read_csv(file,error_bad_lines=False)
df = df.rename(columns=field_map)
df = df[field_map.values()]
for column in date_field:
df[column] = pd.to_datetime(df[column],errors='coerce')
Path(save_dir).mkdir(parents=True, exist_ok=True)
df.to_csv(save_file, index=False)
except Exception as e:
print(f"Failed to get {file}")
print(e)
return e
@bp.cli.command('confidential_burning_glass_industry_weekly')
def process_confidential_burning_glass_industry_weekly():
data = {'classification':'confidential', 'source_name':'burning_glass', 'table_name':'industry_weekly', 'type': 'csv'}
field_map = {
"country_code":"country_code",
"country":"country",
"geography_code":"geography_code",
"geography" :"geography",
"geography_type":"geography_type",
"group_code":"group_code",
"group_name":"group_name",
"group_type":"group_type",
"start_date":"start_date",
"end_date" :"end_date",
"job_postings_count":"job_postings_count",
"population":"population",
}
date_field = ['start_date','end_date']
load_file, load_dir = get_file_path(data)
files = glob.glob(load_dir+"/*."+data['type'])
for file in files:
try:
filename = file.split('_')[-1]
date = filename.split('.')[0]
save_file, save_dir = get_file_path(data, 'processed', date)
if not os.path.isfile(save_file) or date == datetime.today().strftime('%Y-%m-%d'):
df = pd.read_csv(file)
df = df.rename(columns=field_map)
for column in date_field:
df[column] = pd.to_datetime(df[column],errors='coerce')
Path(save_dir).mkdir(parents=True, exist_ok=True)
df.to_csv(save_file, index=False)
except Exception as e:
print(f"Failed to get {file}")
print(e)
return e
@bp.cli.command('restricted_ccso_ccis')
def process_restricted_ccso_ccis():
data = {'classification':'restricted', 'source_name':'ccso', 'table_name':'ccis', 'type': 'csv'}
field_map = {
"RegionName":"region",
"LHINName": "lhin",
"CorporationName": "hospital_name",
"SiteName": "site_name",
"UnitInclusion": "unit_inclusion",
"ICUType": "icu_type",
"ICULevel": "icu_level",
"Beds": "critical_care_beds",
"VentedBeds": "vented_beds",
"CCCensus": "critical_care_patients",
"CensusVented": "vented_patients",
"CCCOVID_P_Census": "confirmed_positive",
"CensusCOVID_P_Vented": "confirmed_positive_ventilator",
"REGIONNAME": "region",
"LHINNAME": "lhin",
"CORPORATIONNAME": "hospital_name",
"SITENAME": "site_name",
"UNITINCLUSION": "unit_inclusion",
"ICUTYPE": "icu_type",
"ICULEVEL": "icu_level",
"BEDS": "critical_care_beds",
"VENTEDBEDS": "vented_beds",
"CCCENSUS": "critical_care_patients",
"CENSUSVENTED": "vented_patients",
"CCCOVIDPOSITIVECENSUS": "confirmed_positive",
"CENSUSCOVIDPOSITIVEVENTED": "confirmed_positive_ventilator",
}
date_field = []
load_file, load_dir = get_file_path(data)
files = glob.glob(load_dir+"/*."+data['type'])
for file in files:
try:
filename = file.split('_')[-1]
date = filename.split('.')[0]
save_file, save_dir = get_file_path(data, 'processed', date)
if not os.path.isfile(save_file) or date == datetime.today().strftime('%Y-%m-%d'):
df = pd.read_csv(file, encoding="ISO-8859-1")
df = df.rename(columns=field_map)
df = df[field_map.values()]
for column in date_field:
df[column] = pd.to_datetime(df[column],errors='coerce')
Path(save_dir).mkdir(parents=True, exist_ok=True)
df.to_csv(save_file, index=False)
except Exception as e:
print(f"Failed to get {file}")
print(e)
return e
@bp.cli.command('restricted_moh_iphis')
def process_restricted_moh_iphis():
data = {'classification':'restricted', 'source_name':'moh', 'table_name':'iphis', 'type': 'csv'}
field_map = {
"pseudo_id": "pseudo_id",
"FSA":"fsa",
"CASE_REPORTED_DATE": "case_reported_date",
"CLIENT_DEATH_DATE": "client_death_date",
"HCW": "hcw"
}
date_field = ['case_reported_date','client_death_date']
load_file, load_dir = get_file_path(data)
files = glob.glob(load_dir+"/*."+data['type'])
def convert_date(date):
try:
value = date.split(':')
date_time_str = value[0]
date_time_obj = datetime.strptime(date_time_str, "%d%b%Y")
return date_time_obj
except:
return None
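# Illustrative behaviour: convert_date('01JAN2021:00:00:00') -> datetime(2021, 1, 1);
# any value whose prefix before the first ':' does not match '%d%b%Y' returns None.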
for file in files:
try:
filename = file.split('_')[-1]
date = filename.split('.')[0]
save_file, save_dir = get_file_path(data, 'processed', date)
if not os.path.isfile(save_file) or date == datetime.today().strftime('%Y-%m-%d'):
df = pd.read_csv(file,encoding = "ISO-8859-1")
df = df.rename(columns=field_map)
df = df[field_map.values()]
for column in date_field:
df[column] = df[column].apply(convert_date)
df['fsa'] = df['fsa'].str.upper()
Path(save_dir).mkdir(parents=True, exist_ok=True)
df.to_csv(save_file, index=False)
except Exception as e:
print(f"Failed to get {file}")
print(e)
return e
@bp.cli.command('public_ices_percent_positivity')
def process_public_ices_percent_positivity():
data = {'classification':'public', 'source_name':'ices', 'table_name':'percent_positivity', 'type': 'xlsx'}
date_field = ['reported_date']
load_file, load_dir = get_file_path(data)
files = glob.glob(load_dir+"/*."+data['type'])
for file in files:
try:
filename = file.split('_')[-1]
date = filename.split('.')[0]
data_out = {'classification':'public', 'source_name':'ices', 'table_name':'percent_positivity', 'type': 'csv'}
save_file, save_dir = get_file_path(data_out, 'processed', today=date)
if not os.path.isfile(save_file) or date == datetime.today().strftime('%Y-%m-%d'):
dfs = []
for i in range(3,8):
dfs.append(pd.read_excel(file,engine='openpyxl', sheet_name=i, skiprows=28))
df = pd.concat(dfs)
Path(save_dir).mkdir(parents=True, exist_ok=True)
df.to_csv(save_file, index=False)
except Exception as e:
print(f"Failed to get {file}")
print(e)
return e
@bp.cli.command('public_ices_vaccination')
def process_public_ices_vaccination():
data = {'classification':'public', 'source_name':'ices', 'table_name':'vaccination', 'type': 'xlsx'}
load_file, load_dir = get_file_path(data)
files = glob.glob(load_dir+"/*."+data['type'])
for file in files:
try:
filename = file.split('_')[-1]
date = filename.split('.')[0]
data_out = {'classification':'public', 'source_name':'ices', 'table_name':'vaccination', 'type': 'csv'}
save_file, save_dir = get_file_path(data_out, 'processed')
if not os.path.isfile(save_file) or date == datetime.today().strftime('%Y-%m-%d'):
date = pd.read_excel(file,engine='openpyxl',sheet_name=2, header=11)
date_text = date.iloc[0,0]
update_date_text = date_text.split()[-1]
update_date =
|
pd.to_datetime(update_date_text,format="%d%b%Y")
|
pandas.to_datetime
|